Commit 9eb3cd3
fix proposed changes, mainly ensure robust error handling and type hints
yalsaffar committed Oct 29, 2024
1 parent 69aa9f0 commit 9eb3cd3
Showing 7 changed files with 37 additions and 39 deletions.
7 changes: 5 additions & 2 deletions aepsych/acquisition/lookahead.py
@@ -406,14 +406,17 @@ def construct_inputs_global_lookahead(
lookahead_type="levelset",
target: Optional[float] = None,
posterior_transform: Optional[PosteriorTransform] = None,
query_set_size: int = 256,
query_set_size: Optional[int] = 256,
Xq: Optional[Tensor] = None,
**kwargs,
) -> Dict[str, Any]:
lb = torch.tensor([bounds[0] for bounds in kwargs["bounds"]])
ub = torch.tensor([bounds[1] for bounds in kwargs["bounds"]])
Xq = Xq if Xq is not None else make_scaled_sobol(lb, ub, query_set_size)
if Xq is None and query_set_size is None:
raise ValueError("Either Xq or query_set_size must be provided.")

if Xq is None and query_set_size is not None:
Xq = make_scaled_sobol(lb, ub, query_set_size)
return {
"model": model,
"lookahead_type": lookahead_type,
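The new guard fails fast when neither an explicit query set nor a size to generate one is supplied, rather than passing query_set_size=None into make_scaled_sobol. A minimal, self-contained sketch of the same pattern, with a Sobol stand-in for aepsych's make_scaled_sobol (the helper name make_query_set and the rescaling are illustrative only):

```python
from typing import Optional

import torch
from torch import Tensor
from torch.quasirandom import SobolEngine


def make_query_set(
    lb: Tensor,
    ub: Tensor,
    query_set_size: Optional[int] = 256,
    Xq: Optional[Tensor] = None,
) -> Tensor:
    """Return Xq if given, otherwise draw query_set_size quasi-random points in [lb, ub]."""
    if Xq is None and query_set_size is None:
        raise ValueError("Either Xq or query_set_size must be provided.")
    if Xq is None and query_set_size is not None:
        # Stand-in for make_scaled_sobol: Sobol samples rescaled to the bounds.
        raw = SobolEngine(dimension=lb.shape[0], scramble=True).draw(query_set_size)
        Xq = lb + (ub - lb) * raw
    return Xq


lb, ub = torch.zeros(2), torch.ones(2)
print(make_query_set(lb, ub, query_set_size=16).shape)  # torch.Size([16, 2])
```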
11 changes: 4 additions & 7 deletions aepsych/benchmark/problem.py
@@ -121,7 +121,7 @@ def p_hat(self, model: aepsych.models.base.ModelProtocol) -> torch.Tensor:

def evaluate(
self,
strat: Union[SequentialStrategy, Strategy],
strat: SequentialStrategy,
) -> Dict[str, float]:
"""Evaluate the strategy with respect to this problem.
@@ -257,7 +257,7 @@ def true_below_threshold(self) -> torch.Tensor:
self.p(self.eval_grid).reshape(1, -1) <= self.thresholds.reshape(-1, 1)
).to(torch.float32)

def evaluate(self, strat: Union[SequentialStrategy, Strategy]) -> Dict[str, float]:
def evaluate(self, strat: SequentialStrategy) -> Dict[str, float]:
"""Evaluate the model with respect to this problem.
For level set estimation, we add metrics w.r.t. the true threshold:
@@ -319,16 +319,13 @@ class LSEProblemWithEdgeLogging(LSEProblem):
def __init__(self, thresholds: Union[float, List, torch.Tensor]) -> None:
super().__init__(thresholds)

def evaluate(self, strat: Union[SequentialStrategy, Strategy]) -> Dict[str, float]:
def evaluate(self, strat: SequentialStrategy) -> Dict[str, float]:
metrics = super().evaluate(strat)

# add number of edge samples to the log

# get the trials selected by the final strat only
if isinstance(strat, SequentialStrategy):
n_opt_trials = strat.strat_list[-1].n_trials
elif isinstance(strat, Strategy):
n_opt_trials = strat.n_trials
n_opt_trials = strat.strat_list[-1].n_trials

lb, ub = strat.lb, strat.ub
r = ub - lb
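With evaluate() narrowed to accept only SequentialStrategy, the isinstance() dispatch above is no longer needed: the final refinement stage is always strat.strat_list[-1]. A toy sketch with hypothetical stand-in classes (the real Strategy and SequentialStrategy live in aepsych.strategy):

```python
from dataclasses import dataclass
from typing import List


@dataclass
class FakeStrategy:            # hypothetical stand-in for aepsych.strategy.Strategy
    n_trials: int


@dataclass
class FakeSequentialStrategy:  # hypothetical stand-in for SequentialStrategy
    strat_list: List[FakeStrategy]


seq = FakeSequentialStrategy([FakeStrategy(n_trials=10), FakeStrategy(n_trials=50)])
n_opt_trials = seq.strat_list[-1].n_trials  # 50; no isinstance() branching required
print(n_opt_trials)
```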
18 changes: 9 additions & 9 deletions aepsych/kernels/pairwisekernel.py
@@ -1,4 +1,4 @@
from typing import Any, Optional
from typing import Any, Optional, Union
import torch
from gpytorch.kernels import Kernel
from linear_operator import to_linear_operator
@@ -39,11 +39,11 @@ def forward(self, x1: torch.Tensor, x2: torch.Tensor, diag: bool=False, **params
* `diag`: `n` or `b x n`
"""
if self.is_partial_obs:
d_ = x1.shape[-1] - 1 #avoiding assigning d int then tensor(for mypy)
assert d_ == x2.shape[-1] - 1, "tensors not the same dimension"
assert d_ % 2 == 0, "dimension must be even"
d : Union[torch.Tensor, int] = x1.shape[-1] - 1
assert d == x2.shape[-1] - 1, "tensors not the same dimension"
assert d % 2 == 0, "dimension must be even"

k = int(d_ / 2)
k = int(d / 2)

# special handling for kernels that (also) do funky
# things with the input dimension
@@ -56,12 +56,12 @@ def forward(self, x1: torch.Tensor, x2: torch.Tensor, diag: bool=False, **params
d = torch.cat((x2[..., k:-1], deriv_idx_2), dim=1)

else:
d_ = x1.shape[-1]
d = x1.shape[-1]

assert d_ == x2.shape[-1], "tensors not the same dimension"
assert d_ % 2 == 0, "dimension must be even"
assert d == x2.shape[-1], "tensors not the same dimension"
assert d % 2 == 0, "dimension must be even"

k = int(d_ / 2)
k = int(d / 2)

a = x1[..., :k]
b = x1[..., k:]
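The dimension variable is now annotated Union[torch.Tensor, int] for mypy instead of using the d_ workaround, but the input convention the asserts enforce is unchanged: each row of x1 and x2 stacks two k-dimensional points, so the last dimension must be even. A short sketch of that split, using the same slicing as the kernel:

```python
import torch

x1 = torch.rand(5, 6)    # n x 2k, here k = 3
d = x1.shape[-1]
assert d % 2 == 0, "dimension must be even"
k = int(d / 2)

a = x1[..., :k]          # first point of each pair
b = x1[..., k:]          # second point of each pair
print(a.shape, b.shape)  # torch.Size([5, 3]) torch.Size([5, 3])
```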
10 changes: 5 additions & 5 deletions aepsych/models/gp_classification.py
@@ -242,7 +242,7 @@ def fit(
self._fit_mll(mll, **kwargs)

def sample(
self, x: Union[torch.Tensor, np.ndarray], num_samples: int
self, x: torch.Tensor, num_samples: int
) -> torch.Tensor:
"""Sample from underlying model.
@@ -257,7 +257,7 @@ def sample(
return self.posterior(x).rsample(torch.Size([num_samples])).detach().squeeze()

def predict(
self, x: Union[torch.Tensor, np.ndarray], probability_space: bool = False
self, x: torch.Tensor, probability_space: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Query the model for posterior mean and variance.
@@ -267,7 +267,7 @@ def predict(
response probability instead of latent function value. Defaults to False.
Returns:
Tuple[np.ndarray, np.ndarray]: Posterior mean and variance at queries points.
Tuple[torch.Tensor, torch.Tensor]: Posterior mean and variance at queries points.
"""
with torch.no_grad():
post = self.posterior(x)
@@ -280,7 +280,7 @@ def predict(
a_star = fmean / torch.sqrt(1 + fvar)
pmean = Normal(0, 1).cdf(a_star)
t_term = torch.tensor(
owens_t(a_star.numpy(), 1 / np.sqrt(1 + 2 * fvar.numpy())),
owens_t(a_star, 1 / torch.sqrt(1 + 2 * fvar)),
dtype=a_star.dtype,
)
pvar = pmean - 2 * t_term - pmean.square()
@@ -298,7 +298,7 @@ def predict(
return promote_0d(fmean), promote_0d(fvar)

def predict_probability(
self, x: Union[torch.Tensor, np.ndarray]
self, x: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
return self.predict(x, probability_space=True)

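The updated predict() passes torch tensors straight to scipy's owens_t instead of round-tripping through numpy. For reference, the probability-space moments it computes follow the standard probit-Gaussian identities: with latent mean fmean and variance fvar, pmean = Phi(a*) where a* = fmean / sqrt(1 + fvar), and pvar = pmean - 2*T(a*, 1/sqrt(1 + 2*fvar)) - pmean^2, with T being Owen's T function. A standalone sketch mirroring the updated call (CPU tensors assumed, since scipy converts them via numpy):

```python
import torch
from scipy.special import owens_t
from torch.distributions import Normal

fmean = torch.tensor([0.3, -0.1])  # latent posterior mean at two query points
fvar = torch.tensor([0.5, 0.2])    # latent posterior variance

a_star = fmean / torch.sqrt(1 + fvar)
pmean = Normal(0, 1).cdf(a_star)   # E[Phi(f)] under the latent Gaussian
t_term = torch.tensor(
    owens_t(a_star, 1 / torch.sqrt(1 + 2 * fvar)),
    dtype=a_star.dtype,
)
pvar = pmean - 2 * t_term - pmean.square()  # Var[Phi(f)]
print(pmean, pvar)
```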
2 changes: 1 addition & 1 deletion aepsych/models/monotonic_projection_gp.py
@@ -167,7 +167,7 @@ def posterior(
return GPyTorchPosterior(mvn_proj)

def sample(
self, x: Union[torch.Tensor, np.ndarray], num_samples: int
self, x: torch.Tensor, num_samples: int
) -> torch.Tensor:
samps = super().sample(x=x, num_samples=num_samples)
if self.min_f_val is not None:
14 changes: 6 additions & 8 deletions aepsych/models/semi_p.py
@@ -333,7 +333,7 @@ def fit(

def sample(
self,
x: Union[torch.Tensor, np.ndarray],
x: torch.Tensor,
num_samples: int,
probability_space=False,
) -> torch.Tensor:
@@ -357,7 +357,7 @@ def sample(
return samps.squeeze(1)

def predict(
self, x: Union[torch.Tensor, np.ndarray], probability_space: bool = False
self, x: torch.Tensor, probability_space: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Query the model for posterior mean and variance.
@@ -367,7 +367,7 @@ def predict(
response probability instead of latent function value. Defaults to False.
Returns:
Tuple[np.ndarray, np.ndarray]: Posterior mean and variance at query points.
Tuple[torch.Tensor, torch.Tensor]: Posterior mean and variance at query points.
"""
with torch.no_grad():
samps = self.sample(
@@ -376,9 +376,7 @@ def predict(
m, v = samps.mean(0), samps.var(0)
return promote_0d(m), promote_0d(v)

def posterior(self, X: Union[torch.Tensor, np.ndarray], posterior_transform: Optional[PosteriorTransform] = None) -> SemiPPosterior:
if isinstance(X, np.ndarray):
X = torch.tensor(X)
def posterior(self, X: torch.Tensor, posterior_transform: Optional[PosteriorTransform] = None) -> SemiPPosterior:
# Assume x is (b) x n x d
if X.ndim > 3:
raise ValueError
@@ -607,7 +605,7 @@ def from_config(cls, config: Config) -> HadamardSemiPModel:
)

def predict(
self, x: Union[torch.Tensor, np.ndarray], probability_space: bool = False
self, x: torch.Tensor, probability_space: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Query the model for posterior mean and variance.
@@ -617,7 +615,7 @@ def predict(
response probability instead of latent function value. Defaults to False.
Returns:
Tuple[np.ndarray, np.ndarray]: Posterior mean and variance at queries points.
Tuple[torch.Tensor, torch.Tensor]: Posterior mean and variance at queries points.
"""
if probability_space:
if hasattr(self.likelihood, "objective"):
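Because posterior() no longer coerces numpy arrays, callers that previously passed np.ndarray inputs presumably need an explicit torch.as_tensor(X) first. The sample-based predict() path is otherwise unchanged: draw samples at the query points and summarize them, as in this small sketch (random draws stand in for model.sample()):

```python
import torch

samps = torch.randn(1000, 8)        # stand-in for model.sample(x, num_samples=1000)
m, v = samps.mean(0), samps.var(0)  # empirical posterior mean/variance per query point
print(m.shape, v.shape)             # torch.Size([8]) torch.Size([8])
```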
14 changes: 7 additions & 7 deletions aepsych/strategy.py
@@ -223,40 +223,40 @@ def gen(self, num_points: int = 1) -> torch.Tensor:
@ensure_model_is_fresh
def get_max(self, constraints: Optional[Mapping[int, List[float]]] = None, probability_space: bool = False, max_time: Optional[float] = None) -> Tuple[float, torch.Tensor]:
constraints = constraints or {}
assert self.model is not None, "a model is needed here!"
assert self.model is not None, "model is None! Cannot get the max without a model!"
return self.model.get_max(
constraints, probability_space=probability_space, max_time=max_time
)

@ensure_model_is_fresh
def get_min(self, constraints: Optional[Mapping[int, List[float]]] = None, probability_space: bool = False, max_time: Optional[float] = None) -> Tuple[float, torch.Tensor]:
constraints = constraints or {}
assert self.model is not None, "a model is needed here!"
assert self.model is not None, "model is None! Cannot get the min without a model!"
return self.model.get_min(
constraints, probability_space=probability_space, max_time=max_time
)

@ensure_model_is_fresh
def inv_query(self, y: int, constraints: Optional[Mapping[int, List[float]]] = None, probability_space: bool = False, max_time: Optional[float] = None) -> Tuple[float, torch.Tensor]:
constraints = constraints or {}
assert self.model is not None, "a model is needed here!"
assert self.model is not None, "model is None! Cannot get the inv_query without a model!"
return self.model.inv_query(
y, constraints, probability_space, max_time=max_time
)

@ensure_model_is_fresh
def predict(self, x: torch.Tensor, probability_space: bool = False) -> torch.Tensor:
assert self.model is not None, "a model is needed here!"
assert self.model is not None, "model is None! Cannot predict without a model!"
return self.model.predict(x=x, probability_space=probability_space)

@ensure_model_is_fresh
def get_jnd(self, *args, **kwargs) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
assert self.model is not None, "a model is needed here!"
assert self.model is not None, "model is None! Cannot get the get jnd without a model!"
return self.model.get_jnd(*args, **kwargs)

@ensure_model_is_fresh
def sample(self, x: torch.Tensor, num_samples: Optional[int] = None) -> torch.Tensor:
assert self.model is not None, "a model is needed here!"
assert self.model is not None, "model is None! Cannot sample without a model!"
return self.model.sample(x, num_samples=num_samples)

def finish(self) -> None:
@@ -290,7 +290,7 @@ def finished(self) -> bool:
sufficient_outcomes = True

if self.min_post_range is not None:
assert self.model is not None, "a model is needed here!"
assert self.model is not None, "model is None! Cannot predict without a model!"
fmean, _ = self.model.predict(self.eval_grid, probability_space=True)
meets_post_range = ((fmean.max() - fmean.min()) >= self.min_post_range).item()
else: