
Commit

update formatting in all modified files
yalsaffar committed Nov 19, 2024
1 parent 87261b4 commit f2eadbc
Showing 28 changed files with 380 additions and 181 deletions.
6 changes: 4 additions & 2 deletions aepsych/benchmark/example_problems.py
@@ -105,8 +105,10 @@ def __init__(
)
y = torch.LongTensor(self.data[:, 0])
x = torch.Tensor(self.data[:, 1:])
inducing_size=100
inducing_points = select_inducing_points(inducing_size=inducing_size, allocator=SobolAllocator(bounds=self.bounds))
inducing_size = 100
inducing_points = select_inducing_points(
inducing_size=inducing_size, allocator=SobolAllocator(bounds=self.bounds)
)

# Fit a model, with a large number of inducing points
self.m = GPClassificationModel(
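Aside (not part of the diff): a minimal, runnable sketch of the reformatted select_inducing_points call above, in isolation. The import path for select_inducing_points and the bounds values are assumptions for illustration; only the SobolAllocator import location is confirmed elsewhere in this commit.

import torch

from aepsych.models.inducing_point_allocators import SobolAllocator
# NOTE: assumed import path; select_inducing_points' module is not shown in this diff.
from aepsych.models.utils import select_inducing_points

# Placeholder bounds: first row lower, second row upper, one column per parameter.
bounds = torch.tensor([[0.0, 0.0], [1.0, 1.0]])

inducing_size = 100
inducing_points = select_inducing_points(
    inducing_size=inducing_size, allocator=SobolAllocator(bounds=bounds)
)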
10 changes: 8 additions & 2 deletions aepsych/generators/epsilon_greedy_generator.py
@@ -15,7 +15,13 @@


class EpsilonGreedyGenerator(AEPsychGenerator):
def __init__(self, lb:torch.Tensor, ub:torch.Tensor,subgenerator: AEPsychGenerator, epsilon: float = 0.1) -> None:
def __init__(
self,
lb: torch.Tensor,
ub: torch.Tensor,
subgenerator: AEPsychGenerator,
epsilon: float = 0.1,
) -> None:
self.subgenerator = subgenerator
self.epsilon = epsilon
self.lb = lb
@@ -31,7 +37,7 @@ def from_config(cls, config: Config) -> "EpsilonGreedyGenerator":
)
subgen = subgen_cls.from_config(config)
epsilon = config.getfloat(classname, "epsilon", fallback=0.1)
return cls(lb=lb, ub=ub,subgenerator=subgen, epsilon=epsilon)
return cls(lb=lb, ub=ub, subgenerator=subgen, epsilon=epsilon)

def gen(self, num_points: int, model: ModelProtocol) -> torch.Tensor:
if num_points > 1:
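For context (not part of the diff): the lb and ub arguments added above support epsilon-greedy exploration within the parameter bounds. The sketch below illustrates that idea only and is not the actual EpsilonGreedyGenerator.gen implementation.

import torch

def epsilon_greedy_point(lb: torch.Tensor, ub: torch.Tensor, subgenerator, model, epsilon: float = 0.1) -> torch.Tensor:
    # Illustrative only: with probability epsilon, explore a uniform random point inside [lb, ub]...
    if torch.rand(1).item() < epsilon:
        return lb + torch.rand_like(lb) * (ub - lb)
    # ...otherwise exploit by delegating to the wrapped generator.
    return subgenerator.gen(num_points=1, model=model)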
1 change: 0 additions & 1 deletion aepsych/generators/monotonic_rejection_generator.py
@@ -180,7 +180,6 @@ def from_config(cls, config: Config) -> "MonotonicRejectionGenerator":
extra_acqf_args = cls._get_acqf_options(acqf, config)
lb = torch.tensor(config.getlist(classname, "lb"))
ub = torch.tensor(config.getlist(classname, "ub"))


options = {}
options["num_restarts"] = config.getint(classname, "restarts", fallback=10)
14 changes: 11 additions & 3 deletions aepsych/generators/optimize_acqf_generator.py
@@ -72,14 +72,22 @@ def __init__(
self.lb = lb
self.ub = ub


def _instantiate_acquisition_fn(self, model: ModelProtocol) -> AcquisitionFunction:
if "lb" in inspect.signature(self.acqf).parameters and "ub" in inspect.signature(self.acqf).parameters:
if (
"lb" in inspect.signature(self.acqf).parameters
and "ub" in inspect.signature(self.acqf).parameters
):
if self.acqf == AnalyticExpectedUtilityOfBestOption:
return self.acqf(pref_model=model, lb=self.lb, ub=self.ub)

if self.acqf in self.baseline_requiring_acqfs:
return self.acqf(model, model.train_inputs[0], lb=self.lb, ub=self.ub, **self.acqf_kwargs)
return self.acqf(
model,
model.train_inputs[0],
lb=self.lb,
ub=self.ub,
**self.acqf_kwargs,
)

return self.acqf(model=model, lb=self.lb, ub=self.ub, **self.acqf_kwargs)

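Note (outside the diff): the reformatted condition above uses inspect.signature to ask whether an acquisition function takes explicit bounds. A small sketch of the same pattern, with an illustrative helper name:

import inspect

def accepts_bounds(acqf_cls) -> bool:
    # True if the acquisition function's constructor declares both `lb` and `ub`,
    # mirroring the check in _instantiate_acquisition_fn above.
    params = inspect.signature(acqf_cls).parameters
    return "lb" in params and "ub" in params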
12 changes: 6 additions & 6 deletions aepsych/kernels/pairwisekernel.py
@@ -16,12 +16,12 @@ class PairwiseKernel(Kernel):
"""

def __init__(
self, latent_kernel: Kernel, is_partial_obs: bool=False, **kwargs
self, latent_kernel: Kernel, is_partial_obs: bool = False, **kwargs
) -> None:
"""
Args:
latent_kernel (Kernel): The underlying kernel used to compute the covariance for the GP.
is_partial_obs (bool): If the kernel should handle partial observations. Defaults to False.
Args:
latent_kernel (Kernel): The underlying kernel used to compute the covariance for the GP.
is_partial_obs (bool): If the kernel should handle partial observations. Defaults to False.
"""
super(PairwiseKernel, self).__init__(**kwargs)

@@ -40,11 +40,11 @@ def forward(
x1 (torch.Tensor): A `b x n x d` or `n x d` tensor, where `d = 2k` and `k` is the dimension of the latent space.
x2 (torch.Tensor): A `b x m x d` or `m x d` tensor, where `d = 2k` and `k` is the dimension of the latent space.
diag (bool): Should the Kernel compute the whole covariance matrix or just the diagonal? Defaults to False.
Returns:
torch.Tensor (or :class:`gpytorch.lazy.LazyTensor`) : A `b x n x m` or `n x m` tensor representing
the covariance matrix between `x1` and `x2`.
the covariance matrix between `x1` and `x2`.
The exact size depends on the kernel's evaluation mode:
* `full_covar`: `n x m` or `b x n x m`
* `diag`: `n` or `b x n`
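Aside (not part of the diff): the docstring above describes inputs of width d = 2k, i.e. two concatenated k-dimensional latent points per row. The sketch below shows that shape convention with a plain RBF and one common pairwise/preference-kernel combination; it is not necessarily AEPsych's exact formula.

import torch

def rbf(a: torch.Tensor, b: torch.Tensor, lengthscale: float = 1.0) -> torch.Tensor:
    # Plain RBF on k-dimensional points (illustrative; not gpytorch's RBFKernel).
    sqdist = torch.cdist(a, b) ** 2
    return torch.exp(-0.5 * sqdist / lengthscale**2)

k = 3
x1 = torch.rand(5, 2 * k)  # n x d with d = 2k
x2 = torch.rand(4, 2 * k)  # m x d
a1, b1 = x1[:, :k], x1[:, k:]  # split each row into its two latent points
a2, b2 = x2[:, :k], x2[:, k:]

# One common pairwise construction: covariance of the preference differences f(a) - f(b).
K = rbf(a1, a2) + rbf(b1, b2) - rbf(a1, b2) - rbf(b1, a2)  # n x m, matching the docstring shape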
6 changes: 3 additions & 3 deletions aepsych/kernels/rbf_partial_grad.py
@@ -31,14 +31,14 @@ def forward(
self, x1: torch.Tensor, x2: torch.Tensor, diag: bool = False, **params: Any
) -> torch.Tensor:
"""Computes the covariance matrix between x1 and x2 based on the RBF
Args:
x1 (torch.Tensor): A `b x n x d` or `n x d` tensor, where `d = 2k` and `k` is the dimension of the latent space.
x2 (torch.Tensor): A `b x m x d` or `m x d` tensor, where `d = 2k` and `k` is the dimension of the latent space.
diag (bool): Should the Kernel compute the whole covariance matrix (False) or just the diagonal (True)? Defaults to False.
Returns:
torch.Tensor: A `b x n x m` or `n x m` tensor representing the covariance matrix between `x1` and `x2`.
The exact size depends on the kernel's evaluation mode:
16 changes: 8 additions & 8 deletions aepsych/models/gp_classification.py
@@ -97,10 +97,14 @@ def __init__(
"Both inducing_points and SobolAllocator are provided. "
"The initial inducing_points will be overwritten by the allocator."
)
self.inducing_points = inducing_point_method.allocate_inducing_points(num_inducing=self.inducing_size)
self.inducing_points = inducing_point_method.allocate_inducing_points(
num_inducing=self.inducing_size
)

elif inducing_points is None and inducing_point_method is SobolAllocator:
self.inducing_points = inducing_point_method.allocate_inducing_points(num_inducing=self.inducing_size)
self.inducing_points = inducing_point_method.allocate_inducing_points(
num_inducing=self.inducing_size
)

elif inducing_points is None and dim is not None:
# No allocator or unsupported allocator: create a dummy tensor
@@ -119,14 +123,12 @@ def __init__(
# Always assign the inducing point method
self.inducing_point_method = inducing_point_method or AutoAllocator()


self.dim = dim or self.inducing_points.size(-1)

if mean_module is None or covar_module is None:
default_mean, default_covar = default_mean_covar_factory(
dim=self.dim, stimuli_per_trial=self.stimuli_per_trial
)


variational_distribution = CholeskyVariationalDistribution(
self.inducing_points.size(0), batch_shape=torch.Size([self._batch_size])
Expand All @@ -140,8 +142,6 @@ def __init__(
)
super().__init__(variational_strategy)



self.likelihood = likelihood
self.mean_module = mean_module or default_mean
self.covar_module = covar_module or default_covar
@@ -225,7 +225,7 @@ def _reset_variational_strategy(self) -> None:
allocator=self.inducing_point_method,
inducing_size=self.inducing_size,
covar_module=self.covar_module,
X=self.train_inputs[0]
X=self.train_inputs[0],
).to(device)

variational_distribution = CholeskyVariationalDistribution(
22 changes: 13 additions & 9 deletions aepsych/models/gp_regression.py
@@ -6,22 +6,23 @@
# LICENSE file in the root directory of this source tree.
from __future__ import annotations

import warnings

from copy import deepcopy
from typing import Dict, Optional, Tuple, Union
import warnings

import gpytorch
import numpy as np
import torch
from aepsych.config import Config
from aepsych.factory.default import default_mean_covar_factory
from aepsych.models.base import AEPsychModelDeviceMixin
from aepsych.models.inducing_point_allocators import AutoAllocator, SobolAllocator
from aepsych.utils import _process_bounds, promote_0d
from aepsych.utils_logging import getLogger
from botorch.models.utils.inducing_point_allocators import InducingPointAllocator
from gpytorch.likelihoods import GaussianLikelihood, Likelihood
from gpytorch.models import ExactGP
from botorch.models.utils.inducing_point_allocators import InducingPointAllocator
from aepsych.models.inducing_point_allocators import AutoAllocator, SobolAllocator

logger = getLogger()

@@ -59,26 +60,30 @@ def __init__(
max_fit_time (float, optional): The maximum amount of time, in seconds, to spend fitting the model. If None,
there is no limit to the fitting time.
inducing_point_method (InducingPointAllocator, optional): The method to use for allocating inducing points.
If None, defaults to AutoAllocator.
If None, defaults to AutoAllocator.
inducing_size (int, optional): The number of inducing points to use. If None, defaults to 99.
"""
if likelihood is None:
likelihood = GaussianLikelihood()
self.inducing_size = inducing_size or 99

super().__init__(None, None, likelihood)

self.inducing_point_method: Optional[InducingPointAllocator]
if inducing_points is not None and inducing_point_method is SobolAllocator:
warnings.warn(
"Both inducing_points and SobolAllocator are provided. "
"The initial inducing_points will be overwritten by the allocator."
)
self.inducing_points = inducing_point_method.allocate_inducing_points(num_inducing=self.inducing_size)
self.inducing_points = inducing_point_method.allocate_inducing_points(
num_inducing=self.inducing_size
)

elif inducing_points is None and inducing_point_method is SobolAllocator:
self.inducing_points = inducing_point_method.allocate_inducing_points(num_inducing=self.inducing_size)
self.inducing_points = inducing_point_method.allocate_inducing_points(
num_inducing=self.inducing_size
)

elif inducing_points is None and dim is not None:
# No allocator or unsupported allocator: create a dummy tensor
@@ -97,7 +102,6 @@ def __init__(
# Always assign the inducing point method
self.inducing_point_method = inducing_point_method or AutoAllocator()


self.dim = dim or self.inducing_points.size(-1)
self.max_fit_time = max_fit_time

13 changes: 7 additions & 6 deletions aepsych/models/monotonic_rejection_gp.py
@@ -92,18 +92,20 @@ def __init__(

self.inducing_size = num_induc



self.inducing_point_method: Optional[InducingPointAllocator]
if inducing_points is not None and inducing_point_method is SobolAllocator:
warnings.warn(
"Both inducing_points and SobolAllocator are provided. "
"The initial inducing_points will be overwritten by the allocator."
)
self.inducing_points = inducing_point_method.allocate_inducing_points(num_inducing=self.inducing_size)
self.inducing_points = inducing_point_method.allocate_inducing_points(
num_inducing=self.inducing_size
)

elif inducing_points is None and inducing_point_method is SobolAllocator:
self.inducing_points = inducing_point_method.allocate_inducing_points(num_inducing=self.inducing_size)
self.inducing_points = inducing_point_method.allocate_inducing_points(
num_inducing=self.inducing_size
)

elif inducing_points is None and dim is not None:
# No allocator or unsupported allocator: create a dummy tensor
@@ -122,7 +124,6 @@ def __init__(
# Always assign the inducing point method
self.inducing_point_method = inducing_point_method or AutoAllocator()


self.dim = dim or self.inducing_points.size(-1)

inducing_points_aug = self._augment_with_deriv_index(self.inducing_points, 0)
@@ -187,7 +188,7 @@ def fit(self, train_x: Tensor, train_y: Tensor, **kwargs) -> None:
allocator=self.inducing_point_method,
inducing_size=self.inducing_size,
covar_module=self.covar_module,
X=self.train_inputs[0]
X=self.train_inputs[0],
)
self._set_model(train_x, train_y)

32 changes: 19 additions & 13 deletions aepsych/models/semi_p.py
@@ -7,9 +7,10 @@

from __future__ import annotations

import warnings

from copy import deepcopy
from typing import Any, Optional, Tuple, Union
import warnings

import gpytorch
import numpy as np
@@ -214,17 +215,21 @@ def __init__(
If "kmeans++", selects points by performing kmeans++ clustering on the training data.
If "auto", tries to determine the best method automatically.
"""

self.inducing_point_method: Optional[InducingPointAllocator]
if inducing_points is not None and inducing_point_method is SobolAllocator:
warnings.warn(
"Both inducing_points and SobolAllocator are provided. "
"The initial inducing_points will be overwritten by the allocator."
)
self.inducing_points = inducing_point_method.allocate_inducing_points(num_inducing=self.inducing_size)
self.inducing_points = inducing_point_method.allocate_inducing_points(
num_inducing=self.inducing_size
)

elif inducing_points is None and inducing_point_method is SobolAllocator:
self.inducing_points = inducing_point_method.allocate_inducing_points(num_inducing=self.inducing_size)
self.inducing_points = inducing_point_method.allocate_inducing_points(
num_inducing=self.inducing_size
)

elif inducing_points is None and dim is not None:
# No allocator or unsupported allocator: create a dummy tensor
@@ -243,12 +248,11 @@ def __init__(
# Always assign the inducing point method
self.inducing_point_method = inducing_point_method or AutoAllocator()


self.dim = dim or self.inducing_points.size(-1)
dim = self.dim

self.stim_dim = stim_dim
self.context_dims = list(range(self.dim ))
self.context_dims = list(range(self.dim))
self.context_dims.pop(stim_dim)

if mean_module is None:
@@ -261,7 +265,7 @@ def __init__(
if covar_module is None:
covar_module = ScaleKernel(
RBFKernel(
ard_num_dims=self.dim - 1,
ard_num_dims=self.dim - 1,
lengthscale_prior=GammaPrior(3, 6),
active_dims=self.context_dims, # Operate only on x_s
batch_shape=torch.Size([2]),
@@ -474,7 +478,6 @@ def __init__(
inducing_size: Optional[int] = None,
max_fit_time: Optional[float] = None,
inducing_point_method: Optional[InducingPointAllocator] = None,

) -> None:
"""
Initialize HadamardSemiPModel.
@@ -507,17 +510,21 @@ def __init__(
"at >=100 inducing points is especially slow."
)
)

self.inducing_point_method: Optional[InducingPointAllocator]
if inducing_points is not None and inducing_point_method is SobolAllocator:
warnings.warn(
"Both inducing_points and SobolAllocator are provided. "
"The initial inducing_points will be overwritten by the allocator."
)
self.inducing_points = inducing_point_method.allocate_inducing_points(num_inducing=self.inducing_size)
self.inducing_points = inducing_point_method.allocate_inducing_points(
num_inducing=self.inducing_size
)

elif inducing_points is None and inducing_point_method is SobolAllocator:
self.inducing_points = inducing_point_method.allocate_inducing_points(num_inducing=self.inducing_size)
self.inducing_points = inducing_point_method.allocate_inducing_points(
num_inducing=self.inducing_size
)

elif inducing_points is None and dim is not None:
# No allocator or unsupported allocator: create a dummy tensor
@@ -536,9 +543,8 @@ def __init__(
# Always assign the inducing point method
self.inducing_point_method = inducing_point_method or AutoAllocator()


self.dim = dim or self.inducing_points.size(-1)

super().__init__(
inducing_points=inducing_points,
dim=dim,
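Aside (not part of the diff): gp_classification.py, gp_regression.py, monotonic_rejection_gp.py, and semi_p.py all reformat the same inducing-point resolution branches. A condensed, hypothetical sketch of that shared pattern follows; the helper name, the abstract allocate callable, and the dummy-tensor shape (that line is truncated in this diff) are assumptions for illustration.

from typing import Callable, Optional
import warnings

import torch

def resolve_inducing_points(
    inducing_points: Optional[torch.Tensor],
    allocator_is_sobol: bool,
    allocate: Callable[[int], torch.Tensor],  # stands in for allocator.allocate_inducing_points
    inducing_size: int,
    dim: Optional[int],
) -> Optional[torch.Tensor]:
    # Hypothetical condensation of the branches repeated across the models in this commit.
    if inducing_points is not None and allocator_is_sobol:
        warnings.warn(
            "Both inducing_points and SobolAllocator are provided. "
            "The initial inducing_points will be overwritten by the allocator."
        )
        return allocate(inducing_size)
    if inducing_points is None and allocator_is_sobol:
        return allocate(inducing_size)
    if inducing_points is None and dim is not None:
        # "No allocator or unsupported allocator: create a dummy tensor"; this shape is a guess.
        return torch.zeros(inducing_size, dim)
    return inducing_points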