From 0a5cedeb32d964b7c5e2237d5bcddbc9ba653706 Mon Sep 17 00:00:00 2001
From: Sait Cakmak
Date: Fri, 1 Dec 2023 14:58:56 -0800
Subject: [PATCH] Adds test_helpers. Do not use relative imports. (#2133)

Summary:
Moves test utilities that were used in multiple test files to
`botorch/utils/test_helpers.py`. This makes it possible to remove all
relative imports from the test files; relative imports do not play well
with some internal tooling we want to use. Going forward, tests, like the
rest of BoTorch, should only use absolute imports.

Reviewed By: esantorella

Differential Revision: D51767702
---
 botorch/__init__.py                           |   6 +-
 botorch/utils/test_helpers.py                 | 170 ++++++++++++++++++
 botorch/utils/transforms.py                   |   6 +-
 sphinx/source/utils.rst                       |   6 +-
 test/acquisition/test_knowledge_gradient.py   |   3 +-
 test/acquisition/test_monte_carlo.py          |  12 +-
 test/models/test_converter.py                 |   3 +-
 test/models/test_fully_bayesian_multitask.py  |   5 +-
 test/models/test_gp_regression.py             |  20 +--
 test/models/test_gp_regression_mixed.py       |   7 +-
 test/models/test_gpytorch.py                  |  43 +----
 test/models/test_multitask.py                 |  63 ++-----
 test/sampling/pathwise/helpers.py             |  33 ----
 .../pathwise/test_posterior_samplers.py       |   3 +-
 test/sampling/pathwise/test_prior_samplers.py |   3 +-
 15 files changed, 209 insertions(+), 174 deletions(-)
 create mode 100644 botorch/utils/test_helpers.py
 delete mode 100644 test/sampling/pathwise/helpers.py

diff --git a/botorch/__init__.py b/botorch/__init__.py
index b121d1170b..de6bee80ef 100644
--- a/botorch/__init__.py
+++ b/botorch/__init__.py
@@ -30,9 +30,11 @@
 from botorch.utils import manual_seed
 
 try:
-    from botorch.version import version as __version__
+    # Marking this as a manual import to avoid autodeps complaints
+    # due to imports from non-existent file.
+    from botorch.version import version as __version__  # @manual
 except Exception:  # pragma: no cover
-    __version__ = "Unknown"  # pragma: no cover
+    __version__ = "Unknown"
 
 logger.info(
     "Turning off `fast_computations` in linear operator and increasing "
diff --git a/botorch/utils/test_helpers.py b/botorch/utils/test_helpers.py
new file mode 100644
index 0000000000..9e74aec113
--- /dev/null
+++ b/botorch/utils/test_helpers.py
@@ -0,0 +1,170 @@
+#!/usr/bin/env python3
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+Dummy classes and other helpers that are used in multiple test files
+should be defined here to avoid relative imports.
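+
+Example of the intended absolute-import style (for illustration; the
+pathwise sampler tests in this diff import these helpers the same way)::
+
+    from botorch.utils.test_helpers import get_sample_moments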
+""" + +from __future__ import annotations + +import math +from typing import Optional, Tuple + +import torch +from botorch.acquisition.objective import PosteriorTransform +from botorch.models.gpytorch import GPyTorchModel +from botorch.models.model import FantasizeMixin, Model +from botorch.models.transforms.outcome import Standardize +from botorch.models.utils import add_output_dim +from botorch.models.utils.assorted import fantasize +from botorch.posteriors.posterior import Posterior +from botorch.utils.datasets import MultiTaskDataset, SupervisedDataset +from gpytorch.distributions.multivariate_normal import MultivariateNormal +from gpytorch.kernels import RBFKernel, ScaleKernel +from gpytorch.likelihoods.gaussian_likelihood import ( + FixedNoiseGaussianLikelihood, + GaussianLikelihood, +) +from gpytorch.means import ConstantMean +from gpytorch.models.exact_gp import ExactGP +from torch import Size, Tensor +from torch.nn.functional import pad + + +def get_sample_moments(samples: Tensor, sample_shape: Size) -> Tuple[Tensor, Tensor]: + """Computes the mean and covariance of a set of samples. + + Args: + samples: A tensor of shape `sample_shape x batch_shape x q`. + sample_shape: The sample_shape input used while generating the samples using + the pathwise sampling API. + """ + sample_dim = len(sample_shape) + samples = samples.view(-1, *samples.shape[sample_dim:]) + loc = samples.mean(dim=0) + residuals = (samples - loc).permute(*range(1, samples.ndim), 0) + return loc, (residuals @ residuals.transpose(-2, -1)) / sample_shape.numel() + + +def standardize_moments( + transform: Standardize, + loc: Tensor, + covariance_matrix: Tensor, +) -> Tuple[Tensor, Tensor]: + """Standardizes the loc and covariance_matrix using the mean and standard + deviations from a Standardize transform. + """ + m = transform.means.squeeze().unsqueeze(-1) + s = transform.stdvs.squeeze().reciprocal().unsqueeze(-1) + loc = s * (loc - m) + correlation_matrix = s.unsqueeze(-1) * covariance_matrix * s.unsqueeze(-2) + return loc, correlation_matrix + + +def gen_multi_task_dataset( + yvar: Optional[float] = None, **tkwargs +) -> Tuple[MultiTaskDataset, Tuple[Tensor, Tensor, Tensor]]: + """Constructs a multi-task dataset with two tasks, each with 10 data points.""" + X = torch.linspace(0, 0.95, 10, **tkwargs) + 0.05 * torch.rand(10, **tkwargs) + X = X.unsqueeze(dim=-1) + Y1 = torch.sin(X * (2 * math.pi)) + torch.randn_like(X) * 0.2 + Y2 = torch.cos(X * (2 * math.pi)) + torch.randn_like(X) * 0.2 + train_X = torch.cat([pad(X, (1, 0), value=i) for i in range(2)]) + train_Y = torch.cat([Y1, Y2]) + + Yvar1 = None if yvar is None else torch.full_like(Y1, yvar) + Yvar2 = None if yvar is None else torch.full_like(Y2, yvar) + train_Yvar = None if yvar is None else torch.cat([Yvar1, Yvar2]) + datasets = [ + SupervisedDataset( + X=train_X[:10], + Y=Y1, + Yvar=Yvar1, + feature_names=["task", "X"], + outcome_names=["y"], + ), + SupervisedDataset( + X=train_X[10:], + Y=Y2, + Yvar=Yvar2, + feature_names=["task", "X"], + outcome_names=["y1"], + ), + ] + dataset = MultiTaskDataset( + datasets=datasets, target_outcome_name="y", task_feature_index=0 + ) + return dataset, (train_X, train_Y, train_Yvar) + + +def get_pvar_expected(posterior: Posterior, model: Model, X: Tensor, m: int) -> Tensor: + """Computes the expected variance of a posterior after adding the + predictive noise from the likelihood. 
+ """ + X = model.transform_inputs(X) + lh_kwargs = {} + if isinstance(model.likelihood, FixedNoiseGaussianLikelihood): + lh_kwargs["noise"] = model.likelihood.noise.mean().expand(X.shape[:-1]) + if m == 1: + return model.likelihood( + posterior.distribution, X, **lh_kwargs + ).variance.unsqueeze(-1) + X_, odi = add_output_dim(X=X, original_batch_shape=model._input_batch_shape) + pvar_exp = model.likelihood(model(X_), X_, **lh_kwargs).variance + return torch.stack([pvar_exp.select(dim=odi, index=i) for i in range(m)], dim=-1) + + +class DummyNonScalarizingPosteriorTransform(PosteriorTransform): + scalarize = False + + def evaluate(self, Y): + pass # pragma: no cover + + def forward(self, posterior): + pass # pragma: no cover + + +class SimpleGPyTorchModel(GPyTorchModel, ExactGP, FantasizeMixin): + last_fantasize_flag: bool = False + + def __init__(self, train_X, train_Y, outcome_transform=None, input_transform=None): + r""" + Args: + train_X: A tensor of inputs, passed to self.transform_inputs. + train_Y: Passed to outcome_transform. + outcome_transform: Transform applied to train_Y. + input_transform: A Module that performs the input transformation, passed to + self.transform_inputs. + """ + with torch.no_grad(): + transformed_X = self.transform_inputs( + X=train_X, input_transform=input_transform + ) + if outcome_transform is not None: + train_Y, _ = outcome_transform(train_Y) + self._validate_tensor_args(transformed_X, train_Y) + train_Y = train_Y.squeeze(-1) + likelihood = GaussianLikelihood() + super().__init__(train_X, train_Y, likelihood) + self.mean_module = ConstantMean() + self.covar_module = ScaleKernel(RBFKernel()) + if outcome_transform is not None: + self.outcome_transform = outcome_transform + if input_transform is not None: + self.input_transform = input_transform + self._num_outputs = 1 + self.to(train_X) + self.transformed_call_args = [] + + def forward(self, x): + self.last_fantasize_flag = fantasize.on() + if self.training: + x = self.transform_inputs(x) + self.transformed_call_args.append(x) + mean_x = self.mean_module(x) + covar_x = self.covar_module(x) + return MultivariateNormal(mean_x, covar_x) diff --git a/botorch/utils/transforms.py b/botorch/utils/transforms.py index e7b96a08ce..5727931491 100644 --- a/botorch/utils/transforms.py +++ b/botorch/utils/transforms.py @@ -18,9 +18,9 @@ from botorch.utils.safe_math import logmeanexp from torch import Tensor -if TYPE_CHECKING: - from botorch.acquisition import AcquisitionFunction # pragma: no cover - from botorch.model import Model # pragma: no cover +if TYPE_CHECKING: # pragma: no cover + from botorch.acquisition import AcquisitionFunction + from botorch.models.model import Model def standardize(Y: Tensor) -> Tensor: diff --git a/sphinx/source/utils.rst b/sphinx/source/utils.rst index b49cfc2e62..5a18172b6f 100644 --- a/sphinx/source/utils.rst +++ b/sphinx/source/utils.rst @@ -57,12 +57,16 @@ Sampling from GP priors .. automodule:: botorch.utils.gp_sampling :members: - Testing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: botorch.utils.testing :members: +Test Helpers +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. automodule:: botorch.utils.test_helpers + :members: + Torch ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
automodule:: botorch.utils.torch diff --git a/test/acquisition/test_knowledge_gradient.py b/test/acquisition/test_knowledge_gradient.py index 0aa1b044fc..089d9ca6c0 100644 --- a/test/acquisition/test_knowledge_gradient.py +++ b/test/acquisition/test_knowledge_gradient.py @@ -30,11 +30,10 @@ from botorch.optim.utils import _filter_kwargs from botorch.posteriors.gpytorch import GPyTorchPosterior from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler +from botorch.utils.test_helpers import DummyNonScalarizingPosteriorTransform from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior from gpytorch.distributions import MultitaskMultivariateNormal -from .test_monte_carlo import DummyNonScalarizingPosteriorTransform - NO = "botorch.utils.testing.MockModel.num_outputs" diff --git a/test/acquisition/test_monte_carlo.py b/test/acquisition/test_monte_carlo.py index de41548b0a..e9a0bc5b6a 100644 --- a/test/acquisition/test_monte_carlo.py +++ b/test/acquisition/test_monte_carlo.py @@ -26,7 +26,6 @@ ConstrainedMCObjective, GenericMCObjective, IdentityMCObjective, - PosteriorTransform, ScalarizedPosteriorTransform, ) from botorch.acquisition.utils import prune_inferior_points @@ -34,6 +33,7 @@ from botorch.models import SingleTaskGP from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler from botorch.utils.low_rank import sample_cached_cholesky +from botorch.utils.test_helpers import DummyNonScalarizingPosteriorTransform from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior from botorch.utils.transforms import standardize from torch import Tensor @@ -49,16 +49,6 @@ def _sample_forward(self, X): pass -class DummyNonScalarizingPosteriorTransform(PosteriorTransform): - scalarize = False - - def evaluate(self, Y): - pass # pragma: no cover - - def forward(self, posterior): - pass # pragma: no cover - - def infeasible_con(samples: Tensor) -> Tensor: return torch.ones_like(samples[..., 0]) diff --git a/test/models/test_converter.py b/test/models/test_converter.py index bb83173ae7..a429342eea 100644 --- a/test/models/test_converter.py +++ b/test/models/test_converter.py @@ -20,13 +20,12 @@ ) from botorch.models.transforms.input import AppendFeatures, Normalize from botorch.models.transforms.outcome import Standardize +from botorch.utils.test_helpers import SimpleGPyTorchModel from botorch.utils.testing import BotorchTestCase from gpytorch.kernels import RBFKernel from gpytorch.likelihoods import GaussianLikelihood from gpytorch.likelihoods.gaussian_likelihood import FixedNoiseGaussianLikelihood -from .test_gpytorch import SimpleGPyTorchModel - class TestConverters(BotorchTestCase): def test_batched_to_model_list(self): diff --git a/test/models/test_fully_bayesian_multitask.py b/test/models/test_fully_bayesian_multitask.py index 66812fdc10..7b9b3c4ff2 100644 --- a/test/models/test_fully_bayesian_multitask.py +++ b/test/models/test_fully_bayesian_multitask.py @@ -42,14 +42,13 @@ from botorch.utils.multi_objective.box_decompositions.non_dominated import ( NondominatedPartitioning, ) +from botorch.utils.test_helpers import gen_multi_task_dataset from botorch.utils.testing import BotorchTestCase from gpytorch.kernels import MaternKernel, ScaleKernel from gpytorch.likelihoods import FixedNoiseGaussianLikelihood from gpytorch.likelihoods.gaussian_likelihood import GaussianLikelihood from gpytorch.means import ConstantMean -from .test_multitask import _gen_multi_task_dataset - EXPECTED_KEYS = [ "latent_features", 
"mean_module.raw_constant", @@ -566,7 +565,7 @@ def test_construct_inputs(self): for dtype, infer_noise in [(torch.float, False), (torch.double, True)]: tkwargs = {"device": self.device, "dtype": dtype} task_feature = 0 - datasets, (train_X, train_Y, train_Yvar) = _gen_multi_task_dataset( + datasets, (train_X, train_Y, train_Yvar) = gen_multi_task_dataset( yvar=None if infer_noise else 0.05, **tkwargs ) diff --git a/test/models/test_gp_regression.py b/test/models/test_gp_regression.py index 40d6c94b7a..6d74ce0fbb 100644 --- a/test/models/test_gp_regression.py +++ b/test/models/test_gp_regression.py @@ -17,11 +17,11 @@ ) from botorch.models.transforms import Normalize, Standardize from botorch.models.transforms.input import InputStandardize -from botorch.models.utils import add_output_dim from botorch.posteriors import GPyTorchPosterior from botorch.sampling import SobolQMCNormalSampler from botorch.utils.datasets import SupervisedDataset from botorch.utils.sampling import manual_seed +from botorch.utils.test_helpers import get_pvar_expected from botorch.utils.testing import _get_random_data, BotorchTestCase from gpytorch.kernels import MaternKernel, RBFKernel, ScaleKernel from gpytorch.likelihoods import ( @@ -142,7 +142,7 @@ def test_gp(self, double_only: bool = False): self.assertAllClose(posterior_pred.variance, expected_var) else: pvar = posterior_pred.variance - pvar_exp = _get_pvar_expected(posterior, model, X, m) + pvar_exp = get_pvar_expected(posterior, model, X, m) self.assertAllClose(pvar, pvar_exp, rtol=1e-4, atol=1e-5) # Tensor valued observation noise. @@ -176,7 +176,7 @@ def test_gp(self, double_only: bool = False): self.assertAllClose(posterior_pred.variance, expected_var) else: pvar = posterior_pred.variance - pvar_exp = _get_pvar_expected(posterior, model, X, m) + pvar_exp = get_pvar_expected(posterior, model, X, m) self.assertAllClose(pvar, pvar_exp, rtol=1e-4, atol=1e-5) def test_custom_init(self): @@ -599,17 +599,3 @@ def test_condition_on_observations(self): def test_subset_model(self): with self.assertRaises(NotImplementedError): super().test_subset_model() - - -def _get_pvar_expected(posterior, model, X, m): - X = model.transform_inputs(X) - lh_kwargs = {} - if isinstance(model.likelihood, FixedNoiseGaussianLikelihood): - lh_kwargs["noise"] = model.likelihood.noise.mean().expand(X.shape[:-1]) - if m == 1: - return model.likelihood( - posterior.distribution, X, **lh_kwargs - ).variance.unsqueeze(-1) - X_, odi = add_output_dim(X=X, original_batch_shape=model._input_batch_shape) - pvar_exp = model.likelihood(model(X_), X_, **lh_kwargs).variance - return torch.stack([pvar_exp.select(dim=odi, index=i) for i in range(m)], dim=-1) diff --git a/test/models/test_gp_regression_mixed.py b/test/models/test_gp_regression_mixed.py index 6406189080..bb07d64470 100644 --- a/test/models/test_gp_regression_mixed.py +++ b/test/models/test_gp_regression_mixed.py @@ -17,6 +17,7 @@ from botorch.posteriors import GPyTorchPosterior from botorch.sampling import SobolQMCNormalSampler from botorch.utils.datasets import SupervisedDataset +from botorch.utils.test_helpers import get_pvar_expected from botorch.utils.testing import _get_random_data, BotorchTestCase from gpytorch.kernels.kernel import AdditiveKernel, ProductKernel from gpytorch.kernels.matern_kernel import MaternKernel @@ -26,8 +27,6 @@ from gpytorch.means import ConstantMean from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood -from .test_gp_regression import _get_pvar_expected - class 
TestMixedSingleTaskGP(BotorchTestCase): observed_noise = False @@ -119,7 +118,7 @@ def test_gp(self): self.assertEqual(posterior_pred.mean.shape, expected_shape) self.assertEqual(posterior_pred.variance.shape, expected_shape) pvar = posterior_pred.variance - pvar_exp = _get_pvar_expected(posterior, model, X, m) + pvar_exp = get_pvar_expected(posterior, model, X, m) self.assertAllClose(pvar, pvar_exp, rtol=1e-4, atol=1e-5) # test batch evaluation @@ -133,7 +132,7 @@ def test_gp(self): self.assertIsInstance(posterior_pred, GPyTorchPosterior) self.assertEqual(posterior_pred.mean.shape, expected_shape) pvar = posterior_pred.variance - pvar_exp = _get_pvar_expected(posterior, model, X, m) + pvar_exp = get_pvar_expected(posterior, model, X, m) self.assertAllClose(pvar, pvar_exp, rtol=1e-4, atol=1e-5) # test that model converter throws an exception diff --git a/test/models/test_gpytorch.py b/test/models/test_gpytorch.py index 020f3a63a1..80608d6aaa 100644 --- a/test/models/test_gpytorch.py +++ b/test/models/test_gpytorch.py @@ -28,6 +28,7 @@ from botorch.models.utils import fantasize from botorch.posteriors.gpytorch import GPyTorchPosterior from botorch.sampling.normal import SobolQMCNormalSampler +from botorch.utils.test_helpers import SimpleGPyTorchModel from botorch.utils.testing import BotorchTestCase from gpytorch import ExactMarginalLogLikelihood from gpytorch.distributions import MultivariateNormal @@ -56,48 +57,6 @@ def transform(self, X: Tensor) -> Tensor: return X + self.add_value -class SimpleGPyTorchModel(GPyTorchModel, ExactGP, FantasizeMixin): - last_fantasize_flag: bool = False - - def __init__(self, train_X, train_Y, outcome_transform=None, input_transform=None): - r""" - Args: - train_X: A tensor of inputs, passed to self.transform_inputs. - train_Y: Passed to outcome_transform. - outcome_transform: Transform applied to train_Y. - input_transform: A Module that performs the input transformation, passed to - self.transform_inputs. 
- """ - with torch.no_grad(): - transformed_X = self.transform_inputs( - X=train_X, input_transform=input_transform - ) - if outcome_transform is not None: - train_Y, _ = outcome_transform(train_Y) - self._validate_tensor_args(transformed_X, train_Y) - train_Y = train_Y.squeeze(-1) - likelihood = GaussianLikelihood() - super().__init__(train_X, train_Y, likelihood) - self.mean_module = ConstantMean() - self.covar_module = ScaleKernel(RBFKernel()) - if outcome_transform is not None: - self.outcome_transform = outcome_transform - if input_transform is not None: - self.input_transform = input_transform - self._num_outputs = 1 - self.to(train_X) - self.transformed_call_args = [] - - def forward(self, x): - self.last_fantasize_flag = fantasize.on() - if self.training: - x = self.transform_inputs(x) - self.transformed_call_args.append(x) - mean_x = self.mean_module(x) - covar_x = self.covar_module(x) - return MultivariateNormal(mean_x, covar_x) - - class SimpleBatchedMultiOutputGPyTorchModel( BatchedMultiOutputGPyTorchModel, ExactGP, FantasizeMixin ): diff --git a/test/models/test_multitask.py b/test/models/test_multitask.py index d6cc423986..c28cca930d 100644 --- a/test/models/test_multitask.py +++ b/test/models/test_multitask.py @@ -7,7 +7,7 @@ import itertools import math import warnings -from typing import List, Optional, Tuple +from typing import List, Optional import torch from botorch.acquisition.objective import ScalarizedPosteriorTransform @@ -22,7 +22,7 @@ from botorch.models.transforms.outcome import Standardize from botorch.posteriors import GPyTorchPosterior from botorch.posteriors.transformed import TransformedPosterior -from botorch.utils.datasets import MultiTaskDataset, SupervisedDataset +from botorch.utils.test_helpers import gen_multi_task_dataset from botorch.utils.testing import BotorchTestCase from gpytorch.distributions import MultitaskMultivariateNormal, MultivariateNormal from gpytorch.kernels import ( @@ -43,43 +43,6 @@ from gpytorch.priors import GammaPrior, LogNormalPrior, SmoothedBoxPrior from gpytorch.priors.lkj_prior import LKJCovariancePrior from gpytorch.settings import max_cholesky_size, max_root_decomposition_size -from torch import Tensor -from torch.nn.functional import pad - - -def _gen_multi_task_dataset( - yvar: Optional[float] = None, **tkwargs -) -> Tuple[MultiTaskDataset, Tuple[Tensor, Tensor, Tensor]]: - X = torch.linspace(0, 0.95, 10, **tkwargs) + 0.05 * torch.rand(10, **tkwargs) - X = X.unsqueeze(dim=-1) - Y1 = torch.sin(X * (2 * math.pi)) + torch.randn_like(X) * 0.2 - Y2 = torch.cos(X * (2 * math.pi)) + torch.randn_like(X) * 0.2 - train_X = torch.cat([pad(X, (1, 0), value=i) for i in range(2)]) - train_Y = torch.cat([Y1, Y2]) - - Yvar1 = None if yvar is None else torch.full_like(Y1, yvar) - Yvar2 = None if yvar is None else torch.full_like(Y2, yvar) - train_Yvar = None if yvar is None else torch.cat([Yvar1, Yvar2]) - datasets = [ - SupervisedDataset( - X=train_X[:10], - Y=Y1, - Yvar=Yvar1, - feature_names=["task", "X"], - outcome_names=["y"], - ), - SupervisedDataset( - X=train_X[10:], - Y=Y2, - Yvar=Yvar2, - feature_names=["task", "X"], - outcome_names=["y1"], - ), - ] - dataset = MultiTaskDataset( - datasets=datasets, target_outcome_name="y", task_feature_index=0 - ) - return dataset, (train_X, train_Y, train_Yvar) def _gen_model_and_data( @@ -89,7 +52,7 @@ def _gen_model_and_data( outcome_transform=None, **tkwargs ): - datasets, (train_X, train_Y, _) = _gen_multi_task_dataset(**tkwargs) + datasets, (train_X, train_Y, _) = 
gen_multi_task_dataset(**tkwargs) model = MultiTaskGP( train_X, train_Y, @@ -102,7 +65,7 @@ def _gen_model_and_data( def _gen_model_single_output(**tkwargs): - _, (train_X, train_Y, _) = _gen_multi_task_dataset(**tkwargs) + _, (train_X, train_Y, _) = gen_multi_task_dataset(**tkwargs) model = MultiTaskGP(train_X, train_Y, task_feature=0, output_tasks=[1]) return model.to(**tkwargs) @@ -114,7 +77,7 @@ def _gen_fixed_noise_model_and_data( use_fixed_noise_model_class: bool = False, **tkwargs ): - datasets, (train_X, train_Y, train_Yvar) = _gen_multi_task_dataset( + datasets, (train_X, train_Y, train_Yvar) = gen_multi_task_dataset( yvar=0.05, **tkwargs ) model_class = FixedNoiseMultiTaskGP if use_fixed_noise_model_class else MultiTaskGP @@ -130,7 +93,7 @@ def _gen_fixed_noise_model_and_data( def _gen_fixed_noise_model_single_output(**tkwargs): - _, (train_X, train_Y, train_Yvar) = _gen_multi_task_dataset(yvar=0.05, **tkwargs) + _, (train_X, train_Y, train_Yvar) = gen_multi_task_dataset(yvar=0.05, **tkwargs) model = FixedNoiseMultiTaskGP( train_X, train_Y, train_Yvar, task_feature=0, output_tasks=[1] ) @@ -138,7 +101,7 @@ def _gen_fixed_noise_model_single_output(**tkwargs): def _gen_fixed_prior_model(**tkwargs): - _, (train_X, train_Y, _) = _gen_multi_task_dataset(**tkwargs) + _, (train_X, train_Y, _) = gen_multi_task_dataset(**tkwargs) sd_prior = GammaPrior(2.0, 0.15) sd_prior._event_shape = torch.Size([2]) model = MultiTaskGP( @@ -151,7 +114,7 @@ def _gen_fixed_prior_model(**tkwargs): def _gen_given_covar_module_model(**tkwargs): - _, (train_X, train_Y, _) = _gen_multi_task_dataset(**tkwargs) + _, (train_X, train_Y, _) = gen_multi_task_dataset(**tkwargs) model = MultiTaskGP( train_X, train_Y, @@ -162,7 +125,7 @@ def _gen_given_covar_module_model(**tkwargs): def _gen_fixed_noise_and_prior_model(**tkwargs): - _, (train_X, train_Y, train_Yvar) = _gen_multi_task_dataset(yvar=0.05, **tkwargs) + _, (train_X, train_Y, train_Yvar) = gen_multi_task_dataset(yvar=0.05, **tkwargs) sd_prior = GammaPrior(2.0, 0.15) sd_prior._event_shape = torch.Size([2]) model = FixedNoiseMultiTaskGP( @@ -176,7 +139,7 @@ def _gen_fixed_noise_and_prior_model(**tkwargs): def _gen_fixed_noise_and_given_covar_module_model(**tkwargs): - _, (train_X, train_Y, train_Yvar) = _gen_multi_task_dataset(yvar=0.05, **tkwargs) + _, (train_X, train_Y, train_Yvar) = gen_multi_task_dataset(yvar=0.05, **tkwargs) model = FixedNoiseMultiTaskGP( train_X, train_Y, @@ -317,7 +280,7 @@ def test_MultiTaskGP(self): MultiTaskGP(torch.rand(2, 2, 2), torch.rand(2, 2, 1), 0) # test that bad feature index throws correct error - _, (train_X, train_Y, _) = _gen_multi_task_dataset(**tkwargs) + _, (train_X, train_Y, _) = gen_multi_task_dataset(**tkwargs) with self.assertRaises(ValueError): MultiTaskGP(train_X, train_Y, 2) @@ -401,7 +364,7 @@ def test_MultiTaskGP_given_covar_module(self): def test_custom_mean_and_likelihood(self): tkwargs = {"device": self.device, "dtype": torch.double} - _, (train_X, train_Y, _) = _gen_multi_task_dataset(**tkwargs) + _, (train_X, train_Y, _) = gen_multi_task_dataset(**tkwargs) mean_module = LinearMean(input_size=train_X.shape[-1]) likelihood = GaussianLikelihood(noise_prior=LogNormalPrior(0, 1)) model = MultiTaskGP( @@ -517,7 +480,7 @@ def test_FixedNoiseMultiTaskGP(self): ) # test that bad feature index throws correct error - _, (train_X, train_Y, _) = _gen_multi_task_dataset(**tkwargs) + _, (train_X, train_Y, _) = gen_multi_task_dataset(**tkwargs) train_Yvar = torch.full_like(train_Y, 0.05) with 
self.assertRaises(ValueError): FixedNoiseMultiTaskGP(train_X, train_Y, train_Yvar, 2) diff --git a/test/sampling/pathwise/helpers.py b/test/sampling/pathwise/helpers.py deleted file mode 100644 index 6740365839..0000000000 --- a/test/sampling/pathwise/helpers.py +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from __future__ import annotations - -from typing import Tuple - -from botorch.models.transforms.outcome import Standardize -from torch import Size, Tensor - - -def get_sample_moments(samples: Tensor, sample_shape: Size) -> Tuple[Tensor, Tensor]: - sample_dim = len(sample_shape) - samples = samples.view(-1, *samples.shape[sample_dim:]) - loc = samples.mean(dim=0) - residuals = (samples - loc).permute(*range(1, samples.ndim), 0) - return loc, (residuals @ residuals.transpose(-2, -1)) / sample_shape.numel() - - -def standardize_moments( - transform: Standardize, - loc: Tensor, - covariance_matrix: Tensor, -) -> Tuple[Tensor, Tensor]: - - m = transform.means.squeeze().unsqueeze(-1) - s = transform.stdvs.squeeze().reciprocal().unsqueeze(-1) - loc = s * (loc - m) - correlation_matrix = s.unsqueeze(-1) * covariance_matrix * s.unsqueeze(-2) - return loc, correlation_matrix diff --git a/test/sampling/pathwise/test_posterior_samplers.py b/test/sampling/pathwise/test_posterior_samplers.py index 8828e34f63..eeba8e0238 100644 --- a/test/sampling/pathwise/test_posterior_samplers.py +++ b/test/sampling/pathwise/test_posterior_samplers.py @@ -14,13 +14,12 @@ from botorch.models.transforms.outcome import Standardize from botorch.sampling.pathwise import draw_matheron_paths, MatheronPath, PathList from botorch.sampling.pathwise.utils import get_train_inputs +from botorch.utils.test_helpers import get_sample_moments, standardize_moments from botorch.utils.testing import BotorchTestCase from gpytorch.kernels import MaternKernel, ScaleKernel from torch import Size from torch.nn.functional import pad -from .helpers import get_sample_moments, standardize_moments - class TestPosteriorSamplers(BotorchTestCase): def setUp(self) -> None: diff --git a/test/sampling/pathwise/test_prior_samplers.py b/test/sampling/pathwise/test_prior_samplers.py index 5c562eb024..d866431cf4 100644 --- a/test/sampling/pathwise/test_prior_samplers.py +++ b/test/sampling/pathwise/test_prior_samplers.py @@ -21,13 +21,12 @@ PathList, ) from botorch.sampling.pathwise.utils import get_train_inputs +from botorch.utils.test_helpers import get_sample_moments, standardize_moments from botorch.utils.testing import BotorchTestCase from gpytorch.kernels import MaternKernel, RBFKernel, ScaleKernel from torch import Size from torch.nn.functional import pad -from .helpers import get_sample_moments, standardize_moments - class TestPriorSamplers(BotorchTestCase): def setUp(self) -> None:
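---
The recurring mechanical change across the test files in this patch is a
one-line import swap; both lines below appear verbatim in the hunks above:

    # Before: a relative import between test modules (removed).
    from .test_monte_carlo import DummyNonScalarizingPosteriorTransform

    # After: an absolute import from the new shared helpers module.
    from botorch.utils.test_helpers import DummyNonScalarizingPosteriorTransform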