Source code for botorch.models.gp_regression

#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

r"""
Gaussian Process Regression models based on GPyTorch models.

These models are often a good starting point and are further documented in the
tutorials.

`SingleTaskGP`, `FixedNoiseGP`, and `HeteroskedasticSingleTaskGP` are all
single-task exact GP models, differing in how they treat noise. They use
relatively strong priors on the Kernel hyperparameters, which work best when
covariates are normalized to the unit cube and outcomes are standardized (zero
mean, unit variance).

These models all work in batch mode (each batch having its own hyperparameters).
When the training observations include multiple outputs, these models use
batching to model outputs independently.

These models all support multiple outputs. However, as single-task models,
`SingleTaskGP`, `FixedNoiseGP`, and `HeteroskedasticSingleTaskGP` should be
used only when the outputs are independent and all use the same training data.
If outputs are independent but have different training data, use the
`ModelListGP`. When modeling correlations between outputs, use a multi-task
model like `MultiTaskGP`.
"""

from __future__ import annotations

from typing import Any, List, NoReturn, Optional, Union

import torch
from botorch import settings
from botorch.models.gpytorch import BatchedMultiOutputGPyTorchModel
from botorch.models.model import FantasizeMixin
from botorch.models.transforms.input import InputTransform
from botorch.models.transforms.outcome import Log, OutcomeTransform
from botorch.models.utils import fantasize as fantasize_flag, validate_input_scaling
from botorch.sampling.base import MCSampler
from gpytorch.constraints.constraints import GreaterThan
from gpytorch.distributions.multivariate_normal import MultivariateNormal
from gpytorch.kernels.matern_kernel import MaternKernel
from gpytorch.kernels.scale_kernel import ScaleKernel
from gpytorch.likelihoods.gaussian_likelihood import (
    _GaussianLikelihoodBase,
    FixedNoiseGaussianLikelihood,
    GaussianLikelihood,
)
from gpytorch.likelihoods.likelihood import Likelihood
from gpytorch.likelihoods.noise_models import HeteroskedasticNoise
from gpytorch.means.constant_mean import ConstantMean
from gpytorch.means.mean import Mean
from gpytorch.mlls.noise_model_added_loss_term import NoiseModelAddedLossTerm
from gpytorch.models.exact_gp import ExactGP
from gpytorch.module import Module
from gpytorch.priors.smoothed_box_prior import SmoothedBoxPrior
from gpytorch.priors.torch_priors import GammaPrior
from torch import Tensor


MIN_INFERRED_NOISE_LEVEL = 1e-4
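

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream module): how the multi-output
# guidance in the module docstring plays out in practice. The helper name
# `_example_multi_output_models` and the toy data are hypothetical; the sketch
# assumes `ModelListGP` is importable from `botorch.models`.
def _example_multi_output_models():
    # Two outcomes observed at the *same* 20 training points: a single
    # SingleTaskGP models them as independent outputs via batching.
    train_X = torch.rand(20, 2)
    train_Y = torch.stack(
        [train_X.sum(dim=-1), train_X.prod(dim=-1)], dim=-1
    )  # shape: 20 x 2
    batched_model = SingleTaskGP(train_X, train_Y)

    # Outcomes observed on *different* training sets instead call for a
    # ModelListGP wrapping one single-output model per outcome.
    from botorch.models import ModelListGP  # local import keeps the sketch self-contained

    train_X2 = torch.rand(15, 2)
    train_Y2 = train_X2.norm(dim=-1, keepdim=True)
    model_list = ModelListGP(
        SingleTaskGP(train_X, train_Y[..., :1]),
        SingleTaskGP(train_X2, train_Y2),
    )
    return batched_model, model_list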


class SingleTaskGP(BatchedMultiOutputGPyTorchModel, ExactGP, FantasizeMixin):
    r"""A single-task exact GP model.

    A single-task exact GP using relatively strong priors on the Kernel
    hyperparameters, which work best when covariates are normalized to the unit
    cube and outcomes are standardized (zero mean, unit variance).

    This model works in batch mode (each batch having its own hyperparameters).
    When the training observations include multiple outputs, this model will use
    batching to model outputs independently.

    Use this model when you have independent output(s) and all outputs use the
    same training data. If outputs are independent but have different training
    data, use the ModelListGP. When modeling correlations between outputs, use
    the MultiTaskGP.

    Example:
        >>> train_X = torch.rand(20, 2)
        >>> train_Y = torch.sin(train_X).sum(dim=1, keepdim=True)
        >>> model = SingleTaskGP(train_X, train_Y)
    """

    def __init__(
        self,
        train_X: Tensor,
        train_Y: Tensor,
        likelihood: Optional[Likelihood] = None,
        covar_module: Optional[Module] = None,
        mean_module: Optional[Mean] = None,
        outcome_transform: Optional[OutcomeTransform] = None,
        input_transform: Optional[InputTransform] = None,
    ) -> None:
        r"""
        Args:
            train_X: A `batch_shape x n x d` tensor of training features.
            train_Y: A `batch_shape x n x m` tensor of training observations.
            likelihood: A likelihood. If omitted, use a standard
                GaussianLikelihood with inferred noise level.
            covar_module: The module computing the covariance (Kernel) matrix.
                If omitted, use a `MaternKernel`.
            mean_module: The mean function to be used. If omitted, use a
                `ConstantMean`.
            outcome_transform: An outcome transform that is applied to the
                training data during instantiation and to the posterior during
                inference (that is, the `Posterior` obtained by calling
                `.posterior` on the model will be on the original scale).
            input_transform: An input transform that is applied in the model's
                forward pass.
""" with torch.no_grad(): transformed_X = self.transform_inputs( X=train_X, input_transform=input_transform ) if outcome_transform is not None: train_Y, _ = outcome_transform(train_Y) self._validate_tensor_args(X=transformed_X, Y=train_Y) ignore_X_dims = getattr(self, "_ignore_X_dims_scaling_check", None) validate_input_scaling( train_X=transformed_X, train_Y=train_Y, ignore_X_dims=ignore_X_dims ) self._set_dimensions(train_X=train_X, train_Y=train_Y) train_X, train_Y, _ = self._transform_tensor_args(X=train_X, Y=train_Y) if likelihood is None: noise_prior = GammaPrior(1.1, 0.05) noise_prior_mode = (noise_prior.concentration - 1) / noise_prior.rate likelihood = GaussianLikelihood( noise_prior=noise_prior, batch_shape=self._aug_batch_shape, noise_constraint=GreaterThan( MIN_INFERRED_NOISE_LEVEL, transform=None, initial_value=noise_prior_mode, ), ) else: self._is_custom_likelihood = True ExactGP.__init__( self, train_inputs=train_X, train_targets=train_Y, likelihood=likelihood ) if mean_module is None: mean_module = ConstantMean(batch_shape=self._aug_batch_shape) self.mean_module = mean_module if covar_module is None: covar_module = ScaleKernel( MaternKernel( nu=2.5, ard_num_dims=transformed_X.shape[-1], batch_shape=self._aug_batch_shape, lengthscale_prior=GammaPrior(3.0, 6.0), ), batch_shape=self._aug_batch_shape, outputscale_prior=GammaPrior(2.0, 0.15), ) self._subset_batch_dict = { "likelihood.noise_covar.raw_noise": -2, "mean_module.raw_constant": -1, "covar_module.raw_outputscale": -1, "covar_module.base_kernel.raw_lengthscale": -3, } self.covar_module = covar_module # TODO: Allow subsetting of other covar modules if outcome_transform is not None: self.outcome_transform = outcome_transform if input_transform is not None: self.input_transform = input_transform self.to(train_X)

    def forward(self, x: Tensor) -> MultivariateNormal:
        if self.training:
            x = self.transform_inputs(x)
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return MultivariateNormal(mean_x, covar_x)
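

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream module): fitting a
# `SingleTaskGP` with the input/outcome transforms described in its docstring
# and querying the posterior on the original outcome scale. The helper name
# and toy data are hypothetical; the sketch assumes `Normalize`, `Standardize`,
# `ExactMarginalLogLikelihood`, and `fit_gpytorch_mll` are available as in
# recent BoTorch/GPyTorch releases.
def _example_fit_single_task_gp():
    from botorch.fit import fit_gpytorch_mll
    from botorch.models.transforms.input import Normalize
    from botorch.models.transforms.outcome import Standardize
    from gpytorch.mlls import ExactMarginalLogLikelihood

    train_X = 10.0 * torch.rand(20, 2)  # covariates on an arbitrary scale
    train_Y = torch.sin(train_X).sum(dim=1, keepdim=True)
    # Normalize maps inputs to the unit cube; Standardize yields zero-mean,
    # unit-variance outcomes, matching the priors used by this model.
    model = SingleTaskGP(
        train_X,
        train_Y,
        input_transform=Normalize(d=2),
        outcome_transform=Standardize(m=1),
    )
    mll = ExactMarginalLogLikelihood(model.likelihood, model)
    fit_gpytorch_mll(mll)
    # The posterior is reported on the original (untransformed) outcome scale.
    posterior = model.posterior(10.0 * torch.rand(5, 2))
    return posterior.mean, posterior.variance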


class FixedNoiseGP(BatchedMultiOutputGPyTorchModel, ExactGP):
    r"""A single-task exact GP model using fixed noise levels.

    A single-task exact GP that uses fixed observation noise levels, differing
    from `SingleTaskGP` only in that noise levels are provided rather than
    inferred. This model also uses relatively strong priors on the Kernel
    hyperparameters, which work best when covariates are normalized to the unit
    cube and outcomes are standardized (zero mean, unit variance). This model
    works in batch mode (each batch having its own hyperparameters).

    An example of a case in which noise levels are known is online
    experimentation, where noise can be measured using the variability of
    different observations from the same arm, or provided by outside software.
    Another use case is simulation optimization, where the evaluation can
    provide variance estimates, perhaps from bootstrapping. In any case, these
    noise levels must be provided to `FixedNoiseGP` as `train_Yvar`.

    `FixedNoiseGP` is also commonly used when the observations are known to be
    noise-free. Noise-free observations can be modeled using arbitrarily small
    noise values, such as `train_Yvar=torch.full_like(train_Y, 1e-6)`.

    `FixedNoiseGP` cannot predict noise levels out of sample. If this is
    needed, use `HeteroskedasticSingleTaskGP`, which will create another model
    for the observation noise.

    Example:
        >>> train_X = torch.rand(20, 2)
        >>> train_Y = torch.sin(train_X).sum(dim=1, keepdim=True)
        >>> train_Yvar = torch.full_like(train_Y, 0.2)
        >>> model = FixedNoiseGP(train_X, train_Y, train_Yvar)
    """

    def __init__(
        self,
        train_X: Tensor,
        train_Y: Tensor,
        train_Yvar: Tensor,
        covar_module: Optional[Module] = None,
        mean_module: Optional[Mean] = None,
        outcome_transform: Optional[OutcomeTransform] = None,
        input_transform: Optional[InputTransform] = None,
    ) -> None:
        r"""
        Args:
            train_X: A `batch_shape x n x d` tensor of training features.
            train_Y: A `batch_shape x n x m` tensor of training observations.
            train_Yvar: A `batch_shape x n x m` tensor of observed measurement
                noise.
            covar_module: The module computing the covariance (Kernel) matrix.
                If omitted, use a `MaternKernel`.
            mean_module: The mean function to be used. If omitted, use a
                `ConstantMean`.
            outcome_transform: An outcome transform that is applied to the
                training data during instantiation and to the posterior during
                inference (that is, the `Posterior` obtained by calling
                `.posterior` on the model will be on the original scale).
            input_transform: An input transform that is applied in the model's
                forward pass.
""" with torch.no_grad(): transformed_X = self.transform_inputs( X=train_X, input_transform=input_transform ) if outcome_transform is not None: train_Y, train_Yvar = outcome_transform(train_Y, train_Yvar) self._validate_tensor_args(X=transformed_X, Y=train_Y, Yvar=train_Yvar) validate_input_scaling( train_X=transformed_X, train_Y=train_Y, train_Yvar=train_Yvar ) self._set_dimensions(train_X=train_X, train_Y=train_Y) train_X, train_Y, train_Yvar = self._transform_tensor_args( X=train_X, Y=train_Y, Yvar=train_Yvar ) likelihood = FixedNoiseGaussianLikelihood( noise=train_Yvar, batch_shape=self._aug_batch_shape ) ExactGP.__init__( self, train_inputs=train_X, train_targets=train_Y, likelihood=likelihood ) if mean_module is None: mean_module = ConstantMean(batch_shape=self._aug_batch_shape) self.mean_module = mean_module if covar_module is None: covar_module = ScaleKernel( base_kernel=MaternKernel( nu=2.5, ard_num_dims=transformed_X.shape[-1], batch_shape=self._aug_batch_shape, lengthscale_prior=GammaPrior(3.0, 6.0), ), batch_shape=self._aug_batch_shape, outputscale_prior=GammaPrior(2.0, 0.15), ) self._subset_batch_dict = { "mean_module.raw_constant": -1, "covar_module.raw_outputscale": -1, "covar_module.base_kernel.raw_lengthscale": -3, } self.covar_module = covar_module # TODO: Allow subsetting of other covar modules if input_transform is not None: self.input_transform = input_transform if outcome_transform is not None: self.outcome_transform = outcome_transform self.to(train_X)

    def fantasize(
        self,
        X: Tensor,
        sampler: MCSampler,
        observation_noise: Union[bool, Tensor] = True,
        **kwargs: Any,
    ) -> FixedNoiseGP:
        r"""Construct a fantasy model.

        Constructs a fantasy model in the following fashion:
        (1) compute the model posterior at `X` (if `observation_noise=True`,
        this includes observation noise taken as the mean across the
        observation noise in the training data. If `observation_noise` is a
        Tensor, use it directly as the observation noise to add).
        (2) sample from this posterior (using `sampler`) to generate "fake"
        observations.
        (3) condition the model on the new fake observations.

        Args:
            X: A `batch_shape x n' x d`-dim Tensor, where `d` is the dimension
                of the feature space, `n'` is the number of points per batch,
                and `batch_shape` is the batch shape (must be compatible with
                the batch shape of the model).
            sampler: The sampler used for sampling from the posterior at `X`.
            observation_noise: If True, include the mean across the observation
                noise in the training data as observation noise in the
                posterior from which the samples are drawn. If a Tensor, use
                it directly as the specified measurement noise.

        Returns:
            The constructed fantasy model.
        """
        propagate_grads = kwargs.pop("propagate_grads", False)
        with fantasize_flag():
            with settings.propagate_grads(propagate_grads):
                post_X = self.posterior(
                    X, observation_noise=observation_noise, **kwargs
                )
                Y_fantasized = sampler(post_X)  # num_fantasies x batch_shape x n' x m
            # Use the mean of the previous noise values (TODO: be smarter here).
            # noise should be batch_shape x q x m when X is batch_shape x q x d, and
            # Y_fantasized is num_fantasies x batch_shape x q x m.
            noise_shape = Y_fantasized.shape[1:]
            noise = self.likelihood.noise.mean().expand(noise_shape)
            return self.condition_on_observations(
                X=self.transform_inputs(X), Y=Y_fantasized, noise=noise
            )

    def forward(self, x: Tensor) -> MultivariateNormal:
        # TODO: reduce redundancy with the 'forward' method of
        # SingleTaskGP, which is identical
        if self.training:
            x = self.transform_inputs(x)
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return MultivariateNormal(mean_x, covar_x)

    def subset_output(self, idcs: List[int]) -> BatchedMultiOutputGPyTorchModel:
        r"""Subset the model along the output dimension.

        Args:
            idcs: The output indices to subset the model to.

        Returns:
            The current model, subset to the specified output indices.
        """
        new_model = super().subset_output(idcs=idcs)
        full_noise = new_model.likelihood.noise_covar.noise
        new_noise = full_noise[..., idcs if len(idcs) > 1 else idcs[0], :]
        new_model.likelihood.noise_covar.noise = new_noise
        return new_model
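

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream module): using
# `FixedNoiseGP.fantasize` as described in its docstring -- draw posterior
# samples at new points and condition the model on them. The helper name and
# toy data are hypothetical; the sketch assumes `SobolQMCNormalSampler` from
# `botorch.sampling.normal`.
def _example_fantasize_fixed_noise_gp():
    from botorch.sampling.normal import SobolQMCNormalSampler

    train_X = torch.rand(20, 2)
    train_Y = torch.sin(train_X).sum(dim=1, keepdim=True)
    train_Yvar = torch.full_like(train_Y, 0.2)  # known observation noise
    model = FixedNoiseGP(train_X, train_Y, train_Yvar)

    # Fantasize at 4 candidate points using 8 posterior samples; the resulting
    # fantasy model carries a leading batch dimension of size 8 (one per sample).
    X_new = torch.rand(4, 2)
    sampler = SobolQMCNormalSampler(sample_shape=torch.Size([8]))
    fantasy_model = model.fantasize(X=X_new, sampler=sampler, observation_noise=True)
    return fantasy_model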


class HeteroskedasticSingleTaskGP(BatchedMultiOutputGPyTorchModel, ExactGP):
    r"""A single-task exact GP model using a heteroskedastic noise model.

    This model differs from `SingleTaskGP` in that noise levels are provided
    rather than inferred, and differs from `FixedNoiseGP` in that it can
    predict noise levels out of sample, because it internally wraps another
    GP (a SingleTaskGP) to model the observation noise.
    Noise levels must be provided to `HeteroskedasticSingleTaskGP` as
    `train_Yvar`.

    Examples of cases in which noise levels are known include online
    experimentation and simulation optimization.

    Example:
        >>> train_X = torch.rand(20, 2)
        >>> train_Y = torch.sin(train_X).sum(dim=1, keepdim=True)
        >>> se = torch.norm(train_X, dim=1, keepdim=True)
        >>> train_Yvar = 0.1 + se * torch.rand_like(train_Y)
        >>> model = HeteroskedasticSingleTaskGP(train_X, train_Y, train_Yvar)
    """

    def __init__(
        self,
        train_X: Tensor,
        train_Y: Tensor,
        train_Yvar: Tensor,
        outcome_transform: Optional[OutcomeTransform] = None,
        input_transform: Optional[InputTransform] = None,
    ) -> None:
        r"""
        Args:
            train_X: A `batch_shape x n x d` tensor of training features.
            train_Y: A `batch_shape x n x m` tensor of training observations.
            train_Yvar: A `batch_shape x n x m` tensor of observed measurement
                noise.
            outcome_transform: An outcome transform that is applied to the
                training data during instantiation and to the posterior during
                inference (that is, the `Posterior` obtained by calling
                `.posterior` on the model will be on the original scale). Note
                that the noise model internally log-transforms the variances,
                which will happen after this transform is applied.
            input_transform: An input transform that is applied in the model's
                forward pass.
        """
        if outcome_transform is not None:
            train_Y, train_Yvar = outcome_transform(train_Y, train_Yvar)
        self._validate_tensor_args(X=train_X, Y=train_Y, Yvar=train_Yvar)
        validate_input_scaling(train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar)
        self._set_dimensions(train_X=train_X, train_Y=train_Y)
        noise_likelihood = GaussianLikelihood(
            noise_prior=SmoothedBoxPrior(-3, 5, 0.5, transform=torch.log),
            batch_shape=self._aug_batch_shape,
            noise_constraint=GreaterThan(
                MIN_INFERRED_NOISE_LEVEL, transform=None, initial_value=1.0
            ),
        )
        noise_model = SingleTaskGP(
            train_X=train_X,
            train_Y=train_Yvar,
            likelihood=noise_likelihood,
            outcome_transform=Log(),
            input_transform=input_transform,
        )
        likelihood = _GaussianLikelihoodBase(HeteroskedasticNoise(noise_model))
        # This is hacky -- this class used to inherit from SingleTaskGP, but it
        # shouldn't so this is a quick fix to enable getting rid of that
        # inheritance
        SingleTaskGP.__init__(
            # pyre-fixme[6]: Incompatible parameter type
            self,
            train_X=train_X,
            train_Y=train_Y,
            likelihood=likelihood,
            input_transform=input_transform,
        )
        self.register_added_loss_term("noise_added_loss")
        self.update_added_loss_term(
            "noise_added_loss", NoiseModelAddedLossTerm(noise_model)
        )
        if outcome_transform is not None:
            self.outcome_transform = outcome_transform
        self.to(train_X)

    # pyre-fixme[15]: Inconsistent override
    def condition_on_observations(self, *_, **__) -> NoReturn:
        raise NotImplementedError

    # pyre-fixme[15]: Inconsistent override
    def subset_output(self, idcs) -> NoReturn:
        raise NotImplementedError

    def forward(self, x: Tensor) -> MultivariateNormal:
        if self.training:
            x = self.transform_inputs(x)
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return MultivariateNormal(mean_x, covar_x)
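

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream module): constructing a
# `HeteroskedasticSingleTaskGP` from replicate observations, so that the
# internal noise model can predict observation noise out of sample. The helper
# name and toy data are hypothetical.
def _example_heteroskedastic_gp():
    train_X = torch.rand(20, 2)
    # Five noisy replicates per point; their mean and variance provide
    # train_Y and train_Yvar.
    noise_sd = 0.1 + train_X.norm(dim=-1, keepdim=True)
    replicates = (
        torch.sin(train_X).sum(dim=1, keepdim=True) + noise_sd * torch.randn(20, 5)
    )
    train_Y = replicates.mean(dim=-1, keepdim=True)
    train_Yvar = replicates.var(dim=-1, keepdim=True)
    model = HeteroskedasticSingleTaskGP(train_X, train_Y, train_Yvar)
    # `observation_noise=True` adds the predicted (heteroskedastic) noise to
    # the posterior variance at the test points.
    posterior = model.posterior(torch.rand(5, 2), observation_noise=True)
    return posterior.variance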