#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Batch acquisition functions using the reparameterization trick in combination
with (quasi) Monte-Carlo sampling. See [Rezende2014reparam]_, [Wilson2017reparam]_ and
[Balandat2020botorch]_.
.. [Rezende2014reparam]
D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and
approximate inference in deep generative models. ICML 2014.
.. [Wilson2017reparam]
J. T. Wilson, R. Moriconi, F. Hutter, and M. P. Deisenroth.
The reparameterization trick for acquisition functions. ArXiv 2017.
"""
from __future__ import annotations
import math
from abc import ABC, abstractmethod
from copy import deepcopy
from typing import Any, Optional, Union
import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.cached_cholesky import CachedCholeskyMCAcquisitionFunction
from botorch.acquisition.objective import (
IdentityMCObjective,
MCAcquisitionObjective,
PosteriorTransform,
)
from botorch.acquisition.utils import prune_inferior_points
from botorch.exceptions.errors import UnsupportedError
from botorch.models.model import Model
from botorch.posteriors.posterior import Posterior
from botorch.sampling.samplers import MCSampler, SobolQMCNormalSampler
from botorch.utils.transforms import (
concatenate_pending_points,
match_batch_shape,
t_batch_mode_transform,
)
from torch import Tensor
class MCAcquisitionFunction(AcquisitionFunction, ABC):
r"""Abstract base class for Monte-Carlo based batch acquisition functions."""
def __init__(
self,
model: Model,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""Constructor for the MCAcquisitionFunction base class.
Args:
model: A fitted model.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
posterior_transform: A PosteriorTransform (optional).
X_pending: A `batch_shape x m x d`-dim Tensor of `m` design points
that have been submitted for function evaluation but have not yet
been evaluated.
"""
super().__init__(model=model)
if sampler is None:
sampler = SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)
self.add_module("sampler", sampler)
if objective is None and model.num_outputs != 1:
if posterior_transform is None:
raise UnsupportedError(
"Must specify an objective or a posterior transform when using "
"a multi-output model."
)
elif not posterior_transform.scalarize:
raise UnsupportedError(
"If using a multi-output model without an objective, "
"posterior_transform must scalarize the output."
)
if objective is None:
objective = IdentityMCObjective()
self.posterior_transform = posterior_transform
self.add_module("objective", objective)
self.set_X_pending(X_pending)
@abstractmethod
def forward(self, X: Tensor) -> Tensor:
r"""Takes in a `batch_shape x q x d` X Tensor of t-batches with `q` `d`-dim
design points each, and returns a Tensor with shape `batch_shape'`, where
`batch_shape'` is the broadcasted batch shape of model and input `X`. Should
utilize the result of `set_X_pending` as needed to account for pending function
evaluations.
"""
pass # pragma: no cover
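

# A minimal concrete subclass sketch showing the sample -> objective -> reduce
# pattern that the `forward` implementations below follow. This is a
# hypothetical illustration, not part of the BoTorch API: it simply averages
# the objective over both the q-batch and the MC samples.
class _qPosteriorMeanSketch(MCAcquisitionFunction):
    @concatenate_pending_points
    @t_batch_mode_transform()
    def forward(self, X: Tensor) -> Tensor:
        posterior = self.model.posterior(
            X=X, posterior_transform=self.posterior_transform
        )
        samples = self.sampler(posterior)  # sample_shape x batch_shape x q x m
        obj = self.objective(samples, X=X)  # sample_shape x batch_shape x q
        return obj.mean(dim=-1).mean(dim=0)  # average over q, then over MC samples
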
class qExpectedImprovement(MCAcquisitionFunction):
r"""MC-based batch Expected Improvement.
This computes qEI by
(1) sampling the joint posterior over q points
(2) evaluating the improvement over the current best for each sample
(3) maximizing over q
(4) averaging over the samples
`qEI(X) = E(max(max Y - best_f, 0)), Y ~ f(X), where X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> best_f = train_Y.max()
>>> sampler = SobolQMCNormalSampler(1024)
>>> qEI = qExpectedImprovement(model, best_f, sampler)
>>> qei = qEI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
**kwargs: Any,
) -> None:
r"""q-Expected Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless). Can be
a `batch_shape`-shaped tensor, which in case of a batched model
specifies potentially different values for each element of the batch.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`
objective: The MCAcquisitionObjective under which the samples are evaluated.
Defaults to `IdentityMCObjective()`.
posterior_transform: A PosteriorTransform (optional).
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation but have not yet been evaluated.
Concatenated into X upon forward call. Copied and set to have no
gradient.
"""
super().__init__(
model=model,
sampler=sampler,
objective=objective,
posterior_transform=posterior_transform,
X_pending=X_pending,
)
self.register_buffer("best_f", torch.as_tensor(best_f, dtype=float))
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qExpectedImprovement on the candidate set `X`.
Args:
X: A `batch_shape x q x d`-dim Tensor of t-batches with `q` `d`-dim design
points each.
Returns:
A `batch_shape'`-dim Tensor of Expected Improvement values at the given
design points `X`, where `batch_shape'` is the broadcasted batch shape of
model and input `X`.
"""
posterior = self.model.posterior(
X=X, posterior_transform=self.posterior_transform
)
samples = self.sampler(posterior)
obj = self.objective(samples, X=X)
obj = (obj - self.best_f.unsqueeze(-1).to(obj)).clamp_min(0)
q_ei = obj.max(dim=-1)[0].mean(dim=0)
return q_ei
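

# A standalone numeric sketch (hypothetical helper, not part of the BoTorch
# API) of the estimator computed in `qExpectedImprovement.forward` above:
# given a `sample_shape x batch_shape x q`-dim tensor of objective samples,
# compute the improvement over `best_f`, maximize over the q points, and
# average over the MC samples.
def _qei_from_samples_sketch(obj: Tensor, best_f: float) -> Tensor:
    improvement = (obj - best_f).clamp_min(0)  # (2) improvement per sample
    return improvement.max(dim=-1).values.mean(dim=0)  # (3) max over q, (4) average
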
class qNoisyExpectedImprovement(
MCAcquisitionFunction, CachedCholeskyMCAcquisitionFunction
):
r"""MC-based batch Noisy Expected Improvement.
This function does not assume a `best_f` is known (which would require
noiseless observations). Instead, it uses samples from the joint posterior
over the `q` test points and previously observed points. The improvement
over previously observed points is computed for each sample and averaged.
`qNEI(X) = E(max(max Y - max Y_baseline, 0))`, where
`(Y, Y_baseline) ~ f((X, X_baseline)), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1024)
>>> qNEI = qNoisyExpectedImprovement(model, train_X, sampler)
>>> qnei = qNEI(test_X)
"""
def __init__(
self,
model: Model,
X_baseline: Tensor,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
prune_baseline: bool = False,
cache_root: bool = True,
**kwargs: Any,
) -> None:
r"""q-Noisy Expected Improvement.
Args:
model: A fitted model.
X_baseline: A `batch_shape x r x d`-dim Tensor of `r` design points
that have already been observed. These points are considered as
potential candidates for the best design point.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
posterior_transform: A PosteriorTransform (optional).
X_pending: A `batch_shape x m x d`-dim Tensor of `m` design points
that have been submitted for function evaluation but have not yet
been evaluated. Concatenated into `X` upon forward call. Copied and
set to have no gradient.
prune_baseline: If True, remove points in `X_baseline` that are
highly unlikely to be the best point. This can significantly
improve performance and is generally recommended. In order to
customize pruning parameters, instead manually call
`botorch.acquisition.utils.prune_inferior_points` on `X_baseline`
before instantiating the acquisition function.
cache_root: A boolean indicating whether to cache the root
decomposition over `X_baseline` and use low-rank updates.
TODO: similar to qNEHVI, when we are using sequential greedy candidate
selection, we could incorporate pending points X_baseline and compute
the incremental qNEI from the new point. This would greatly increase
efficiency for large batches.
"""
super().__init__(
model=model,
sampler=sampler,
objective=objective,
posterior_transform=posterior_transform,
X_pending=X_pending,
)
self._setup(model=model, sampler=self.sampler, cache_root=cache_root)
self.base_sampler = deepcopy(self.sampler)
if prune_baseline:
X_baseline = prune_inferior_points(
model=model,
X=X_baseline,
objective=objective,
posterior_transform=posterior_transform,
marginalize_dim=kwargs.get("marginalize_dim"),
)
self.register_buffer("X_baseline", X_baseline)
if self._cache_root:
self.q = -1
# set baseline samples
with torch.no_grad():
posterior = self.model.posterior(X_baseline)
baseline_samples = self.base_sampler(posterior)
baseline_obj = self.objective(baseline_samples, X=X_baseline)
self.register_buffer("baseline_samples", baseline_samples)
self.register_buffer(
"baseline_obj_max_values", baseline_obj.max(dim=-1).values
)
self._cache_root_decomposition(posterior=posterior)
def _set_sampler(
self,
q: int,
posterior: Posterior,
) -> None:
r"""Update the sampler to use the original base samples for X_baseline.
Args:
q: The batch size.
posterior: The posterior.
TODO: refactor some/all of this into the MCSampler.
"""
if self.q != q:
# create new base_samples
base_sample_shape = self.sampler._get_base_sample_shape(posterior=posterior)
self.sampler._construct_base_samples(
posterior=posterior, shape=base_sample_shape
)
if (
self.X_baseline.shape[0] > 0
and self.base_sampler.base_samples is not None
):
current_base_samples = self.base_sampler.base_samples.detach().clone()
view_shape = (
base_sample_shape[:1]
+ torch.Size([1] * (len(base_sample_shape) - 3))
+ current_base_samples.shape[-2:]
)
expanded_shape = (
base_sample_shape[:-2] + current_base_samples.shape[-2:]
)
# Reuse stored base samples:
# use all base_samples from the current sampler, which includes
# the base_samples from the base_sampler as well as any
# base_samples for the new points in the sampler. For example,
# in sequential greedy candidate generation, the new candidate
# point is generated using the last (-1) base_sample in the
# sampler; the assignment below copies the stored base samples
# over the baseline positions.
end_idx = current_base_samples.shape[-2]
self.sampler.base_samples[..., :end_idx, :] = current_base_samples.view(
view_shape
).expand(expanded_shape)
self.q = q
def _forward_cached(self, posterior: Posterior, X_full: Tensor, q: int) -> Tensor:
r"""Compute difference objective using cached root decomposition.
Args:
posterior: The posterior.
X_full: A `batch_shape x (n + q) x d`-dim tensor of inputs.
q: The batch size.
Returns:
A `sample_shape x batch_shape`-dim tensor containing the
difference in objective under each MC sample.
"""
self._set_sampler(q=q, posterior=posterior)
# handle one-to-many input transforms
n_w = posterior.event_shape[-2] // X_full.shape[-2]
new_samples = self._get_f_X_samples(posterior=posterior, q_in=n_w * q)
new_obj = self.objective(new_samples, X=X_full[..., -q:, :])
new_obj_max_values = new_obj.max(dim=-1).values
n_sample_dims = len(self.base_sampler.sample_shape)
view_shape = torch.Size(
[
*self.baseline_obj_max_values.shape[:n_sample_dims],
*(1,) * (new_obj_max_values.ndim - self.baseline_obj_max_values.ndim),
*self.baseline_obj_max_values.shape[n_sample_dims:],
]
)
return new_obj_max_values - self.baseline_obj_max_values.view(view_shape)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qNoisyExpectedImprovement on the candidate set `X`.
Args:
X: A `batch_shape x q x d`-dim Tensor of t-batches with `q` `d`-dim design
points each.
Returns:
A `batch_shape'`-dim Tensor of Noisy Expected Improvement values at the
given design points `X`, where `batch_shape'` is the broadcasted batch shape
of model and input `X`.
"""
q = X.shape[-2]
X_full = torch.cat([match_batch_shape(self.X_baseline, X), X], dim=-2)
# TODO: Implement more efficient way to compute posterior over both training and
# test points in GPyTorch (https://github.com/cornellius-gp/gpytorch/issues/567)
posterior = self.model.posterior(
X_full, posterior_transform=self.posterior_transform
)
if self._cache_root:
diffs = self._forward_cached(posterior=posterior, X_full=X_full, q=q)
else:
samples = self.sampler(posterior)
obj = self.objective(samples, X=X_full)
diffs = obj[..., -q:].max(dim=-1).values - obj[..., :-q].max(dim=-1).values
return diffs.clamp_min(0).mean(dim=0)
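

# A standalone numeric sketch (hypothetical helper, not part of the BoTorch
# API) of the non-cached qNEI branch above: given joint objective samples over
# both the baseline points and the `q` candidates (candidates last), compare
# the best candidate value against the best baseline value under each MC
# sample, then clamp at zero and average.
def _qnei_from_joint_samples_sketch(obj: Tensor, q: int) -> Tensor:
    new_max = obj[..., -q:].max(dim=-1).values  # best of the q candidates
    baseline_max = obj[..., :-q].max(dim=-1).values  # best baseline point
    return (new_max - baseline_max).clamp_min(0).mean(dim=0)
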
class qProbabilityOfImprovement(MCAcquisitionFunction):
r"""MC-based batch Probability of Improvement.
Estimates the probability of improvement over the current best observed
value by sampling from the joint posterior distribution of the q-batch.
An MC-based estimate of a probability involves taking the expectation of
an indicator function; to support auto-differentiation, the indicator is
replaced with a sigmoid function with temperature parameter `tau`.
`qPI(X) = P(max Y >= best_f), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> best_f = train_Y.max()
>>> sampler = SobolQMCNormalSampler(1024)
>>> qPI = qProbabilityOfImprovement(model, best_f, sampler)
>>> qpi = qPI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
tau: float = 1e-3,
) -> None:
r"""q-Probability of Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless). Can
be a `batch_shape`-shaped tensor, which in case of a batched model
specifies potentially different values for each element of the batch.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
posterior_transform: A PosteriorTransform (optional).
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation but have not yet been evaluated.
Concatenated into X upon forward call. Copied and set to have no
gradient.
tau: The temperature parameter used in the sigmoid approximation
of the step function. Smaller values yield more accurate
approximations of the function, but result in gradient estimates
with higher variance.
"""
super().__init__(
model=model,
sampler=sampler,
objective=objective,
posterior_transform=posterior_transform,
X_pending=X_pending,
)
self.register_buffer("best_f", torch.as_tensor(best_f, dtype=float))
self.register_buffer("tau", torch.as_tensor(tau, dtype=float))
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qProbabilityOfImprovement on the candidate set `X`.
Args:
X: A `batch_shape x q x d`-dim Tensor of t-batches with `q` `d`-dim design
points each.
Returns:
A `batch_shape'`-dim Tensor of Probability of Improvement values at the
given design points `X`, where `batch_shape'` is the broadcasted batch shape
of model and input `X`.
"""
posterior = self.model.posterior(
X=X, posterior_transform=self.posterior_transform
)
samples = self.sampler(posterior)
obj = self.objective(samples, X=X)
max_obj = obj.max(dim=-1)[0]
impr = max_obj - self.best_f.unsqueeze(-1).to(max_obj)
val = torch.sigmoid(impr / self.tau).mean(dim=0)
return val
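

# A standalone numeric sketch (hypothetical helper, not part of the BoTorch
# API) of the smoothed indicator used by `qProbabilityOfImprovement.forward`
# above: as `tau -> 0`, the sigmoid approaches the exact (but
# non-differentiable) step function `1[max Y >= best_f]`.
def _qpi_from_samples_sketch(obj: Tensor, best_f: float, tau: float = 1e-3) -> Tensor:
    max_obj = obj.max(dim=-1).values  # best point within each sampled q-batch
    return torch.sigmoid((max_obj - best_f) / tau).mean(dim=0)  # MC average
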
class qSimpleRegret(MCAcquisitionFunction):
r"""MC-based batch Simple Regret.
Samples from the joint posterior over the q-batch and computes the simple regret.
`qSR(X) = E(max Y), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1024)
>>> qSR = qSimpleRegret(model, sampler)
>>> qsr = qSR(test_X)
"""
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qSimpleRegret on the candidate set `X`.
Args:
X: A `batch_shape x q x d`-dim Tensor of t-batches with `q` `d`-dim design
points each.
Returns:
A `batch_shape'`-dim Tensor of Simple Regret values at the given design
points `X`, where `batch_shape'` is the broadcasted batch shape of model
and input `X`.
"""
posterior = self.model.posterior(
X=X, posterior_transform=self.posterior_transform
)
samples = self.sampler(posterior)
obj = self.objective(samples, X=X)
val = obj.max(dim=-1)[0].mean(dim=0)
return val
class qUpperConfidenceBound(MCAcquisitionFunction):
r"""MC-based batch Upper Confidence Bound.
Uses a reparameterization to extend UCB to qUCB for q > 1 (see Appendix A
of [Wilson2017reparam]_).
`qUCB = E(max(mu + |Y_tilde - mu|))`, where `Y_tilde ~ N(mu, beta pi/2 Sigma)`
and `f(X)` has distribution `N(mu, Sigma)`.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1024)
>>> qUCB = qUpperConfidenceBound(model, 0.1, sampler)
>>> qucb = qUCB(test_X)
"""
def __init__(
self,
model: Model,
beta: float,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""q-Upper Confidence Bound.
Args:
model: A fitted model.
beta: Controls tradeoff between mean and standard deviation in UCB.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
posterior_transform: A PosteriorTransform (optional).
X_pending: A `batch_shape x m x d`-dim Tensor of `m` design points that have
points that have been submitted for function evaluation but have not yet
been evaluated. Concatenated into X upon forward call. Copied and set to
have no gradient.
"""
super().__init__(
model=model,
sampler=sampler,
objective=objective,
posterior_transform=posterior_transform,
X_pending=X_pending,
)
self.beta_prime = math.sqrt(beta * math.pi / 2)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qUpperConfidenceBound on the candidate set `X`.
Args:
X: A `batch_shape x q x d`-dim Tensor of t-batches with `q` `d`-dim design
points each.
Returns:
A `batch_shape'`-dim Tensor of Upper Confidence Bound values at the given
design points `X`, where `batch_shape'` is the broadcasted batch shape of
model and input `X`.
"""
posterior = self.model.posterior(
X=X, posterior_transform=self.posterior_transform
)
samples = self.sampler(posterior)
obj = self.objective(samples, X=X)
mean = obj.mean(dim=0)
ucb_samples = mean + self.beta_prime * (obj - mean).abs()
return ucb_samples.max(dim=-1)[0].mean(dim=0)
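

# A standalone numeric sketch (hypothetical helper, not part of the BoTorch
# API) of the qUCB reparameterization above. For `q = 1` and an identity
# objective this recovers the analytic UCB `mu + sqrt(beta) * sigma` in
# expectation, since `E|Y - mu| = sqrt(2 / pi) * sigma` for
# `Y ~ N(mu, sigma^2)` and `beta_prime = sqrt(beta * pi / 2)`.
def _qucb_from_samples_sketch(obj: Tensor, beta: float) -> Tensor:
    beta_prime = math.sqrt(beta * math.pi / 2)
    mean = obj.mean(dim=0)  # MC estimate of the posterior mean
    ucb_samples = mean + beta_prime * (obj - mean).abs()
    return ucb_samples.max(dim=-1).values.mean(dim=0)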