Modular
Plug in new models, acquisition functions, and optimizers (a short sketch follows these highlights).
Built on PyTorch
Easily integrate neural network modules. Native GPU & autograd support.
Scalable
Support for scalable GPs via GPyTorch. Run code on multiple devices.
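As a sketch of the "Modular" point above: new acquisition functions plug in by subclassing BoTorch's abstract acquisition classes. The WeightedMeanStd class and its weight parameter below are hypothetical, written purely for illustration; the sketch assumes the AnalyticAcquisitionFunction base class and the t_batch_mode_transform decorator available in recent BoTorch releases.

import torch
from botorch.acquisition.analytic import AnalyticAcquisitionFunction
from botorch.utils.transforms import t_batch_mode_transform

class WeightedMeanStd(AnalyticAcquisitionFunction):
    """Hypothetical custom acquisition: posterior mean + weight * posterior std."""

    def __init__(self, model, weight=0.5):
        super().__init__(model=model)
        self.weight = weight

    @t_batch_mode_transform(expected_q=1)
    def forward(self, X):
        # After the transform, X has shape batch_shape x 1 x d; return one
        # scalar score per batch element.
        posterior = self.model.posterior(X)
        mean = posterior.mean.squeeze(-1).squeeze(-1)
        std = posterior.variance.sqrt().squeeze(-1).squeeze(-1)
        return mean + self.weight * std

An instance of this class can be passed to optimize_acqf exactly like the built-in UpperConfidenceBound used in the quickstart below.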
@article{balandat2019botorch,
  Author = {Maximilian Balandat and Brian Karrer and Daniel R. Jiang and Samuel Daulton and Benjamin Letham and Andrew Gordon Wilson and Eytan Bakshy},
  Journal = {arXiv e-prints},
  Month = oct,
  Pages = {arXiv:1910.06403},
  Title = {{BoTorch: Programmable Bayesian Optimization in PyTorch}},
  Year = {2019}
}

@article{daulton2020differentiable,
  Author = {Samuel Daulton and Maximilian Balandat and Eytan Bakshy},
  Journal = {arXiv e-prints},
  Title = {{Differentiable Expected Hypervolume Improvement for Parallel Multi-Objective Bayesian Optimization}},
  Url = {https://arxiv.org/abs/2006.05078},
  Year = {2020}
}
conda install botorch -c pytorch -c gpytorch
pip install botorch
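Both commands install the same package; a quick sanity check after installing (this assumes the package exposes a __version__ attribute, as recent releases do):

python -c "import botorch; print(botorch.__version__)"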
import torch
from botorch.models import SingleTaskGP
from botorch.fit import fit_gpytorch_model
from botorch.utils import standardize
from gpytorch.mlls import ExactMarginalLogLikelihood

# Generate synthetic training data: 10 random points in the unit square,
# with a noisy objective that peaks at the center.
train_X = torch.rand(10, 2)
Y = 1 - torch.norm(train_X - 0.5, dim=-1, keepdim=True)
Y = Y + 0.1 * torch.randn_like(Y)  # add some observation noise
train_Y = standardize(Y)

# Fit a single-task GP by maximizing the exact marginal log likelihood.
gp = SingleTaskGP(train_X, train_Y)
mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
fit_gpytorch_model(mll)
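Since the model and data are ordinary PyTorch tensors and modules, the same fit runs on a GPU and in double precision simply by constructing the training data on the target device. A minimal sketch (the _gpu variable names are illustrative; it falls back to CPU when CUDA is unavailable):

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
X_gpu = torch.rand(10, 2, device=device, dtype=torch.double)
Y_gpu = 1 - torch.norm(X_gpu - 0.5, dim=-1, keepdim=True)
# Model parameters follow the device and dtype of the training data.
gp_gpu = SingleTaskGP(X_gpu, standardize(Y_gpu + 0.1 * torch.randn_like(Y_gpu)))
fit_gpytorch_model(ExactMarginalLogLikelihood(gp_gpu.likelihood, gp_gpu))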
# Construct an acquisition function over the fitted model.
from botorch.acquisition import UpperConfidenceBound
UCB = UpperConfidenceBound(gp, beta=0.1)
# Optimize the acquisition function over the unit square to get the next candidate.
from botorch.optim import optimize_acqf
bounds = torch.stack([torch.zeros(2), torch.ones(2)])
candidate, acq_value = optimize_acqf(
    UCB, bounds=bounds, q=1, num_restarts=5, raw_samples=20,
)
candidate  # tensor([0.4887, 0.5063])
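The quickstart ends with a single suggested candidate; a full Bayesian optimization loop evaluates that candidate, appends the observation, and refits the model. A minimal sketch built from the pieces above (f re-creates the synthetic noisy objective; raw observations are kept and re-standardized before each refit):

def f(X):
    Y = 1 - torch.norm(X - 0.5, dim=-1, keepdim=True)
    return Y + 0.1 * torch.randn_like(Y)

X_all, Y_all = train_X, Y  # raw (unstandardized) observations so far
for _ in range(10):
    # Refit the GP on all data collected so far.
    gp = SingleTaskGP(X_all, standardize(Y_all))
    fit_gpytorch_model(ExactMarginalLogLikelihood(gp.likelihood, gp))
    # Suggest and evaluate the next point.
    new_x, _ = optimize_acqf(
        UpperConfidenceBound(gp, beta=0.1),
        bounds=bounds, q=1, num_restarts=5, raw_samples=20,
    )
    X_all = torch.cat([X_all, new_x])  # new_x has shape 1 x 2
    Y_all = torch.cat([Y_all, f(new_x)])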