Modular: Plug in new models, acquisition functions, and optimizers (see the sketch below).
Built on PyTorch: Easily integrate neural network modules, with native GPU & autograd support.
Scalable: Support for scalable GPs via GPyTorch; run code on multiple devices.
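As one illustration of the modular design, here is a minimal sketch of our own (not from the BoTorch docs) of a plug-in acquisition function: subclass AnalyticAcquisitionFunction and implement forward. The class name UCBSketch and the beta parameter are illustrative.

import torch
from botorch.acquisition.analytic import AnalyticAcquisitionFunction
from botorch.utils.transforms import t_batch_mode_transform

class UCBSketch(AnalyticAcquisitionFunction):
    """Illustrative upper confidence bound: mean + beta * std."""

    def __init__(self, model, beta: float) -> None:
        super().__init__(model=model)
        self.beta = beta

    @t_batch_mode_transform(expected_q=1)
    def forward(self, X: torch.Tensor) -> torch.Tensor:
        # X has shape batch_shape x 1 x d after the transform.
        posterior = self.model.posterior(X)
        mean = posterior.mean.squeeze(-1).squeeze(-1)            # batch_shape
        sigma = posterior.variance.sqrt().squeeze(-1).squeeze(-1)
        return mean + self.beta * sigma

A custom acquisition function like this drops into optimize_acqf the same way the built-in ones do.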
If you use BoTorch, please cite the following paper:

@inproceedings{balandat2020botorch,
  title = {{BoTorch: A Framework for Efficient Monte-Carlo Bayesian Optimization}},
  author = {Balandat, Maximilian and Karrer, Brian and Jiang, Daniel R. and Daulton, Samuel and Letham, Benjamin and Wilson, Andrew Gordon and Bakshy, Eytan},
  booktitle = {Advances in Neural Information Processing Systems 33},
  year = 2020,
  url = {http://arxiv.org/abs/1910.06403}
}
Install via conda:

conda install botorch -c pytorch -c gpytorch -c conda-forge

or via pip:

pip install botorch
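To sanity-check the install (an optional step we suggest, assuming botorch exposes __version__ in the usual way):

import botorch
print(botorch.__version__)  # prints the installed version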
# Fit a Gaussian Process (GP) model to synthetic data.
import torch
from botorch.models import SingleTaskGP
from botorch.models.transforms import Normalize, Standardize
from botorch.fit import fit_gpytorch_mll
from gpytorch.mlls import ExactMarginalLogLikelihood

# Ten random 2D points in [0, 2]^2 with a noisy distance-based objective.
train_X = torch.rand(10, 2, dtype=torch.double) * 2
Y = 1 - torch.linalg.norm(train_X - 0.5, dim=-1, keepdim=True)
Y = Y + 0.1 * torch.randn_like(Y)  # add some observation noise

gp = SingleTaskGP(
    train_X=train_X,
    train_Y=Y,
    input_transform=Normalize(d=2),      # map inputs to the unit cube
    outcome_transform=Standardize(m=1),  # standardize outcomes
)
mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
fit_gpytorch_mll(mll)
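With the model fit, you can query its posterior at new points. This short check is our own addition (test_X is illustrative), using the posterior method that BoTorch models expose:

test_X = torch.rand(4, 2, dtype=torch.double) * 2
posterior = gp.posterior(test_X)
print(posterior.mean.shape)      # torch.Size([4, 1])
print(posterior.variance.shape)  # torch.Size([4, 1])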
# Construct an acquisition function: log expected improvement over the
# best observed value.
from botorch.acquisition import LogExpectedImprovement

logEI = LogExpectedImprovement(model=gp, best_f=Y.max())
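Analytic acquisition functions like this one evaluate batches of shape b x q x d with q = 1, returning one value per batch element. An illustrative check (not part of the original quickstart):

X_eval = torch.rand(3, 1, 2, dtype=torch.double)
print(logEI(X_eval).shape)  # torch.Size([3])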
# Optimize the acquisition function over the unit square via multi-start
# optimization.
from botorch.optim import optimize_acqf

bounds = torch.stack([torch.zeros(2), torch.ones(2)]).to(torch.double)
candidate, acq_value = optimize_acqf(
    logEI, bounds=bounds, q=1, num_restarts=5, raw_samples=20,
)
candidate  # tensor([[0.2981, 0.2401]], dtype=torch.float64)
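Putting the steps together, here is a closed-loop sketch of our own that reuses the setup above (the synthetic objective, the noise level, and five iterations are assumptions for illustration; candidates are drawn from the [0, 1]^2 bounds defined earlier):

# Refit the GP, maximize LogEI, evaluate the candidate, append it to the data.
def objective(X):
    # assumed true function, matching the synthetic objective above
    return 1 - torch.linalg.norm(X - 0.5, dim=-1, keepdim=True)

for _ in range(5):
    gp = SingleTaskGP(
        train_X=train_X,
        train_Y=Y,
        input_transform=Normalize(d=2),
        outcome_transform=Standardize(m=1),
    )
    mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
    fit_gpytorch_mll(mll)
    logEI = LogExpectedImprovement(model=gp, best_f=Y.max())
    candidate, _ = optimize_acqf(
        logEI, bounds=bounds, q=1, num_restarts=5, raw_samples=20,
    )
    new_Y = objective(candidate) + 0.1 * torch.randn(1, 1, dtype=torch.double)
    train_X = torch.cat([train_X, candidate])
    Y = torch.cat([Y, new_Y])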