# Source code for botorch.test_functions.sensitivity_analysis

# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# LICENSE file in the root directory of this source tree.

import math
from typing import List, Optional, Tuple

import torch

from botorch.test_functions.synthetic import SyntheticTestFunction
from torch import Tensor

class Ishigami(SyntheticTestFunction):
    r"""Ishigami test function.

    Three-dimensional function (usually evaluated on `[-pi, pi]^3`):

        f(x) = sin(x_1) + a sin(x_2)^2 + b x_3^4 sin(x_1)

    Here `a` and `b` are constants where `a = 7` and `b = 0.1` or `b = 0.05`.
    Proposed to test sensitivity analysis methods because it exhibits strong
    nonlinearity and nonmonotonicity and a peculiar dependence on `x_3`.
    """

    def __init__(
        self, b: float = 0.1, noise_std: Optional[float] = None, negate: bool = False
    ) -> None:
        r"""
        Args:
            b: The b constant, should be 0.1 or 0.05.
            noise_std: Standard deviation of the observation noise.
            negate: If True, negate the objective.
        """
        self._optimizers = None
        if b not in (0.1, 0.05):
            raise ValueError("b parameter should be 0.1 or 0.05")
        self.dim = 3
        # Reference sensitivity values for this function, which depend on b:
        # first-order (si), total (si_t), and second-order (s_ij) Sobol
        # indices, plus derivative-based global sensitivity measures (DGSM).
        if b == 0.1:
            self.si = [0.3138, 0.4424, 0]
            self.si_t = [0.558, 0.442, 0.244]
            self.s_ij = [0, 0.244, 0]
            self.dgsm_gradient = [-0.0004, -0.0004, -0.0004]
            self.dgsm_gradient_abs = [1.9, 4.45, 1.97]
            self.dgsm_gradient_square = [7.7, 24.5, 11]
        elif b == 0.05:
            self.si = [0.218, 0.687, 0]
            self.si_t = [0.3131, 0.6868, 0.095]
            self.s_ij = [0, 0.094, 0]
            self.dgsm_gradient = [-0.0002, -0.0002, -0.0002]
            self.dgsm_gradient_abs = [1.26, 4.45, 1.97]
            self.dgsm_gradient_square = [2.8, 24.5, 11]
        self._bounds = [(-math.pi, math.pi) for _ in range(self.dim)]
        self.b = b
        super().__init__(noise_std=noise_std, negate=negate)

    @property
    def _optimal_value(self) -> float:
        # The optimum of this function is not tracked; accessing it is an error.
        raise NotImplementedError
class Gsobol(SyntheticTestFunction):
    r"""Gsobol test function.

    d-dimensional function (usually evaluated on `[0, 1]^d`):

        f(x) = Prod_{i=1}^{d} ((\|4 x_i - 2\| + a_i) / (1 + a_i)), a_i >= 0

    Common combinations of dimension and a vector:

        dim=8,  a = [0, 1, 4.5, 9, 99, 99, 99, 99]
        dim=6,  a = [0, 0.5, 3, 9, 99, 99]
        dim=15, a = [1, 2, 5, 10, 20, 50, 100, 500, 1000, ..., 1000]

    Proposed to test sensitivity analysis methods. First order Sobol indices
    have closed form expression `S_i = V_i / V` with:

        V_i = 1 / (3 (1 + a_i)^2)
        V = Prod_{i=1}^{d} (1 + V_i) - 1
    """

    def __init__(
        self,
        dim: int,
        a: Optional[List[float]] = None,
        noise_std: Optional[float] = None,
        negate: bool = False,
    ) -> None:
        r"""
        Args:
            dim: Dimensionality of the problem. If 6, 8, or 15, will use
                standard a.
            a: The a parameter; required unless dim is 6, 8, or 15.
            noise_std: Standard deviation of observation noise.
            negate: Return negative of function.

        Raises:
            ValueError: If `dim` is not 6, 8, or 15 and `a` is not provided.
        """
        self._optimizers = None
        self.dim = dim
        self._bounds = [(0, 1) for _ in range(self.dim)]
        # Standard a vectors from the literature for dims 6, 8, and 15;
        # otherwise a user-supplied a is required.
        if self.dim == 6:
            self.a = [0, 0.5, 3, 9, 99, 99]
        elif self.dim == 8:
            self.a = [0, 1, 4.5, 9, 99, 99, 99, 99]
        elif self.dim == 15:
            self.a = [1, 2, 5, 10, 20, 50, 100, 500] + [1000] * 7
        else:
            if a is None:
                # Fail fast with a clear message instead of crashing later
                # with self.a = None in downstream computations.
                raise ValueError(
                    "a parameter is required unless dim is 6, 8, or 15"
                )
            self.a = a
        # Precompute closed-form Sobol indices (defined elsewhere in this
        # class — not visible in this chunk).
        self.optimal_sobol_indicies()
        super().__init__(noise_std=noise_std, negate=negate)

    @property
    def _optimal_value(self) -> float:
        # The optimum of this function is not tracked; accessing it is an error.
        raise NotImplementedError
class Morris(SyntheticTestFunction):
    r"""Morris test function.

    20-dimensional function (usually evaluated on `[0, 1]^20`):

        f(x) = sum_{i=1}^20 beta_i w_i + sum_{i<j}^20 beta_ij w_i w_j
               + sum_{i<j<l}^20 beta_ijl w_i w_j w_l + 5 w_1 w_2 w_3 w_4

    Proposed to test sensitivity analysis methods.
    """

    def __init__(self, noise_std: Optional[float] = None, negate: bool = False) -> None:
        r"""
        Args:
            noise_std: Standard deviation of observation noise.
            negate: Return negative of function.
        """
        self._optimizers = None
        self.dim = 20
        self._bounds = [(0, 1)] * self.dim
        # Reference first-order Sobol indices: only the first ten inputs
        # contribute; the remaining ten are inert.
        leading = [0.005, 0.008, 0.017, 0.009, 0.016, 0, 0.069, 0.1, 0.15, 0.1]
        self.si = leading + [0] * 10
        super().__init__(noise_std=noise_std, negate=negate)

    @property
    def _optimal_value(self) -> float:
        # The optimum of this function is not tracked; accessing it is an error.
        raise NotImplementedError