diff --git a/botorch/optim/optimize.py b/botorch/optim/optimize.py
index 6f3a5876a9..ef37c38d87 100644
--- a/botorch/optim/optimize.py
+++ b/botorch/optim/optimize.py
@@ -196,6 +196,8 @@ def _optimize_acqf_all_features_fixed(
     X = X.expand(q, *X.shape)
     with torch.no_grad():
         acq_value = acq_function(X)
+    if acq_value.ndim == 1:
+        acq_value = acq_value[0]
     return X, acq_value
 
 
diff --git a/test/optim/test_optimize.py b/test/optim/test_optimize.py
index 8d8be47ea0..e0bff2ce62 100644
--- a/test/optim/test_optimize.py
+++ b/test/optim/test_optimize.py
@@ -14,10 +14,12 @@
 
 import numpy as np
 import torch
+
 from botorch.acquisition.acquisition import (
     AcquisitionFunction,
     OneShotAcquisitionFunction,
 )
+from botorch.acquisition.analytic import LogExpectedImprovement
 from botorch.acquisition.knowledge_gradient import qKnowledgeGradient
 from botorch.acquisition.monte_carlo import qExpectedImprovement
 from botorch.acquisition.multi_objective.hypervolume_knowledge_gradient import (
@@ -1147,6 +1149,23 @@ def nlc(x):
             ),
         )
 
+    def test_optimize_acqf_all_fixed_features(self):
+        train_X = torch.rand(3, 2)
+        train_Y = torch.rand(3, 1)
+        gp = SingleTaskGP(train_X=train_X, train_Y=train_Y)
+        gp.eval()
+        logEI = LogExpectedImprovement(model=gp, best_f=train_Y.max())
+        bounds = torch.stack([torch.zeros(2), torch.ones(2)])
+        _, acqf_value = optimize_acqf(
+            logEI,
+            bounds,
+            q=1,
+            num_restarts=1,
+            raw_samples=1,
+            fixed_features={0: 0, 1: 0},
+        )
+        self.assertEqual(acqf_value.ndim, 0)
+
     def test_constraint_caching(self):
         def nlc(x):
             return 4 - x.sum(dim=-1)
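
Note (not part of the patch): a minimal standalone sketch of the shape issue the first hunk addresses. When every feature is fixed, _optimize_acqf_all_features_fixed evaluates the acquisition function on a single q x d candidate, which produces a 1-d tensor with one element rather than the 0-d scalar returned by the regular optimize_acqf path; the added guard squeezes it to a scalar, and the new test asserts that shape. The toy acquisition below only mimics that output shape and is not a BoTorch API.

    import torch

    def toy_acq_function(X: torch.Tensor) -> torch.Tensor:
        # Mimics the batched output shape: one value per t-batch, so a single
        # (q, d) candidate yields a tensor of shape (1,) rather than ().
        return X.sum(dim=(-2, -1)).reshape(-1)

    X = torch.zeros(1, 2)  # q=1 candidate with both features fixed to 0.0
    acq_value = toy_acq_function(X)
    assert acq_value.ndim == 1  # shape returned before the fix

    # The patched helper reduces this to a scalar, matching the shape that
    # optimize_acqf returns when not all features are fixed.
    if acq_value.ndim == 1:
        acq_value = acq_value[0]
    assert acq_value.ndim == 0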