
Properly handle observation_noise kwarg for BatchedMultiOutputGPyTorchModel #182

Closed · wants to merge 1 commit
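With this change, passing observation_noise=True to posterior on a BatchedMultiOutputGPyTorchModel routes the predictive MVN through the model's likelihood, so the returned posterior describes noisy observations rather than the latent function alone. Below is a minimal sketch of the resulting behavior; it is not part of this PR's diff, and the toy data, tensor shapes, and two-outcome SingleTaskGP setup are illustrative assumptions only.

```python
import torch
from botorch.models import SingleTaskGP

# Toy data (illustrative only): 10 points in 2-D with 2 outcomes, which makes
# SingleTaskGP a batched multi-output model under the hood.
train_X = torch.rand(10, 2)
train_Y = torch.stack([train_X.sum(dim=-1), train_X.prod(dim=-1)], dim=-1)
model = SingleTaskGP(train_X, train_Y)

test_X = torch.rand(5, 2)
# Posterior over the latent function f.
posterior_f = model.posterior(test_X)
# Posterior over noisy observations: with this fix, the MVN is passed through
# the model's likelihood, so the noise level is folded into the covariance.
posterior_y = model.posterior(test_X, observation_noise=True)

print(posterior_f.mean.shape)  # torch.Size([5, 2])
# The predictive variance should be at least as large as the latent variance.
assert (posterior_y.variance >= posterior_f.variance).all()
```

The new test in test/models/test_gp_regression.py checks exactly this relationship: it compares the variance of the observation-noise posterior against the variance obtained by applying the model likelihood directly (see _get_pvar_expected in the diff below).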
botorch/models/gpytorch.py: 2 additions & 0 deletions
@@ -185,6 +185,8 @@ def posterior(
                     X=X, original_batch_shape=self._input_batch_shape
                 )
             mvn = self(X)
+            if observation_noise:
+                mvn = self.likelihood(mvn, X)
             mean_x = mvn.mean
             covar_x = mvn.covariance_matrix
             if self._num_outputs > 1:
test/models/test_gp_regression.py: 35 additions & 5 deletions
@@ -12,6 +12,7 @@
     HeteroskedasticSingleTaskGP,
     SingleTaskGP,
 )
+from botorch.models.utils import add_output_dim
 from botorch.posteriors import GPyTorchPosterior
 from botorch.sampling import SobolQMCNormalSampler
 from gpytorch.kernels import MaternKernel, ScaleKernel
@@ -82,20 +83,39 @@ def test_gp(self, cuda=False):
             # test posterior
             # test non batch evaluation
             X = torch.rand(batch_shape + torch.Size([3, 1]), **tkwargs)
+            expected_mean_shape = batch_shape + torch.Size([3, num_outputs])
             posterior = model.posterior(X)
             self.assertIsInstance(posterior, GPyTorchPosterior)
-            self.assertEqual(
-                posterior.mean.shape, batch_shape + torch.Size([3, num_outputs])
+            self.assertEqual(posterior.mean.shape, expected_mean_shape)
+            # test adding observation noise
+            posterior_pred = model.posterior(X, observation_noise=True)
+            self.assertIsInstance(posterior_pred, GPyTorchPosterior)
+            self.assertEqual(posterior_pred.mean.shape, expected_mean_shape)
+            pvar = posterior_pred.variance
+            pvar_exp = _get_pvar_expected(posterior, model, X, num_outputs)
+            self.assertTrue(
+                torch.allclose(pvar, pvar_exp, rtol=1e-4, atol=1e-06)
             )
+
             # test batch evaluation
             X = torch.rand(
                 torch.Size([2]) + batch_shape + torch.Size([3, 1]), **tkwargs
             )
+            expected_mean_shape = (
+                torch.Size([2]) + batch_shape + torch.Size([3, num_outputs])
+            )
+
             posterior = model.posterior(X)
             self.assertIsInstance(posterior, GPyTorchPosterior)
-            self.assertEqual(
-                posterior.mean.shape,
-                torch.Size([2]) + batch_shape + torch.Size([3, num_outputs]),
+            self.assertEqual(posterior.mean.shape, expected_mean_shape)
+            # test adding observation noise in batch mode
+            posterior_pred = model.posterior(X, observation_noise=True)
+            self.assertIsInstance(posterior_pred, GPyTorchPosterior)
+            self.assertEqual(posterior_pred.mean.shape, expected_mean_shape)
+            pvar = posterior_pred.variance
+            pvar_exp = _get_pvar_expected(posterior, model, X, num_outputs)
+            self.assertTrue(
+                torch.allclose(pvar, pvar_exp, rtol=1e-4, atol=1e-06)
             )
 
     def test_gp_cuda(self):
@@ -324,3 +344,13 @@ def test_condition_on_observations(self, cuda=False):
     def test_fantasize(self, cuda=False):
         with self.assertRaises(NotImplementedError):
             super().test_fantasize(cuda=cuda)
+
+
+def _get_pvar_expected(posterior, model, X, num_outputs):
+    if num_outputs == 1:
+        return model.likelihood(posterior.mvn, X).variance.unsqueeze(-1)
+    X_, odi = add_output_dim(X=X, original_batch_shape=model._input_batch_shape)
+    pvar_exp = model.likelihood(model(X_), X_).variance
+    return torch.stack(
+        [pvar_exp.select(dim=odi, index=i) for i in range(num_outputs)], dim=-1
+    )