 import numpy as np
 import torch
 from botorch import settings
-from botorch.exceptions.errors import BotorchTensorDimensionError, InputDataError
+from botorch.exceptions.errors import (
+    BotorchTensorDimensionError,
+    DeprecationError,
+    InputDataError,
+)
 from botorch.logging import shape_to_str
 from botorch.models.utils.assorted import fantasize as fantasize_flag
 from botorch.posteriors import Posterior, PosteriorList
@@ -83,7 +87,7 @@ def posterior(
         self,
         X: Tensor,
         output_indices: Optional[List[int]] = None,
-        observation_noise: bool = False,
+        observation_noise: Union[bool, Tensor] = False,
         posterior_transform: Optional[PosteriorTransform] = None,
         **kwargs: Any,
     ) -> Posterior:
@@ -102,7 +106,12 @@ def posterior(
                 Can be used to speed up computation if only a subset of the
                 model's outputs are required for optimization. If omitted,
                 computes the posterior over all model outputs.
-            observation_noise: If True, add observation noise to the posterior.
+            observation_noise: For models with an inferred noise level, if True,
+                include observation noise. For models with an observed noise level,
+                this must be a `model_batch_shape x 1 x m`-dim tensor or
+                a `model_batch_shape x n' x m`-dim tensor containing the average
+                noise for each batch and output. `noise` must be in the
+                outcome-transformed space if an outcome transform is used.
             posterior_transform: An optional PosteriorTransform.
 
         Returns:
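
A minimal usage sketch of the updated `posterior` contract (not part of this diff; the concrete model class and all shapes are illustrative assumptions for the BoTorch version this change targets):

    import torch
    from botorch.models import FixedNoiseGP

    train_X = torch.rand(10, 2, dtype=torch.double)
    train_Y = train_X.sum(dim=-1, keepdim=True)
    train_Yvar = torch.full_like(train_Y, 0.01)
    model = FixedNoiseGP(train_X, train_Y, train_Yvar)  # model with an observed noise level

    test_X = torch.rand(4, 2, dtype=torch.double)
    # For a model with an observed noise level, pass a `model_batch_shape x n' x m`
    # noise tensor (here `4 x 1`); models with an inferred noise level still take a bool.
    obs_noise = torch.full((4, 1), 0.01, dtype=torch.double)
    posterior = model.posterior(test_X, observation_noise=obs_noise)
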
@@ -310,7 +319,7 @@ def fantasize(
         # TODO: see if any of these can be imported only if TYPE_CHECKING
         X: Tensor,
         sampler: MCSampler,
-        observation_noise: bool = True,
+        observation_noise: Optional[Tensor] = None,
         **kwargs: Any,
     ) -> TFantasizeMixin:
         r"""Construct a fantasy model.
@@ -328,12 +337,21 @@ def fantasize(
                 `batch_shape` is the batch shape (must be compatible with the
                 batch shape of the model).
             sampler: The sampler used for sampling from the posterior at `X`.
-            observation_noise: If True, include observation noise.
+            observation_noise: A `model_batch_shape x 1 x m`-dim tensor or
+                a `model_batch_shape x n' x m`-dim tensor containing the average
+                noise for each batch and output, where `m` is the number of outputs.
+                `noise` must be in the outcome-transformed space if an outcome
+                transform is used. If None, then the noise will be the inferred
+                noise level.
             kwargs: Will be passed to `model.condition_on_observations`
 
         Returns:
             The constructed fantasy model.
         """
+        if not isinstance(observation_noise, Tensor) and observation_noise is not None:
+            raise DeprecationError(
+                "`fantasize` no longer accepts a boolean for `observation_noise`."
+            )
         # if the inputs are empty, expand the inputs
         if X.shape[-2] == 0:
             output_shape = (
@@ -350,8 +368,15 @@ def fantasize(
         propagate_grads = kwargs.pop("propagate_grads", False)
         with fantasize_flag():
             with settings.propagate_grads(propagate_grads):
-                post_X = self.posterior(X, observation_noise=observation_noise)
+                post_X = self.posterior(
+                    X,
+                    observation_noise=True
+                    if observation_noise is None
+                    else observation_noise,
+                )
             Y_fantasized = sampler(post_X)  # num_fantasies x batch_shape x n' x m
+            if observation_noise is not None:
+                kwargs["noise"] = observation_noise.expand(Y_fantasized.shape[1:])
         return self.condition_on_observations(
             X=self.transform_inputs(X), Y=Y_fantasized, **kwargs
         )
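
A hedged sketch of the new `fantasize` contract (not part of this diff; the model class, sampler choice, and shapes are assumptions): the observation noise is now a tensor, or `None` to fall back to the model's inferred/observed noise, while a boolean is expected to raise `DeprecationError` per the check above.

    import torch
    from botorch.models import FixedNoiseGP
    from botorch.sampling import SobolQMCNormalSampler

    train_X = torch.rand(10, 2, dtype=torch.double)
    train_Y = train_X.sum(dim=-1, keepdim=True)
    model = FixedNoiseGP(train_X, train_Y, torch.full_like(train_Y, 0.01))

    sampler = SobolQMCNormalSampler(sample_shape=torch.Size([8]))
    fant_X = torch.rand(3, 2, dtype=torch.double)
    # `n' x m` average noise; must already be in the outcome-transformed space
    # if the model uses an outcome transform.
    fant_noise = torch.full((3, 1), 0.01, dtype=torch.double)
    fant_model = model.fantasize(X=fant_X, sampler=sampler, observation_noise=fant_noise)
    # model.fantasize(X=fant_X, sampler=sampler, observation_noise=True)  # DeprecationError
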
@@ -434,7 +459,9 @@ def posterior(
                 respective likelihoods to the posterior. If a Tensor of shape
                 `(batch_shape) x q x m`, use it directly as the observation
                 noise (with `observation_noise[...,i]` added to the posterior
-                of the `i`-th model).
+                of the `i`-th model). `observation_noise` is assumed
+                to be in the outcome-transformed space, if an outcome transform
+                is used by the model.
             posterior_transform: An optional PosteriorTransform.
 
         Returns:
@@ -553,7 +580,7 @@ def fantasize(
         self,
         X: Tensor,
         sampler: MCSampler,
-        observation_noise: bool = True,
+        observation_noise: Optional[Tensor] = None,
         evaluation_mask: Optional[Tensor] = None,
         **kwargs: Any,
     ) -> Model:
@@ -573,7 +600,12 @@ def fantasize(
                 batch shape of the model).
             sampler: The sampler used for sampling from the posterior at `X`. If
                 evaluation_mask is not None, this must be a `ListSampler`.
-            observation_noise: If True, include observation noise.
+            observation_noise: A `model_batch_shape x 1 x m`-dim tensor or
+                a `model_batch_shape x n' x m`-dim tensor containing the average
+                noise for each batch and output, where `m` is the number of outputs.
+                `noise` must be in the outcome-transformed space if an outcome
+                transform is used. If None, then the noise will be the inferred
+                noise level.
             evaluation_mask: A `n' x m`-dim tensor of booleans indicating which
                 outputs should be fantasized for a given design. This uses the same
                 evaluation mask for all batches.
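
A hedged end-to-end sketch for the model-list case (not part of this diff; the sub-model class, sampler setup, and all shapes are assumptions): per-output noise is an `n' x m` tensor passed alongside an evaluation mask and a `ListSampler`.

    import torch
    from botorch.models import FixedNoiseGP, ModelListGP
    from botorch.sampling import SobolQMCNormalSampler
    from botorch.sampling.list_sampler import ListSampler

    train_X = torch.rand(10, 2, dtype=torch.double)
    train_Yvar = torch.full((10, 1), 0.01, dtype=torch.double)
    model_list = ModelListGP(
        FixedNoiseGP(train_X, train_X.sum(dim=-1, keepdim=True), train_Yvar),
        FixedNoiseGP(train_X, train_X.prod(dim=-1, keepdim=True), train_Yvar),
    )

    fant_X = torch.rand(3, 2, dtype=torch.double)
    eval_mask = torch.tensor([[True, False], [True, True], [False, True]])
    sampler = ListSampler(
        SobolQMCNormalSampler(sample_shape=torch.Size([4])),
        SobolQMCNormalSampler(sample_shape=torch.Size([4])),
    )
    # `n' x m` average noise; column i is sliced out for the i-th sub-model.
    fant_noise = torch.full((3, 2), 0.01, dtype=torch.double)
    fant = model_list.fantasize(
        X=fant_X,
        sampler=sampler,
        observation_noise=fant_noise,
        evaluation_mask=eval_mask,
    )
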
@@ -595,6 +627,8 @@ def fantasize(
 
         fant_models = []
         X_i = X
+        if observation_noise is None:
+            observation_noise_i = observation_noise
         for i in range(self.num_outputs):
             # get the inputs to fantasize at for output i
             if evaluation_mask is not None:
@@ -604,12 +638,15 @@ def fantasize(
                 # samples from a single Sobol sequence or consider requiring that the
                 # sampling is IID to ensure good coverage.
                 sampler_i = sampler.samplers[i]
+                if observation_noise is not None:
+                    observation_noise_i = observation_noise[..., mask_i, i : i + 1]
             else:
                 sampler_i = sampler
+
             fant_model = self.models[i].fantasize(
                 X=X_i,
                 sampler=sampler_i,
-                observation_noise=observation_noise,
+                observation_noise=observation_noise_i,
                 **kwargs,
             )
             fant_models.append(fant_model)
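
A self-contained shape illustration of the per-output slicing above (plain PyTorch; the shapes and mask values are assumed for illustration):

    import torch

    n_prime, m = 5, 2
    observation_noise = torch.full((n_prime, m), 0.01)  # `n' x m` average noise
    evaluation_mask = torch.tensor(
        [[True, False], [True, True], [False, True], [True, False], [False, True]]
    )
    for i in range(m):
        mask_i = evaluation_mask[:, i]
        # noise for the designs fantasized under output i, shape `n_i x 1`
        noise_i = observation_noise[..., mask_i, i : i + 1]
        print(f"output {i}: noise shape {tuple(noise_i.shape)}")
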