@@ -304,7 +304,9 @@ def scipy_optimizer(
304
304
n : int ,
305
305
inequality_constraints : Optional [List [Tuple [Tensor , Tensor , float ]]] = None ,
306
306
equality_constraints : Optional [List [Tuple [Tensor , Tensor , float ]]] = None ,
307
+ nonlinear_inequality_constraints : Optional [List [Callable ]] = None ,
307
308
fixed_features : Optional [Dict [int , float ]] = None ,
309
+ batch_initial_conditions : Optional [Tensor ] = None ,
308
310
rounding_func : Optional [Callable [[Tensor ], Tensor ]] = None ,
309
311
** kwargs : Any ,
310
312
) -> Tuple [Tensor , Tensor ]:
@@ -321,6 +323,14 @@ def scipy_optimizer(
321
323
equality constraints: A list of tuples (indices, coefficients, rhs),
322
324
with each tuple encoding an equality constraint of the form
323
325
`\sum_i (X[indices[i]] * coefficients[i]) == rhs`
326
+ nonlinear_inequality_constraints: A list of callables that represent
327
+ non-linear inequality constraints of the form `callable(x) >= 0`. Each
328
+ callable is expected to take a `(num_restarts) x q x d`-dim tensor as an
329
+ input and return a `(num_restarts) x q`-dim tensor with the constraint
330
+ values. The constraints will later be passed to SLSQP. You need to pass in
331
+ `batch_initial_conditions` in this case.
332
+ batch_initial_conditions: A tensor to specify the initial conditions. Set
333
+ this if you do not want to use the default initialization strategy.
324
334
fixed_features: A map {feature_index: value} for features that should
325
335
be fixed to a particular value during generation.
326
336
rounding_func: A function that rounds an optimization result
@@ -352,7 +362,9 @@ def scipy_optimizer(
352
362
options = kwargs ,
353
363
inequality_constraints = inequality_constraints ,
354
364
equality_constraints = equality_constraints ,
365
+ nonlinear_inequality_constraints = nonlinear_inequality_constraints ,
355
366
fixed_features = fixed_features ,
367
+ batch_initial_conditions = batch_initial_conditions ,
356
368
sequential = sequential ,
357
369
post_processing_func = rounding_func ,
358
370
)
@@ -365,6 +377,7 @@ def recommend_best_observed_point(
365
377
objective_weights : Tensor ,
366
378
outcome_constraints : Optional [Tuple [Tensor , Tensor ]] = None ,
367
379
linear_constraints : Optional [Tuple [Tensor , Tensor ]] = None ,
380
+ nonlinear_inequality_constraints : Optional [List [Callable ]] = None ,
368
381
fixed_features : Optional [Dict [int , float ]] = None ,
369
382
model_gen_options : Optional [TConfig ] = None ,
370
383
target_fidelities : Optional [Dict [int , float ]] = None ,
@@ -385,6 +398,8 @@ def recommend_best_observed_point(
385
398
linear_constraints: A tuple of (A, b). For k linear constraints on
386
399
d-dimensional x, A is (k x d) and b is (k x 1) such that
387
400
A x <= b.
401
+ nonlinear_inequality_constraints: A list of callables that represent
402
+ non-linear inequality constraints of the form `callable(x) >= 0`.
388
403
fixed_features: A map {feature_index: value} for features that
389
404
should be fixed to a particular value in the best point.
390
405
model_gen_options: A config dictionary that can contain
@@ -396,6 +411,11 @@ def recommend_best_observed_point(
396
411
Returns:
397
412
A d-array of the best point, or None if no feasible point was observed.
398
413
"""
414
+ if nonlinear_inequality_constraints :
415
+ raise NotImplementedError (
416
+ "`nonlinear_inequality_constraints` aren't supported by "
417
+ "`recommend_best_observed_point`."
418
+ )
399
419
if target_fidelities :
400
420
raise NotImplementedError (
401
421
"target_fidelities not implemented for base BotorchModel"
@@ -421,6 +441,7 @@ def recommend_best_out_of_sample_point(
421
441
objective_weights : Tensor ,
422
442
outcome_constraints : Optional [Tuple [Tensor , Tensor ]] = None ,
423
443
linear_constraints : Optional [Tuple [Tensor , Tensor ]] = None ,
444
+ nonlinear_inequality_constraints : Optional [List [Callable ]] = None ,
424
445
fixed_features : Optional [Dict [int , float ]] = None ,
425
446
model_gen_options : Optional [TConfig ] = None ,
426
447
target_fidelities : Optional [Dict [int , float ]] = None ,
@@ -442,6 +463,8 @@ def recommend_best_out_of_sample_point(
442
463
linear_constraints: A tuple of (A, b). For k linear constraints on
443
464
d-dimensional x, A is (k x d) and b is (k x 1) such that
444
465
A x <= b.
466
+ nonlinear_inequality_constraints: A list of callables that represent
467
+ non-linear inequality constraints of the form `callable(x) >= 0`.
445
468
fixed_features: A map {feature_index: value} for features that
446
469
should be fixed to a particular value in the best point.
447
470
model_gen_options: A config dictionary that can contain
@@ -483,6 +506,8 @@ def recommend_best_out_of_sample_point(
483
506
# (including transforming constraints b/c of fixed features)
484
507
if inequality_constraints is not None :
485
508
raise UnsupportedError ("Inequality constraints are not supported!" )
509
+ if nonlinear_inequality_constraints is not None :
510
+ raise UnsupportedError ("Non-linear inequality constraints are not supported!" )
486
511
487
512
return_best_only = optimizer_options .get ("return_best_only" , True )
488
513
bounds_ = torch .tensor (bounds , dtype = model .dtype , device = model .device )
0 commit comments