 from fiddy.extensions.amici import simulate_petab_to_cached_functions
 from fiddy.success import Consistency
 
+
 repo_root = Path(__file__).parent.parent.parent
 
 # reuse compiled models from test_benchmark_collection.sh
@@ -40,15 +41,9 @@
     "Fujita_SciSignal2010",
     # FIXME: re-enable
     "Raia_CancerResearch2011",
-    "Zheng_PNAS2012",
 }
 models = list(sorted(models))
 
-debug = False
-if debug:
-    debug_path = Path(__file__).parent / "debug"
-    debug_path.mkdir(exist_ok=True, parents=True)
-
 
 @dataclass
 class GradientCheckSettings:
@@ -58,6 +53,10 @@ class GradientCheckSettings:
     # Absolute and relative tolerances for finite difference gradient checks.
     atol_check: float = 1e-3
     rtol_check: float = 1e-2
+    # Absolute and relative tolerances for fiddy consistency check between
+    # forward/backward/central differences.
+    atol_consistency: float = 1e-5
+    rtol_consistency: float = 1e-1
     # Step sizes for finite difference gradient checks.
     step_sizes = [
         1e-1,
@@ -68,50 +67,73 @@ class GradientCheckSettings:
         1e-5,
     ]
     rng_seed: int = 0
+    ss_sensitivity_mode: amici.SteadyStateSensitivityMode = (
+        amici.SteadyStateSensitivityMode.integrateIfNewtonFails
+    )
+    noise_level: float = 0.05
 
 
 settings = defaultdict(GradientCheckSettings)
-settings["Smith_BMCSystBiol2013"] = GradientCheckSettings(
-    atol_sim=1e-10,
-    rtol_sim=1e-10,
+# NOTE: Newton method fails badly with ASA for Blasi_CellSystems2016
+settings["Blasi_CellSystems2016"] = GradientCheckSettings(
+    atol_check=1e-12,
+    rtol_check=1e-4,
+    ss_sensitivity_mode=amici.SteadyStateSensitivityMode.integrationOnly,
+)
+settings["Borghans_BiophysChem1997"] = GradientCheckSettings(
+    rng_seed=2,
+    atol_check=1e-5,
+    rtol_check=1e-3,
+)
+settings["Brannmark_JBC2010"] = GradientCheckSettings(
+    ss_sensitivity_mode=amici.SteadyStateSensitivityMode.integrationOnly,
 )
+settings["Giordano_Nature2020"] = GradientCheckSettings(
+    atol_check=1e-6, rtol_check=1e-3, rng_seed=1
+)
+settings["Okuonghae_ChaosSolitonsFractals2020"] = GradientCheckSettings(
+    atol_sim=1e-14,
+    rtol_sim=1e-14,
+    noise_level=0.01,
+    atol_consistency=1e-3,
+)
+settings["Okuonghae_ChaosSolitonsFractals2020"].step_sizes.extend([0.2, 0.005])
 settings["Oliveira_NatCommun2021"] = GradientCheckSettings(
     # Avoid "root after reinitialization"
     atol_sim=1e-12,
     rtol_sim=1e-12,
 )
-settings["Okuonghae_ChaosSolitonsFractals2020"] = GradientCheckSettings(
-    atol_sim=1e-14,
-    rtol_sim=1e-14,
+settings["Smith_BMCSystBiol2013"] = GradientCheckSettings(
+    atol_sim=1e-10,
+    rtol_sim=1e-10,
 )
-settings["Okuonghae_ChaosSolitonsFractals2020"].step_sizes.insert(0, 0.2)
-settings["Giordano_Nature2020"] = GradientCheckSettings(
-    atol_check=1e-6, rtol_check=1e-3, rng_seed=1
+settings["Sneyd_PNAS2002"] = GradientCheckSettings(
+    atol_sim=1e-15,
+    rtol_sim=1e-12,
+    atol_check=1e-5,
+    rtol_check=1e-4,
+    rng_seed=7,
+)
+settings["Weber_BMC2015"] = GradientCheckSettings(
+    atol_sim=1e-12,
+    rtol_sim=1e-12,
+    atol_check=1e-6,
+    rtol_check=1e-2,
+    rng_seed=1,
+)
+settings["Zheng_PNAS2012"] = GradientCheckSettings(
+    rng_seed=1,
+    rtol_sim=1e-15,
+    atol_check=5e-4,
+    rtol_check=4e-3,
+    noise_level=0.01,
 )
 
 
-def assert_gradient_check_success(
-    derivative: fiddy.Derivative,
-    expected_derivative: np.array,
-    point: np.array,
-    atol: float,
-    rtol: float,
-) -> None:
-    if not derivative.df.success.all():
-        raise AssertionError(
-            f"Failed to compute finite differences:\n{derivative.df}"
-        )
-    check = NumpyIsCloseDerivativeCheck(
-        derivative=derivative,
-        expectation=expected_derivative,
-        point=point,
-    )
-    check_result = check(rtol=rtol, atol=atol)
-
-    if check_result.success is True:
-        return
-
-    raise AssertionError(f"Gradient check failed:\n{check_result.df}")
+debug = False
+if debug:
+    debug_path = Path(__file__).parent / "debug"
+    debug_path.mkdir(exist_ok=True, parents=True)
 
 
 @pytest.mark.filterwarnings(
@@ -135,7 +157,7 @@ def test_benchmark_gradient(model, scale, sensitivity_method, request):
         "Borghans_BiophysChem1997",
         "Sneyd_PNAS2002",
         "Bertozzi_PNAS2020",
-        "Okuonghae_ChaosSolitonsFractals2020",
+        "Zheng_PNAS2012",
     ):
         # not really worth the effort trying to fix these cases if they
         # only fail on linear scale
@@ -144,7 +166,6 @@ def test_benchmark_gradient(model, scale, sensitivity_method, request):
     if (
         model
         in (
-            "Blasi_CellSystems2016",
             # events with parameter-dependent triggers
             # https://github.com/AMICI-dev/AMICI/issues/18
             "Oliveira_NatCommun2021",
@@ -154,25 +175,11 @@ def test_benchmark_gradient(model, scale, sensitivity_method, request):
         # FIXME
         pytest.skip()
 
-    if (
-        model
-        in (
-            "Weber_BMC2015",
-            "Sneyd_PNAS2002",
-        )
-        and sensitivity_method == SensitivityMethod.forward
-    ):
-        # FIXME
-        pytest.skip()
-
     petab_problem = benchmark_models_petab.get_problem(model)
     petab.flatten_timepoint_specific_output_overrides(petab_problem)
 
     # Only compute gradient for estimated parameters.
-    parameter_df_free = petab_problem.parameter_df.loc[
-        petab_problem.x_free_ids
-    ]
-    parameter_ids = list(parameter_df_free.index)
+    parameter_ids = petab_problem.x_free_ids
     cur_settings = settings[model]
 
     # Setup AMICI objects.
@@ -183,13 +190,10 @@ def test_benchmark_gradient(model, scale, sensitivity_method, request):
     amici_solver = amici_model.getSolver()
     amici_solver.setAbsoluteTolerance(cur_settings.atol_sim)
     amici_solver.setRelativeTolerance(cur_settings.rtol_sim)
-    amici_solver.setMaxSteps(int(1e5))
+    amici_solver.setMaxSteps(2 * 10**5)
     amici_solver.setSensitivityMethod(sensitivity_method)
-
-    if model in ("Brannmark_JBC2010",):
-        amici_model.setSteadyStateSensitivityMode(
-            amici.SteadyStateSensitivityMode.integrationOnly
-        )
+    # TODO: we should probably test all sensitivity modes
+    amici_model.setSteadyStateSensitivityMode(cur_settings.ss_sensitivity_mode)
 
     amici_function, amici_derivative = simulate_petab_to_cached_functions(
         petab_problem=petab_problem,
@@ -204,24 +208,20 @@ def test_benchmark_gradient(model, scale, sensitivity_method, request):
         # cache=not debug,
         cache=False,
     )
-
-    noise_level = 0.1
     np.random.seed(cur_settings.rng_seed)
 
     # find a point where the derivative can be computed
     for _ in range(5):
         if scale:
-            point = np.asarray(
-                list(
-                    petab_problem.scale_parameters(
-                        dict(parameter_df_free.nominalValue)
-                    ).values()
-                )
+            point = petab_problem.x_nominal_free_scaled
+            point_noise = (
+                np.random.randn(len(point)) * cur_settings.noise_level
             )
-            point_noise = np.random.randn(len(point)) * noise_level
         else:
-            point = parameter_df_free.nominalValue.values
-            point_noise = np.random.randn(len(point)) * point * noise_level
+            point = petab_problem.x_nominal_free
+            point_noise = (
+                np.random.randn(len(point)) * point * cur_settings.noise_level
+            )
         point += point_noise  # avoid small gradients at nominal value
 
         try:
@@ -239,11 +239,19 @@ def test_benchmark_gradient(model, scale, sensitivity_method, request):
         sizes=cur_settings.step_sizes,
         direction_ids=parameter_ids,
         method_ids=[MethodId.CENTRAL, MethodId.FORWARD, MethodId.BACKWARD],
-        success_checker=Consistency(atol=1e-5, rtol=1e-1),
+        success_checker=Consistency(
+            rtol=cur_settings.rtol_consistency,
+            atol=cur_settings.atol_consistency,
+        ),
         expected_result=expected_derivative,
         relative_sizes=not scale,
     )
 
+    print()
+    print("Testing at:", point)
+    print("Expected derivative:", expected_derivative)
+    print("Actual derivative:", derivative.series.values)
+
     if debug:
         write_debug_output(
             debug_path / f"{request.node.callspec.id}.tsv",
@@ -258,9 +266,53 @@ def test_benchmark_gradient(model, scale, sensitivity_method, request):
         point,
         rtol=cur_settings.rtol_check,
         atol=cur_settings.atol_check,
+        always_print=True,
     )
 
 
+def assert_gradient_check_success(
+    derivative: fiddy.Derivative,
+    expected_derivative: np.array,
+    point: np.array,
+    atol: float,
+    rtol: float,
+    always_print: bool = False,
+) -> None:
+    if not derivative.df.success.all():
+        raise AssertionError(
+            f"Failed to compute finite differences:\n{derivative.df}"
+        )
+    check = NumpyIsCloseDerivativeCheck(
+        derivative=derivative,
+        expectation=expected_derivative,
+        point=point,
+    )
+    check_result = check(rtol=rtol, atol=atol)
+
+    if check_result.success is True and not always_print:
+        return
+
+    df = check_result.df
+    df["abs_diff"] = np.abs(df["expectation"] - df["test"])
+    df["rel_diff"] = df["abs_diff"] / np.abs(df["expectation"])
+    df["atol_success"] = df["abs_diff"] <= atol
+    df["rtol_success"] = df["rel_diff"] <= rtol
+    max_adiff = df["abs_diff"].max()
+    max_rdiff = df["rel_diff"].max()
+    with pd.option_context("display.max_columns", None, "display.width", None):
+        message = (
+            f"Gradient check failed:\n{df}\n\n"
+            f"Maximum absolute difference: {max_adiff} (tolerance: {atol})\n"
+            f"Maximum relative difference: {max_rdiff} (tolerance: {rtol})"
+        )
+
+    if check_result.success is False:
+        raise AssertionError(message)
+
+    if always_print:
+        print(message)
+
+
 def write_debug_output(
     file_name, derivative, expected_derivative, parameter_ids
 ):
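As a reading aid for the `Consistency` success checker configured above with `atol_consistency`/`rtol_consistency`: fiddy accepts a finite-difference result only when the forward, backward, and central estimates agree to within tolerance. Below is a minimal NumPy sketch of that idea, not fiddy's actual implementation; the helper names `fd_estimates` and `consistent_fd` are hypothetical.

```python
import numpy as np


def fd_estimates(f, x, i, size):
    """Forward, backward, and central difference of f w.r.t. x[i] with step `size`."""
    e = np.zeros_like(x)
    e[i] = size
    forward = (f(x + e) - f(x)) / size
    backward = (f(x) - f(x - e)) / size
    central = (f(x + e) - f(x - e)) / (2 * size)
    return np.array([forward, backward, central])


def consistent_fd(f, x, i, sizes, atol=1e-5, rtol=1e-1):
    """Return (step size, estimate) once the three schemes agree, else None."""
    for size in sizes:
        est = fd_estimates(f, x, i, size)
        center = est.mean()
        # each estimate must be close to the mean of the three, analogous to np.isclose
        if np.all(np.abs(est - center) <= atol + rtol * np.abs(center)):
            return size, center
    return None


# toy usage: d/dx0 of f(x) = sum(x**2) at x = [1, 2] is 2
print(consistent_fd(lambda x: np.sum(x**2), np.array([1.0, 2.0]), 0, [1e-3, 1e-5]))
```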
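One design choice worth noting is `settings = defaultdict(GradientCheckSettings)`: any benchmark model without an explicit entry gets default-constructed settings, so only the problematic models need overrides. A small self-contained sketch, using a trimmed-down stand-in for the dataclass rather than the full class from the test file:

```python
from collections import defaultdict
from dataclasses import dataclass


@dataclass
class GradientCheckSettings:  # trimmed-down stand-in for the class in the diff
    atol_check: float = 1e-3
    rtol_check: float = 1e-2
    noise_level: float = 0.05


settings = defaultdict(GradientCheckSettings)
settings["Zheng_PNAS2012"] = GradientCheckSettings(atol_check=5e-4, rtol_check=4e-3)

print(settings["Fujita_SciSignal2010"].atol_check)  # 0.001, default-constructed entry
print(settings["Zheng_PNAS2012"].atol_check)        # 0.0005, explicit override
```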