Merge pull request #651 from ICB-DCM/develop
0.10.3 Release
FFroehlich authored Mar 13, 2019
2 parents b7095b5 + 9c73221 commit 0527b5d
Showing 20 changed files with 602 additions and 460 deletions.
1 change: 0 additions & 1 deletion .travis.yml
@@ -91,7 +91,6 @@ install:
- if [[ "$CI_MODE" == "test" ]]; then ./scripts/buildSundials.sh; fi
- if [[ "$CI_MODE" == "test" ]]; then ./scripts/buildCpputest.sh; fi
- if [[ "$CI_MODE" == "test" ]]; then ./scripts/buildBNGL.sh; fi
- if [[ "$CI_MODE" == "test" ]]; then export BNGPATH=$(pwd)/ThirdParty/BioNetGen-2.3.2; fi
- if [[ "$CI_MODE" == "test" ]]; then ./scripts/buildAmici.sh; fi
- if [[ "$CI_MODE" == "test" ]]; then ./scripts/installAmiciArchive.sh; fi
- if [[ "$CI_MODE" == "test" ]]; then ./scripts/installAmiciSource.sh; fi
2 changes: 2 additions & 0 deletions include/amici/amici.h
@@ -38,12 +38,14 @@ std::unique_ptr<ReturnData> runAmiciSimulation(Solver &solver, const ExpData *ed
* @param solver Solver instance
* @param edatas experimental data objects
* @param model model specification object
* @param failfast flag to allow early termination
* @param num_threads number of threads for parallel execution
* @return vector of pointers to return data objects
*/
std::vector<std::unique_ptr<ReturnData>> runAmiciSimulations(Solver const& solver,
const std::vector<ExpData *> &edatas,
Model const& model,
const bool failfast,
int num_threads);

} // namespace amici
1 change: 1 addition & 0 deletions include/amici/serialization.h
@@ -87,6 +87,7 @@ void serialize(Archive &ar, amici::Model &u, const unsigned int version) {
ar &const_cast<int &>(u.nw);
ar &const_cast<int &>(u.ndwdx);
ar &const_cast<int &>(u.ndwdp);
ar &const_cast<int &>(u.ndxdotdw);
ar &const_cast<int &>(u.nnz);
ar &const_cast<int &>(u.nJ);
ar &const_cast<int &>(u.ubw);
5 changes: 5 additions & 0 deletions include/amici/sundials_matrix_wrapper.h
@@ -159,7 +159,12 @@ class SUNMatrixWrapper {
void multiply(realtype *c, const realtype *b) const;

private:
void update_ptrs();

SUNMatrix matrix = nullptr;
realtype *data_ptr = nullptr;
sunindextype *indexptrs_ptr = nullptr;
sunindextype *indexvals_ptr = nullptr;
};

} // namespace amici
5 changes: 4 additions & 1 deletion python/amici/__init__.py
@@ -153,13 +153,15 @@ def ExpData(*args):
return amici.ExpData(*args)


def runAmiciSimulations(model, solver, edata_list, num_threads=1):
def runAmiciSimulations(model, solver, edata_list, failfast=True,
num_threads=1):
""" Convenience wrapper for loops of amici.runAmiciSimulation
Arguments:
model: Model instance
solver: Solver instance, must be generated from Model.getSolver()
edata_list: list of ExpData instances
failfast: returns as soon as an integration failure is encountered
num_threads: number of threads to use
(only used if compiled with openmp)
@@ -174,5 +176,6 @@ def runAmiciSimulations(model, solver, edata_list, num_threads=1):
rdata_ptr_list = amici.runAmiciSimulations(solver.get(),
edata_ptr_vector,
model.get(),
failfast,
num_threads)
return [numpy.rdataToNumPyArrays(r) for r in rdata_ptr_list]
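For orientation, a minimal usage sketch of the extended wrapper (not part of the diff; the model package name and the experiment setup below are assumptions):

import amici
import numpy as np

# Assumption: a previously compiled, importable AMICI model package,
# here called "model_example" (hypothetical name).
import model_example as model_module

model = model_module.getModel()
model.setTimepoints(np.linspace(0, 10, 11))
solver = model.getSolver()

# Synthetic experimental data generated from a plain forward simulation
# (sigma_y = 1.0, sigma_z = 1.0), duplicated as four "conditions".
rdata = amici.runAmiciSimulation(model, solver)
edata_list = [amici.ExpData(rdata, 1.0, 1.0) for _ in range(4)]

# failfast=True (the default) stops after the first integration failure;
# failfast=False simulates all remaining conditions regardless of failures.
rdata_list = amici.runAmiciSimulations(model, solver, edata_list,
                                       failfast=False, num_threads=2)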
137 changes: 137 additions & 0 deletions python/amici/gradient_check.py
@@ -0,0 +1,137 @@
from . import (runAmiciSimulation, SensitivityOrder_none,
SensitivityMethod_forward)
import numpy as np
import copy


def check_finite_difference(x0, model, solver, edata, ip, fields,
assert_fun, atol=1e-4, rtol=1e-4, epsilon=1e-3):
old_sensitivity_order = solver.getSensitivityOrder()
old_parameters = model.getParameters()
old_plist = model.getParameterList()

# sensitivity
p = copy.deepcopy(x0)
plist = [ip]

model.setParameters(p)
model.setParameterList(plist)
rdata = runAmiciSimulation(model, solver, edata)

# finite difference
solver.setSensitivityOrder(SensitivityOrder_none)

# forward:
p = copy.deepcopy(x0)
p[ip] += epsilon/2
model.setParameters(p)
rdataf = runAmiciSimulation(model, solver, edata)

# backward:
p = copy.deepcopy(x0)
p[ip] -= epsilon/2
model.setParameters(p)
rdatab = runAmiciSimulation(model, solver, edata)

for field in fields:
sensi_raw = rdata[f's{field}']
fd = (rdataf[field]-rdatab[field])/epsilon
if len(sensi_raw.shape) == 1:
sensi = sensi_raw[0]
elif len(sensi_raw.shape) == 2:
sensi = sensi_raw[:, 0]
elif len(sensi_raw.shape) == 3:
sensi = sensi_raw[:, 0, :]
else:
assert_fun(False) # not implemented

check_close(sensi, fd, assert_fun, atol, rtol, field, ip=ip)

solver.setSensitivityOrder(old_sensitivity_order)
model.setParameters(old_parameters)
model.setParameterList(old_plist)


def check_derivatives(model, solver, edata, assert_fun,
atol=1e-4, rtol=1e-4, epsilon=1e-3):
"""Finite differences check for likelihood gradient
Arguments:
model: amici model
solver: amici solver
edata: exp data
atol: absolute tolerance
rtol: relative tolerance
epsilon: finite difference step-size
"""
from scipy.optimize import check_grad

p = np.array(model.getParameters())

rdata = runAmiciSimulation(model, solver, edata)

fields = ['llh']

leastsquares_applicable = \
solver.getSensitivityMethod() == SensitivityMethod_forward

if 'ssigmay' in rdata.keys():
if rdata['ssigmay'] is not None:
if rdata['ssigmay'].any():
leastsquares_applicable = False

if leastsquares_applicable:
fields += ['res', 'x', 'y']

check_results(rdata, 'FIM',
np.dot(rdata['sres'].transpose(), rdata['sres']),
assert_fun,
1e-8, 1e-4)
check_results(rdata, 'sllh',
-np.dot(rdata['res'].transpose(), rdata['sres']),
assert_fun,
1e-8, 1e-4)
for ip in range(len(p)):
check_finite_difference(p, model, solver, edata, ip, fields,
assert_fun, atol=atol, rtol=rtol,
epsilon=epsilon)


def check_close(result, expected, assert_fun, atol, rtol, field, ip=None):
close = np.isclose(result, expected, atol=atol, rtol=rtol, equal_nan=True)

if not close.all():
if ip is None:
index_str = ''
check_type = 'Regression check '
else:
index_str = f'at index ip={ip} '
check_type = 'FD check '
print(f'{check_type} failed for {field} {index_str}for '
f'{close.sum()} indices:')
adev = np.abs(result - expected)
rdev = np.abs((result - expected)/(expected + atol))
print(f'max(adev): {adev.max()}, max(rdev): {rdev.max()}')

assert_fun(close.all())


def check_results(rdata, field, expected, assert_fun, atol, rtol):
"""
checks whether rdata[field] agrees with expected according to provided
tolerances
Arguments:
rdata: simulation results as returned by amici.runAmiciSimulation
field: name of the field to check
expected: expected test results
atol: absolute tolerance
rtol: relative tolerance
"""

result = rdata[field]
if type(result) is float:
result = np.array(result)

check_close(result, expected, assert_fun, atol, rtol, field)

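For orientation, a sketch of how the new gradient-check module might be driven (not part of the commit; the model package name and the assert function are placeholders):

import amici
import numpy as np
from amici.gradient_check import check_derivatives

# Assumption: a previously compiled, importable AMICI model package
# (hypothetical name).
import model_example as model_module

def assert_fun(x):
    # assertions are delegated to the caller, e.g. a unittest assertTrue
    assert x

model = model_module.getModel()
model.setTimepoints(np.linspace(0, 10, 11))

solver = model.getSolver()
solver.setSensitivityOrder(amici.SensitivityOrder_first)
solver.setSensitivityMethod(amici.SensitivityMethod_forward)

# Synthetic data from a forward simulation, used as the reference dataset.
rdata = amici.runAmiciSimulation(model, solver)
edata = amici.ExpData(rdata, 1.0, 1.0)

# Compares forward sensitivities against central finite differences for
# every parameter, covering llh and, where applicable, res, x and y.
check_derivatives(model, solver, edata, assert_fun,
                  atol=1e-4, rtol=1e-4, epsilon=1e-3)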
40 changes: 35 additions & 5 deletions python/amici/ode_export.py
@@ -691,6 +691,9 @@ class ODEModel:
_syms: carries symbolic identifiers of the symbolic variables of the
model @type dict
_strippedsyms: carries symbolic identifiers that were stripped of
additional class information @type dict
_sparsesyms: carries linear list of all symbolic identifiers for
sparsified variables @type dict
@@ -759,6 +762,7 @@ def __init__(self):
self._vals = dict()
self._names = dict()
self._syms = dict()
self._strippedsyms = dict()
self._sparsesyms = dict()
self._colptrs = dict()
self._rowvals = dict()
@@ -913,7 +917,7 @@ def add_conservation_law(self, state, total_abundance, state_expr,
state_id = self._states[ix].get_id()

self.add_component(
Expression(state_id, f'cl_{state_id}', state_expr)
Expression(state_id, str(state_id), state_expr)
)

self.add_component(
@@ -1004,22 +1008,31 @@ def np(self):
"""
return len(self.sym('p'))

def sym(self, name):
def sym(self, name, stripped=False):
"""Returns (and constructs if necessary) the identifiers for a symbolic
entity.
Arguments:
name: name of the symbolic variable @type str
stripped: (optional) should additional class information be
stripped from the symbolic variables? @type bool
Returns:
containing the symbolic identifiers @type symengine.DenseMatrix
Raises:
ValueError if stripped symbols not available
"""
if name not in self._syms:
self._generateSymbol(name)
return self._syms[name]

if stripped:
if name not in self._variable_prototype:
raise ValueError('Stripped symbols only available for '
'variables from variable prototypes')
return self._strippedsyms[name]
else:
return self._syms[name]

def sparsesym(self, name):
"""Returns (and constructs if necessary) the sparsified identifiers for
@@ -1166,6 +1179,10 @@ def _generateSymbol(self, name):
comp.get_id()
for comp in getattr(self, component)
])
self._strippedsyms[name] = sp.Matrix([
sp.Symbol(comp.get_name())
for comp in getattr(self, component)
])
if name == 'y':
self._syms['my'] = sp.Matrix([
sp.Symbol(f'm{strip_pysb(comp.get_id())}')
Expand Down Expand Up @@ -1486,13 +1503,26 @@ def _derivative(self, eq, var, name=None):
self._lock_total_derivative = False
return

# this is the basic requirement check
needs_stripped_symbols = eq == 'xdot' and var != 'x'

# partial derivative
if eq == 'Jy':
eq = self.eq(eq).transpose()
else:
eq = self.eq(eq)

sym_var = self.sym(var)
if pysb is not None and needs_stripped_symbols:
needs_stripped_symbols = not any(
isinstance(sym, pysb.Component)
for sym in eq.free_symbols
)

# now check whether we are working with energy_modeling branch
# where pysb class info is already stripped
# TODO: fixme as soon as energy_modeling made it to the main pysb
# branch
sym_var = self.sym(var, needs_stripped_symbols)

if min(eq.shape) and min(sym_var.shape) \
and eq.is_zero is not True and sym_var.is_zero is not True:
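To make the stripped/unstripped symbol lookup concrete, an illustrative sketch (not part of the commit; it assumes an ODEModel instance ode_model built during a pysb import and uses 'p' merely as an example of a variable-prototype name):

# Unstripped: identifiers may still carry pysb class information.
p_full = ode_model.sym('p')
# Stripped: plain symbolic identifiers holding only the component names.
p_plain = ode_model.sym('p', stripped=True)
# Stripped symbols exist only for variables from the variable prototypes;
# requesting them for any other name raises a ValueError.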
1 change: 0 additions & 1 deletion python/amici/pysb_import.py
@@ -793,7 +793,6 @@ def _greedy_target_index_update(cl_prototypes):
# with the highest diff_fillin (note that the target index counts
# are recomputed on the fly)


if prototype['diff_fillin'] > -1 \
and (
get_target_indices(cl_prototypes).count(
3 changes: 2 additions & 1 deletion python/sdist/MANIFEST.in
@@ -6,4 +6,5 @@ include amici/*
include amici/src/*template*
include amici/swig/CMakeLists_model.txt
include setup_clibs.py
include version.txt
include custom_commands.py
include version.txt
1 change: 1 addition & 0 deletions python/sdist/amici/gradient_check.py

