
Switch from pm.DensityDist to pm.Potential to describe the likelihood in MLDA notebooks and script examples. This is done because of the bug described in arviz-devs/arviz#1279. The commit also changes a few parameters in the MLDA .py example to match the ones in the equivalent notebook.
gmingas committed Jul 21, 2020
1 parent 9d25cd2 commit 27f8b3c
Showing 4 changed files with 21 additions and 21 deletions.
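For reference, the swap this commit makes looks roughly like the sketch below: a black-box log-likelihood wrapped in a Theano Op and attached to the PyMC3 model through pm.Potential instead of pm.DensityDist. This is a minimal illustrative example rather than the notebooks' exact code; my_loglik, LogLike, the toy data and the priors are stand-ins, and only the DensityDist-to-Potential change itself mirrors the commit.

import numpy as np
import pymc3 as pm
import theano.tensor as tt

def my_loglik(theta, data, sigma):
    # toy Gaussian log-likelihood with mean theta[0] + theta[1] (illustrative only)
    mu = theta[0] + theta[1]
    return -0.5 * np.sum(((data - mu) / sigma) ** 2)

class LogLike(tt.Op):
    # black-box log-likelihood Op: takes a parameter vector, returns a scalar
    itypes = [tt.dvector]
    otypes = [tt.dscalar]

    def __init__(self, data, sigma):
        self.data = data
        self.sigma = sigma

    def perform(self, node, inputs, outputs):
        (theta,) = inputs
        outputs[0][0] = np.array(my_loglik(theta, self.data, self.sigma))

data = np.random.normal(1.0, 0.5, size=100)
logl = LogLike(data, 0.5)

with pm.Model():
    m = pm.Normal('m', 0, 1)
    c = pm.Normal('c', 0, 1)
    theta = tt.as_tensor_variable([m, c])

    # old pattern, removed by this commit because of the arviz issue above:
    # pm.DensityDist('likelihood', lambda v: logl(v), observed={'v': theta})

    # new pattern used throughout the changed files:
    pm.Potential('likelihood', logl(theta))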
10 changes: 5 additions & 5 deletions docs/source/notebooks/MLDA_benchmarks_tuning.ipynb
@@ -231,9 +231,9 @@
" # convert m and c to a tensor vector\n",
" theta = tt.as_tensor_variable(parameters)\n",
"\n",
" # use a DensityDist (use a lamdba function to \"call\" the Op)\n",
" # use a Potential for the likelihood\n",
" ll = logl[j]\n",
" pm.DensityDist('likelihood', lambda v: ll(v), observed={'v': theta})\n",
" pm.Potential('likelihood', ll(theta))\n",
"\n",
" coarse_models.append(cmodel)\n",
" \n",
@@ -248,8 +248,8 @@
" # Convert m and c to a tensor vector\n",
" theta = tt.as_tensor_variable(parameters)\n",
"\n",
" # use a DensityDist (use a lamdba function to \"call\" the Op)\n",
" pm.DensityDist('likelihood', lambda v: logl[-1](v), observed={'v': theta})\n",
" ## use a Potential for the likelihood\n",
" pm.Potential('likelihood', logl[-1](theta))\n",
" \n",
" return model, coarse_models, true_parameters"
]
@@ -2716,7 +2716,7 @@
"source": [
"Generally, the optimal subsampling rate depends on the complexity of the fine posterior. The more complex the posterior, the more samples are needed to generate a decent proposal. The reason is that the MLDA sampler is based on the assumption that the coarse proposal samples (i.e. the samples sent from the coarse chain to the fine one) are independent from each other. In order to generate independent samples, it is necessary to run the coarse chain for an adequate number of iterations to get rid of autocorrelation. The more complex the posterior the more iterations are needed and thus a larger subsampling rate.\n",
"\n",
"Note that in cases where you have more than one coarse model/level, MLDA allows you to choose a different subsampling rate for each coarse level (as a list of integers when you instantiate the stepper)."
"Note that in cases where you have more than one coarse model/level, MLDA allows you to choose a different subsampling rate for each coarse level (as a list of integers when you instantiate the stepper)."
]
}
],
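The note above about per-level subsampling rates corresponds, roughly, to the sketch below. It assumes the pm.MLDA interface of PyMC3 3.9-era releases (a subsampling_rates argument that accepts either a single integer or a list with one entry per coarse level); model and coarse_models stand for the objects built in the surrounding notebook code.

with model:
    # one subsampling rate per coarse level, rather than a single integer for all levels
    step = pm.MLDA(coarse_models=coarse_models, subsampling_rates=[10, 5])
    trace = pm.sample(draws=2000, tune=1000, chains=2, step=step)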
8 changes: 4 additions & 4 deletions docs/source/notebooks/MLDA_multilevel_groundwater_flow.ipynb
@@ -352,9 +352,9 @@
" # convert m and c to a tensor vector\n",
" theta = tt.as_tensor_variable(parameters)\n",
"\n",
" # use a DensityDist (use a lamdba function to \"call\" the Op)\n",
" # use a Potential for the likelihood\n",
" ll = logl[j]\n",
" pm.DensityDist('likelihood', lambda v: ll(v), observed={'v': theta})\n",
" pm.Potential('likelihood', ll(theta))\n",
"\n",
" coarse_models.append(model)\n"
]
@@ -599,8 +599,8 @@
" # Convert m and c to a tensor vector\n",
" theta = tt.as_tensor_variable(parameters)\n",
"\n",
" # use a DensityDist (use a lamdba function to \"call\" the Op)\n",
" pm.DensityDist('likelihood', lambda v: logl[-1](v), observed={'v': theta})\n",
" # use a Potential for the likelihood\n",
" pm.Potential('likelihood', logl[-1](theta))\n",
"\n",
" # Initialise an MLDA step method object, passing the subsampling rate and\n",
" # coarse models list\n",
8 changes: 4 additions & 4 deletions
@@ -352,9 +352,9 @@
" # convert m and c to a tensor vector\n",
" theta = tt.as_tensor_variable(parameters)\n",
"\n",
" # use a DensityDist (use a lamdba function to \"call\" the Op)\n",
" # use a Potential for the likelihood\n",
" ll = logl[j]\n",
" pm.DensityDist('likelihood', lambda v: ll(v), observed={'v': theta})\n",
" pm.Potential('likelihood', ll(theta))\n",
"\n",
" coarse_models.append(model)\n"
]
@@ -557,8 +557,8 @@
" # Convert m and c to a tensor vector\n",
" theta = tt.as_tensor_variable(parameters)\n",
"\n",
" # use a DensityDist (use a lamdba function to \"call\" the Op)\n",
" pm.DensityDist('likelihood', lambda v: logl[-1](v), observed={'v': theta})\n",
" # use a Potential for the likelihood\n",
" pm.Potential('likelihood', logl[-1](theta))\n",
"\n",
" # Initialise an MLDA step method object, passing the subsampling rate and\n",
" # coarse models list\n",
16 changes: 8 additions & 8 deletions docs/source/notebooks/mlda/MLDA_multilevel_groundwater_flow.py
@@ -113,24 +113,24 @@
# Set random field parameters
field_mean = 0
field_stdev = 1
- lamb_cov = 0.1
+ lamb_cov = 0.05

# Set the number of unknown parameters (i.e. dimension of theta in posterior)
nparam = 3

# Number of draws from the distribution
- ndraws = 1000
+ ndraws = 6000

# Number of burn-in samples
- nburn = 500
+ nburn = 6000

# MLDA and Metropolis tuning parameters
tune = True
tune_interval = 100
discard_tuning = True

# Number of independent chains
- nchains = 2
+ nchains = 4

# Subsampling rate for MLDA
nsub = 5
@@ -273,9 +273,9 @@ def perform(self, node, inputs, outputs):
# convert m and c to a tensor vector
theta = tt.as_tensor_variable(parameters)

- # use a DensityDist (use a lamdba function to "call" the Op)
+ # use a Potential for the likelihood
temp = logl[j]
- pm.DensityDist('likelihood', lambda v, ll=temp: ll(v), observed={'v': theta})
+ pm.Potential('likelihood', temp(theta))

coarse_models.append(model)

@@ -300,8 +300,8 @@ def perform(self, node, inputs, outputs):
# Convert m and c to a tensor vector
theta = tt.as_tensor_variable(parameters)

- # use a DensityDist (use a lamdba function to "call" the Op)
- pm.DensityDist('likelihood', lambda v: logl[-1](v), observed={'v': theta})
+ # use a Potential for the likelihood
+ pm.Potential('likelihood', logl[-1](theta))

# Initialise an MLDA step method object, passing the subsampling rate and
# coarse models list
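The tail of the script (cut off here) initialises the MLDA stepper and draws samples, as the comment above indicates. Below is a hedged sketch of how the settings defined earlier typically feed into that step: the parameter names (nsub, tune_interval, ndraws, nburn, nchains, discard_tuning) come from the hunks above, while the MLDA, Metropolis and sample calls are illustrative and may not match the actual file.

with model:  # the fine model's context (variable name assumed)
    # MLDA stepper built from the coarse models and the subsampling rate defined earlier
    step_mlda = pm.MLDA(coarse_models=coarse_models, subsampling_rates=nsub)

    # plain Metropolis stepper, e.g. for comparison runs, using the tuning interval above
    step_metropolis = pm.Metropolis(tune_interval=tune_interval)

    # draw the MLDA chains with the run-length settings defined earlier
    trace = pm.sample(
        draws=ndraws,
        tune=nburn,
        chains=nchains,
        step=step_mlda,
        discard_tuned_samples=discard_tuning,
    )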
