Skip to content
2 changes: 0 additions & 2 deletions pymc3/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,6 @@
from .exceptions import *
from . import sampling

from .debug import *

from .diagnostics import *
from .backends.tracetab import *

Expand Down
24 changes: 0 additions & 24 deletions pymc3/debug.py

This file was deleted.

4 changes: 2 additions & 2 deletions pymc3/distributions/continuous.py
Original file line number Diff line number Diff line change
Expand Up @@ -201,7 +201,7 @@ def logp(self, value):
return tt.zeros_like(value)

def _repr_latex_(self, name=None, dist=None):
return r'${} \sim \text{Flat}()$'.format(name)
return r'${} \sim \text{{Flat}}()$'.format(name)


class HalfFlat(PositiveContinuous):
Expand All @@ -218,7 +218,7 @@ def logp(self, value):
return bound(tt.zeros_like(value), value > 0)

def _repr_latex_(self, name=None, dist=None):
return r'${} \sim \text{{HalfFlat}()$'.format(name)
return r'${} \sim \text{{HalfFlat}}()$'.format(name)


class Normal(Continuous):
Expand Down
81 changes: 54 additions & 27 deletions pymc3/distributions/discrete.py
Original file line number Diff line number Diff line change
def logp(self, value):
    """Log-probability of the zero-inflated Poisson at ``value``.

    For ``value > 0`` the density is ``log(psi) + self.pois.logp(value)``;
    at zero it is ``log1p(-psi)`` combined with ``log(psi) - theta`` via
    ``logaddexp`` for numerical stability in log space.  The stale
    pre-refactor assignment of ``logp_val`` (diff residue) is removed so
    only one assignment and one return remain.
    """
    psi = self.psi
    theta = self.theta

    logp_val = tt.switch(
        tt.gt(value, 0),
        tt.log(psi) + self.pois.logp(value),
        logaddexp(tt.log1p(-psi), tt.log(psi) - theta))

    # bound(...) restricts support: value >= 0 and psi in [0, 1],
    # theta >= 0 — outside these the logp is invalid.
    return bound(
        logp_val,
        0 <= value,
        0 <= psi, psi <= 1,
        0 <= theta)
def logp(self, value):
    """Log-probability of the zero-inflated Binomial at ``value``.

    For ``value > 0`` the density is ``log(psi) + self.bin.logp(value)``;
    at zero it combines ``log1p(-psi)`` with
    ``log(psi) + n * log1p(-p)`` via ``logaddexp`` for numerical
    stability.  The stale pre-refactor assignment of ``logp_val`` (diff
    residue) is removed so only one assignment and one return remain.
    """
    psi = self.psi
    p = self.p
    n = self.n

    logp_val = tt.switch(
        tt.gt(value, 0),
        tt.log(psi) + self.bin.logp(value),
        logaddexp(tt.log1p(-psi), tt.log(psi) + n * tt.log1p(-p)))

    # bound(...) restricts support: 0 <= value <= n, psi and p in [0, 1].
    return bound(
        logp_val,
        0 <= value, value <= n,
        0 <= psi, psi <= 1,
        0 <= p, p <= 1)
def _repr_latex_(self, name=None, dist=None):
    """Return the LaTeX representation of ZeroInflatedBinomial for IPython.

    The pre-refactor multi-line return (diff residue) preceded the new
    code and would have shadowed it; only the refactored version is
    kept.  The ``dist is None`` guard mirrors the sibling
    ``ZeroInflatedNegativeBinomial._repr_latex_``.
    """
    if dist is None:
        dist = self
    n = dist.n
    p = dist.p
    psi = dist.psi

    name_n = get_variable_name(n)
    name_p = get_variable_name(p)
    name_psi = get_variable_name(psi)
    return (r'${} \sim \text{{ZeroInflatedBinomial}}'
            r'(\mathit{{n}}={}, \mathit{{p}}={}, '
            r'\mathit{{psi}}={})$'
            .format(name, name_n, name_p, name_psi))


class ZeroInflatedNegativeBinomial(Discrete):
Expand All @@ -731,10 +739,18 @@ class ZeroInflatedNegativeBinomial(Discrete):

.. math::

f(x \mid \psi, \mu, \alpha) = \left\{
    \begin{array}{l}
    (1-\psi) + \psi \left (
        \frac{\alpha}{\alpha+\mu}
    \right) ^\alpha, \text{if } x = 0 \\
    \psi \frac{\Gamma(x+\alpha)}{x! \Gamma(\alpha)} \left (
        \frac{\alpha}{\mu+\alpha}
    \right)^\alpha \left(
        \frac{\mu}{\mu+\alpha}
    \right)^x, \text{if } x=1,2,3,\ldots
    \end{array}
\right.

======== ==========================
Support :math:`x \in \mathbb{N}_0`
def logp(self, value):
    """Log-probability of the zero-inflated negative binomial at ``value``.

    For ``value > 0`` the density is ``log(psi) + self.nb.logp(value)``;
    at zero it combines ``log1p(-psi)`` with
    ``log(psi) + alpha * (log(alpha) - log(alpha + mu))`` via
    ``logaddexp`` for numerical stability.  The stale pre-refactor
    ``logp_val`` assignment (diff residue) is removed.
    """
    alpha = self.alpha
    mu = self.mu
    psi = self.psi

    # Non-zero branch: inflated NB density in log space.
    logp_other = tt.log(psi) + self.nb.logp(value)
    # Zero branch: mixture of the point mass at 0 and NB(0).
    logp_0 = logaddexp(
        tt.log1p(-psi),
        tt.log(psi) + alpha * (tt.log(alpha) - tt.log(alpha + mu)))

    logp_val = tt.switch(
        tt.gt(value, 0),
        logp_other,
        logp_0)

    # bound(...) restricts support: value >= 0, psi in [0, 1],
    # mu > 0 and alpha > 0.
    return bound(
        logp_val,
        0 <= value,
        0 <= psi, psi <= 1,
        mu > 0, alpha > 0)

def _repr_latex_(self, name=None, dist=None):
    """Return the LaTeX representation of ZeroInflatedNegativeBinomial
    for IPython.

    The pre-refactor multi-line return (diff residue) appeared before
    the new code and would have returned first, leaving the refactored
    lines dead; only the refactored version is kept.
    """
    if dist is None:
        dist = self
    mu = dist.mu
    alpha = dist.alpha
    psi = dist.psi

    name_mu = get_variable_name(mu)
    name_alpha = get_variable_name(alpha)
    name_psi = get_variable_name(psi)
    return (r'${} \sim \text{{ZeroInflatedNegativeBinomial}}'
            r'(\mathit{{mu}}={}, \mathit{{alpha}}={}, '
            r'\mathit{{psi}}={})$'
            .format(name, name_mu, name_alpha, name_psi))
22 changes: 22 additions & 0 deletions pymc3/distributions/distribution.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,28 @@ def _repr_latex_(self, name=None, dist=None):
"""Magic method name for IPython to use for LaTeX formatting."""
return None

def logp_nojac(self, *args, **kwargs):
    """Return the logp without any jacobian correction for transforms.

    Reparametrizing a distribution adds the log-determinant of the
    transformation's jacobian to the density so both parametrizations
    describe the same distribution.  MAP estimates, however, are not
    invariant under reparametrization, so they need the density with
    the jacobian terms excluded.

    Transformed distributions override this; for an untransformed
    distribution it is simply ``logp`` itself.
    """
    return self.logp(*args, **kwargs)

def logp_sum(self, *args, **kwargs):
    """Return the sum of the logp values for the given observations.

    Subclasses may override this when only the total — not the
    element-wise logp — is needed, e.g. to compute it faster.
    """
    elemwise_logp = self.logp(*args, **kwargs)
    return tt.sum(elemwise_logp)

__latex__ = _repr_latex_


Expand Down
3 changes: 3 additions & 0 deletions pymc3/distributions/transforms.py
Original file line number Diff line number Diff line change
def logp(self, x):
    """Log-density of the transformed variable ``x``, including the
    jacobian determinant of the transformation (so the transformed and
    untransformed parametrizations describe the same distribution)."""
    backtransformed = self.transform_used.backward(x)
    return self.dist.logp(backtransformed) + self.transform_used.jacobian_det(x)

def logp_nojac(self, x):
    """Log-density of the transformed variable ``x`` WITHOUT the
    jacobian determinant term (see ``Distribution.logp_nojac``)."""
    backtransformed = self.transform_used.backward(x)
    return self.dist.logp(backtransformed)

transform = Transform


Expand Down
79 changes: 73 additions & 6 deletions pymc3/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -152,6 +152,9 @@ class Factor(object):
"""Common functionality for objects with a log probability density
associated with them.
"""
def __init__(self, *args, **kwargs):
    # Cooperative pass-through constructor: forwards all arguments to
    # the next class in the MRO so Factor can be mixed into multiple
    # inheritance chains without swallowing constructor arguments.
    super(Factor, self).__init__(*args, **kwargs)

@property
def logp(self):
"""Compiled log probability density function"""
Expand All @@ -169,6 +172,18 @@ def d2logp(self, vars=None):
"""Compiled log probability density hessian function"""
return self.model.fn(hessian(self.logpt, vars))

@property
def logp_nojac(self):
    """Compiled log probability density function, excluding jacobian
    terms of transformed variables."""
    return self.model.fn(self.logp_nojact)

def dlogp_nojac(self, vars=None):
    """Compiled log density gradient function, without jacobian terms."""
    grad_expr = gradient(self.logp_nojact, vars)
    return self.model.fn(grad_expr)

def d2logp_nojac(self, vars=None):
    """Compiled log density hessian function, without jacobian terms."""
    hess_expr = hessian(self.logp_nojact, vars)
    return self.model.fn(hess_expr)

@property
def fastlogp(self):
"""Compiled log probability density function"""
Expand All @@ -182,13 +197,36 @@ def fastd2logp(self, vars=None):
"""Compiled log probability density hessian function"""
return self.model.fastfn(hessian(self.logpt, vars))

@property
def fastlogp_nojac(self):
    """Compiled log density function (via ``model.fastfn``), without
    jacobian terms."""
    return self.model.fastfn(self.logp_nojact)

def fastdlogp_nojac(self, vars=None):
    """Compiled log density gradient function, without jacobian terms."""
    grad_expr = gradient(self.logp_nojact, vars)
    return self.model.fastfn(grad_expr)

def fastd2logp_nojac(self, vars=None):
    """Compiled log density hessian function, without jacobian terms."""
    hess_expr = hessian(self.logp_nojact, vars)
    return self.model.fastfn(hess_expr)

@property
def logpt(self):
    """Theano scalar of log-probability of this factor.

    Uses the pre-summed ``logp_sum_unscaledt``; when ``total_size`` is
    set the sum is rescaled by ``self.scaling`` (minibatch scaling —
    see the comment where ``logp_sum_unscaledt`` is assigned).  The
    stale pre-refactor ``tt.sum(self.logp_elemwiset)`` assignments
    (diff residue) are removed so each branch assigns ``logp`` once.
    """
    if getattr(self, 'total_size', None) is not None:
        logp = self.logp_sum_unscaledt * self.scaling
    else:
        logp = self.logp_sum_unscaledt
    if self.name is not None:
        logp.name = '__logp_%s' % self.name
    return logp

@property
def logp_nojact(self):
    """Theano scalar of log-probability, excluding jacobian terms."""
    # NOTE(review): the name prefix here is '__logp_', same as `logpt`,
    # unlike Model.logp_nojact's '__logp_nojac_' — confirm intended.
    logp = tt.sum(self.logp_nojac_unscaledt)
    if getattr(self, 'total_size', None) is not None:
        logp = logp * self.scaling
    if self.name is not None:
        logp.name = '__logp_%s' % self.name
    return logp
Expand Down Expand Up @@ -623,9 +661,26 @@ def logp_dlogp_function(self, grad_vars=None, **kwargs):
def logpt(self):
    """Theano scalar of log-probability of the model"""
    with self:
        # Each basic RV contributes its scalar logpt; potentials are
        # summed term by term since each potential may be an
        # element-wise tensor.  The stale pre-refactor implementation
        # (tt.add(*map(tt.sum, factors)), diff residue) is removed.
        factors = [var.logpt for var in self.basic_RVs]
        logp_factors = tt.sum(factors)
        logp_potentials = tt.sum([tt.sum(pot) for pot in self.potentials])
        logp = logp_factors + logp_potentials
        if self.name:
            logp.name = '__logp_%s' % self.name
        else:
            logp.name = '__logp'
        return logp

@property
def logp_nojact(self):
    """Theano scalar of log-probability of the model, excluding the
    jacobian terms of transformed variables (see
    ``Distribution.logp_nojac`` for why MAP needs this)."""
    with self:
        factors = [var.logp_nojact for var in self.basic_RVs] + self.potentials
        # Sum each factor/potential individually before totalling, as
        # they may be element-wise tensors of different shapes.
        logp = tt.sum([tt.sum(factor) for factor in factors])
        if self.name:
            logp.name = '__logp_nojac_%s' % self.name
        else:
            logp.name = '__logp_nojac'
        return logp

@property
def varlogpt(self):
    """Theano scalar of log-probability of the model's random variables
    (excluding deterministic)."""
    with self:
        factors = [var.logpt for var in self.vars]
        # Single return only: the stale pre-refactor
        # `return tt.add(*map(tt.sum, factors))` (diff residue) shadowed
        # the refactored line and has been removed.
        return tt.sum(factors)

@property
def vars(self):
Expand Down Expand Up @@ -1066,6 +1121,10 @@ def __init__(self, type=None, owner=None, index=None, name=None,
self.tag.test_value = np.ones(
distribution.shape, distribution.dtype) * distribution.default()
self.logp_elemwiset = distribution.logp(self)
# The logp might need scaling in minibatches.
# This is done in `Factor`.
self.logp_sum_unscaledt = distribution.logp_sum(self)
self.logp_nojac_unscaledt = distribution.logp_nojac(self)
self.total_size = total_size
self.model = model
self.scaling = _get_scaling(total_size, self.shape, self.ndim)
Expand Down Expand Up @@ -1169,6 +1228,10 @@ def __init__(self, type=None, owner=None, index=None, name=None, data=None,

self.missing_values = data.missing_values
self.logp_elemwiset = distribution.logp(data)
# The logp might need scaling in minibatches.
# This is done in `Factor`.
self.logp_sum_unscaledt = distribution.logp_sum(data)
self.logp_nojac_unscaledt = distribution.logp_nojac(data)
self.total_size = total_size
self.model = model
self.distribution = distribution
Expand Down Expand Up @@ -1220,6 +1283,10 @@ def __init__(self, name, data, distribution, total_size=None, model=None):
self.missing_values = [datum.missing_values for datum in self.data.values()
if datum.missing_values is not None]
self.logp_elemwiset = distribution.logp(**self.data)
# The logp might need scaling in minibatches.
# This is done in `Factor`.
self.logp_sum_unscaledt = distribution.logp_sum(**self.data)
self.logp_nojac_unscaledt = distribution.logp_nojac(**self.data)
self.total_size = total_size
self.model = model
self.distribution = distribution
Expand Down
Loading