Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Permalink
add op random.beta (#17390)
Browse files Browse the repository at this point in the history
  • Loading branch information
Alicia1529 committed Jan 30, 2020
1 parent ab8701a commit 8a6aed9
Show file tree
Hide file tree
Showing 4 changed files with 210 additions and 3 deletions.
59 changes: 58 additions & 1 deletion python/mxnet/ndarray/numpy/random.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@


__all__ = ['randint', 'uniform', 'normal', "choice", "rand", "multinomial", "multivariate_normal",
"shuffle", 'gamma', 'exponential']
"shuffle", 'gamma', 'beta', 'exponential']


def randint(low, high=None, size=None, dtype=None, ctx=None, out=None):
Expand Down Expand Up @@ -486,6 +486,63 @@ def gamma(shape, scale=1.0, size=None, dtype=None, ctx=None, out=None):
raise ValueError("Distribution parameters must be either mxnet.numpy.ndarray or numbers")


def beta(a, b, size=None, dtype=None, ctx=None):
    r"""Draw samples from a Beta distribution.

    The Beta distribution is a special case of the Dirichlet distribution,
    and is related to the Gamma distribution. It has the probability
    distribution function

    .. math:: f(x; a,b) = \frac{1}{B(\alpha, \beta)} x^{\alpha - 1}
                          (1 - x)^{\beta - 1},

    where the normalisation, B, is the beta function,

    .. math:: B(\alpha, \beta) = \int_0^1 t^{\alpha - 1}
                                 (1 - t)^{\beta - 1} dt.

    It is often seen in Bayesian inference and order statistics.

    Parameters
    ----------
    a : float or array_like of floats
        Alpha, positive (>0).
    b : float or array_like of floats
        Beta, positive (>0).
    size : int or tuple of ints, optional
        Output shape. If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn. If size is ``None`` (default),
        a single value is returned if ``a`` and ``b`` are both scalars.
        Otherwise, ``np.broadcast(a, b).size`` samples are drawn.
    dtype : {'float16', 'float32', 'float64'}, optional
        Data type of output samples. Default is 'float32'.
        Dtype 'float32' or 'float64' is strongly recommended,
        since lower precision might lead to out of range issue.
    ctx : Context, optional
        Device context of output. Default is current context.

    Returns
    -------
    out : ndarray or scalar
        Drawn samples from the parameterized beta distribution.

    Notes
    -----
    To use this operator with scalars as input, please run ``npx.set_np()`` first.
    """
    dtype = 'float32' if dtype is None else dtype
    ctx = current_context() if ctx is None else ctx
    if size == ():
        size = None
    # Sample via the Gamma representation: if X ~ Gamma(a, 1) and
    # Y ~ Gamma(b, 1) are independent, then X / (X + Y) ~ Beta(a, b).
    # Intermediate draws use fp64 to prevent precision loss.
    x_sample = gamma(a, 1, size=size, dtype='float64', ctx=ctx)
    y_sample = gamma(b, 1, size=size, dtype='float64', ctx=ctx)
    ratio = x_sample / (x_sample + y_sample)
    return ratio.astype(dtype)


def rand(*size, **kwargs):
r"""Random values in a given shape.
Expand Down
49 changes: 48 additions & 1 deletion python/mxnet/numpy/random.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@


__all__ = ["randint", "uniform", "normal", "choice", "rand", "multinomial", "multivariate_normal",
"shuffle", "randn", "gamma", "exponential"]
"shuffle", "randn", "gamma", 'beta', "exponential"]


def randint(low, high=None, size=None, dtype=None, ctx=None, out=None):
Expand Down Expand Up @@ -495,6 +495,53 @@ def gamma(shape, scale=1.0, size=None, dtype=None, ctx=None, out=None):
return _mx_nd_np.random.gamma(shape, scale, size, dtype, ctx, out)


def beta(a, b, size=None, dtype=None, ctx=None):
    r"""Draw samples from a Beta distribution.

    The Beta distribution is a special case of the Dirichlet distribution,
    and is related to the Gamma distribution. It has the probability
    distribution function

    .. math:: f(x; a,b) = \frac{1}{B(\alpha, \beta)} x^{\alpha - 1}
                          (1 - x)^{\beta - 1},

    where the normalisation, B, is the beta function,

    .. math:: B(\alpha, \beta) = \int_0^1 t^{\alpha - 1}
                                 (1 - t)^{\beta - 1} dt.

    It is often seen in Bayesian inference and order statistics.

    Parameters
    ----------
    a : float or array_like of floats
        Alpha, positive (>0).
    b : float or array_like of floats
        Beta, positive (>0).
    size : int or tuple of ints, optional
        Output shape. If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn. If size is ``None`` (default),
        a single value is returned if ``a`` and ``b`` are both scalars.
        Otherwise, ``np.broadcast(a, b).size`` samples are drawn.
    dtype : {'float16', 'float32', 'float64'}, optional
        Data type of output samples. Default is 'float32'.
        Dtype 'float32' or 'float64' is strongly recommended,
        since lower precision might lead to out of range issue.
    ctx : Context, optional
        Device context of output. Default is current context.

    Returns
    -------
    out : ndarray or scalar
        Drawn samples from the parameterized beta distribution.

    Notes
    -----
    To use this operator with scalars as input, please run ``npx.set_np()`` first.
    """
    # Pure front-end wrapper: delegate sampling to the ndarray implementation.
    return _mx_nd_np.random.beta(a, b, size=size, dtype=dtype, ctx=ctx)


def randn(*size, **kwargs):
r"""Return a sample (or samples) from the "standard normal" distribution.
If positive, int_like or int-convertible arguments are provided,
Expand Down
59 changes: 58 additions & 1 deletion python/mxnet/symbol/numpy/random.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@


__all__ = ['randint', 'uniform', 'normal', 'multivariate_normal',
'rand', 'shuffle', 'gamma', 'exponential']
'rand', 'shuffle', 'gamma', 'beta', 'exponential']


def randint(low, high=None, size=None, dtype=None, ctx=None, out=None):
Expand Down Expand Up @@ -349,6 +349,63 @@ def gamma(shape, scale=1.0, size=None, dtype=None, ctx=None, out=None):
raise ValueError("Distribution parameters must be either _Symbol or numbers")


def beta(a, b, size=None, dtype=None, ctx=None):
    r"""Draw samples from a Beta distribution.

    The Beta distribution is a special case of the Dirichlet distribution,
    and is related to the Gamma distribution. It has the probability
    distribution function

    .. math:: f(x; a,b) = \frac{1}{B(\alpha, \beta)} x^{\alpha - 1}
                          (1 - x)^{\beta - 1},

    where the normalisation, B, is the beta function,

    .. math:: B(\alpha, \beta) = \int_0^1 t^{\alpha - 1}
                                 (1 - t)^{\beta - 1} dt.

    It is often seen in Bayesian inference and order statistics.

    Parameters
    ----------
    a : float or _Symbol of floats
        Alpha, positive (>0).
    b : float or _Symbol of floats
        Beta, positive (>0).
    size : int or tuple of ints, optional
        Output shape. If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn. If size is ``None`` (default),
        a single value is returned if ``a`` and ``b`` are both scalars.
        Otherwise, ``np.broadcast(a, b).size`` samples are drawn.
    dtype : {'float16', 'float32', 'float64'}, optional
        Data type of output samples. Default is 'float32'.
        Dtype 'float32' or 'float64' is strongly recommended,
        since lower precision might lead to out of range issue.
    ctx : Context, optional
        Device context of output. Default is current context.

    Returns
    -------
    out : _Symbol
        Drawn samples from the parameterized beta distribution.

    Notes
    -----
    To use this operator with scalars as input, please run ``npx.set_np()`` first.
    """
    dtype = 'float32' if dtype is None else dtype
    ctx = current_context() if ctx is None else ctx
    if size == ():
        size = None
    # Sample via the Gamma representation: if X ~ Gamma(a, 1) and
    # Y ~ Gamma(b, 1) are independent, then X / (X + Y) ~ Beta(a, b).
    # Intermediate draws use fp64 to prevent precision loss.
    x_sample = gamma(a, 1, size=size, dtype='float64', ctx=ctx)
    y_sample = gamma(b, 1, size=size, dtype='float64', ctx=ctx)
    ratio = x_sample / (x_sample + y_sample)
    return ratio.astype(dtype)


def exponential(scale=1.0, size=None):
r"""Draw samples from an exponential distribution.
Expand Down
46 changes: 46 additions & 0 deletions tests/python/unittest/test_numpy_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -3470,6 +3470,52 @@ def hybrid_forward(self, F, x):
assert out.shape == expected_shape


@with_seed()
@use_np
def test_np_random_beta():
    """Check ``np.random.beta`` output shape (vs. official numpy) and that all
    samples fall in the distribution's open support interval (0, 1), across
    sizes, dtypes, imperative/hybridized modes, and scalar parameters.

    Fix over previous version: drop the non-idiomatic ``assert expr == True``
    comparisons (flake8 E712) in favor of plain ``assert expr``.
    """
    class TestRandomBeta(HybridBlock):
        def __init__(self, size=None, dtype=None, ctx=None):
            super(TestRandomBeta, self).__init__()
            self._size = size
            self._dtype = dtype
            self._ctx = ctx

        def hybrid_forward(self, F, a, b):
            return F.np.random.beta(a, b, size=self._size, dtype=self._dtype, ctx=self._ctx)

    def _test_random_beta_range(output):
        # All draws must lie strictly inside (0, 1); vacuously true for
        # empty outputs such as shape (4, 0, 5).
        return _np.all(output > 0) and _np.all(output < 1)

    shape_list = [(), (1,), (2, 3), (4, 0, 5), 6, (7, 8), None]
    # since fp16 might incur precision issue, the corresponding test is skipped
    dtype_list = [np.float32, np.float64]
    hybridize_list = [False, True]
    data = np.array([1])
    for [param_shape, in_dtype, out_dtype, hybridize] in itertools.product(shape_list,
        dtype_list, dtype_list, hybridize_list):
        # Python 2 cannot express a true scalar output here; skip that case.
        if sys.version_info.major < 3 and param_shape == ():
            continue
        mx_data = data.astype(in_dtype)
        np_data = mx_data.asnumpy()
        test_random_beta = TestRandomBeta(size=param_shape, dtype=out_dtype)
        if hybridize:
            test_random_beta.hybridize()
        np_out = _np.random.beta(np_data, np_data, size=param_shape)
        mx_out = test_random_beta(mx_data, mx_data)
        mx_out_imperative = mx.np.random.beta(mx_data, mx_data, size=param_shape, dtype=out_dtype)

        # Only shapes are comparable to numpy: the draws themselves are random.
        assert_almost_equal(np_out.shape, mx_out.shape)
        assert_almost_equal(np_out.shape, mx_out_imperative.shape)
        assert _test_random_beta_range(mx_out.asnumpy())
        assert _test_random_beta_range(mx_out_imperative.asnumpy())

        # test scalar distribution parameters
        mx_out_imperative = mx.np.random.beta(1, 1, size=param_shape, dtype=out_dtype)
        assert _test_random_beta_range(mx_out_imperative.asnumpy())


@with_seed()
@use_np
def test_np_exponential():
Expand Down

0 comments on commit 8a6aed9

Please sign in to comment.