[MXNET-1029] Feature request: randint operator (#12749)
* add randint operator along with optional tag to params

* register param

* lint space issue

* randn issue fix

* uniform_int_distribution doesn't support int8, uint8 fix

* dtype ftype

* ftype to dtype - invalid template arg

* fix template arg issue

* test with int dtype for windows

* removed int8,uint8 from test

* gpu implementation

* gpu engine state diff

* removed gpu support

* empty commit

* temporary fix : batchnorm flaky test skip

* removed randn symbol specific code since other PR is on it

* revert ndarray/randn for compatibility

* added unit test for checking extremes and uniform distribution for sufficient samples

* increased the high val

* int32 to int64 support, indentation fix, check for optype correctly based on type of random function

* gpu support, revert finfertype using template specialization, remove defaults, prints, test other low high val

* fix for invalid template arg by checking for int32,int64

* gpu randint in random_generator

* sample_uniform issue and param, removed old flaky test skip line

* replaced discrete_uniform function by rand_int64 for consistency

* formula update and removed itype

* change ctx to include gpu, fix randint sample_op.cu typo

* trigger ci

* doc fix, check fix, whitespace remove

* added the without dtype testcase
ChaiBapchya authored and eric-haibin-lin committed Nov 27, 2018
1 parent e6fffe9 commit 8a94dbd
Showing 9 changed files with 342 additions and 88 deletions.
16 changes: 14 additions & 2 deletions include/mxnet/random_generator.h
@@ -31,6 +31,7 @@

#if MXNET_USE_CUDA
#include <curand_kernel.h>
#include <math.h>
#endif // MXNET_USE_CUDA

namespace mxnet {
@@ -54,7 +55,6 @@ class RandGenerator<cpu, DType> {
public:
typedef typename std::conditional<std::is_floating_point<DType>::value,
DType, double>::type FType;

explicit Impl(RandGenerator<cpu, DType> *gen, int state_idx)
: engine_(gen->states_ + state_idx) {}

@@ -63,6 +63,10 @@ class RandGenerator<cpu, DType> {

MSHADOW_XINLINE int rand() { return engine_->operator()(); }

MSHADOW_XINLINE int64_t rand_int64() {
return static_cast<int64_t>(engine_->operator()() << 31) + engine_->operator()();
}

MSHADOW_XINLINE FType uniform() {
typedef typename std::conditional<std::is_integral<DType>::value,
std::uniform_int_distribution<DType>,
@@ -78,7 +82,7 @@ class RandGenerator<cpu, DType> {

private:
std::mt19937 *engine_;
};
}; // class RandGenerator<cpu, DType>::Impl

static void AllocState(RandGenerator<cpu, DType> *inst) {
inst->states_ = new std::mt19937[kNumRandomStates];
@@ -137,6 +141,10 @@ class RandGenerator<gpu, DType> {
return curand(&state_);
}

MSHADOW_FORCE_INLINE __device__ int64_t rand_int64() {
return static_cast<int64_t>(curand(&state_) << 31) + curand(&state_);
}

MSHADOW_FORCE_INLINE __device__ float uniform() {
return static_cast<float>(1.0) - curand_uniform(&state_);
}
@@ -189,6 +197,10 @@ class RandGenerator<gpu, double> {
return curand(&state_);
}

MSHADOW_FORCE_INLINE __device__ int64_t rand_int64() {
return static_cast<int64_t>(curand(&state_) << 31) + curand(&state_);
}

MSHADOW_FORCE_INLINE __device__ double uniform() {
return static_cast<float>(1.0) - curand_uniform_double(&state_);
}
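The new rand_int64() methods on both the CPU and GPU generators build one 64-bit value out of two 32-bit draws from the underlying engine (std::mt19937 on CPU, cuRAND on GPU). A minimal sketch of that bit-composition idea, written here in Python rather than the C++ above and deliberately not reproducing the committed expression (which shifts the first draw by 31 bits), is:

import random

def rand_int64_sketch(rng=None):
    # Pack two independent 32-bit draws into a single 64-bit integer.
    # This illustrates the composition idea behind rand_int64(); the commit
    # itself shifts the first draw by 31 bits and adds the second.
    rng = rng or random.Random()
    hi = rng.getrandbits(32)  # upper 32 bits
    lo = rng.getrandbits(32)  # lower 32 bits
    return (hi << 32) | lo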
134 changes: 88 additions & 46 deletions python/mxnet/ndarray/random.py
@@ -59,23 +59,23 @@ def uniform(low=0, high=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwarg
Parameters
----------
low : float or NDArray
low : float or NDArray, optional
Lower boundary of the output interval. All values generated will be
greater than or equal to low. The default value is 0.
high : float or NDArray
high : float or NDArray, optional
Upper boundary of the output interval. All values generated will be
less than high. The default value is 1.0.
shape : int or tuple of ints
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `low` and
`high` are scalars, output shape will be `(m, n)`. If `low` and `high`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each `[low, high)` pair.
dtype : {'float16','float32', 'float64'}
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`low.context` when `low` is an NDArray.
out : NDArray
out : NDArray, optional
Store output to an existing NDArray.
@@ -111,21 +111,21 @@ def normal(loc=0, scale=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwarg
Parameters
----------
loc : float or NDArray
loc : float or NDArray, optional
Mean (centre) of the distribution.
scale : float or NDArray
scale : float or NDArray, optional
Standard deviation (spread or width) of the distribution.
shape : int or tuple of ints
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `loc` and
`scale` are scalars, output shape will be `(m, n)`. If `loc` and `scale`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each `[loc, scale)` pair.
dtype : {'float16','float32', 'float64'}
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`loc.context` when `loc` is an NDArray.
out : NDArray
out : NDArray, optional
Store output to an existing NDArray.
@@ -170,7 +170,7 @@ def randn(*shape, **kwargs):
`scale` are scalars, output shape will be `(m, n)`. If `loc` and `scale`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each `[loc, scale)` pair.
dtype : {'float16','float32', 'float64'}
dtype : {'float16', 'float32', 'float64'}
Data type of output samples. Default is 'float32'
ctx : Context
Device context of output. Default is current context. Overridden by
@@ -212,19 +212,19 @@ def poisson(lam=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs):
Parameters
----------
lam : float or NDArray
lam : float or NDArray, optional
Expectation of interval, should be >= 0.
shape : int or tuple of ints
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `lam` is
a scalar, output shape will be `(m, n)`. If `lam`
is an NDArray with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each entry in `lam`.
dtype : {'float16','float32', 'float64'}
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`lam.context` when `lam` is an NDArray.
out : NDArray
out : NDArray, optional
Store output to an existing NDArray.
@@ -259,19 +259,19 @@ def exponential(scale=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs)
Parameters
----------
scale : float or NDArray
scale : float or NDArray, optional
The scale parameter, \beta = 1/\lambda.
shape : int or tuple of ints
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `scale` is
a scalar, output shape will be `(m, n)`. If `scale`
is an NDArray with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each entry in `scale`.
dtype : {'float16','float32', 'float64'}
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`scale.context` when `scale` is an NDArray.
out : NDArray
out : NDArray, optional
Store output to an existing NDArray.
@@ -302,22 +302,22 @@ def gamma(alpha=1, beta=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwarg
Parameters
----------
alpha : float or NDArray
alpha : float or NDArray, optional
The shape of the gamma distribution. Should be greater than zero.
beta : float or NDArray
beta : float or NDArray, optional
The scale of the gamma distribution. Should be greater than zero.
Default is equal to 1.
shape : int or tuple of ints
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `alpha` and
`beta` are scalars, output shape will be `(m, n)`. If `alpha` and `beta`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each `[alpha, beta)` pair.
dtype : {'float16','float32', 'float64'}
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`alpha.context` when `alpha` is an NDArray.
out : NDArray
out : NDArray, optional
Store output to an existing NDArray.
@@ -352,21 +352,21 @@ def negative_binomial(k=1, p=1, shape=_Null, dtype=_Null, ctx=None,
Parameters
----------
k : float or NDArray
k : float or NDArray, optional
Limit of unsuccessful experiments, > 0.
p : float or NDArray
p : float or NDArray, optional
Failure probability in each experiment, >= 0 and <=1.
shape : int or tuple of ints
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `k` and
`p` are scalars, output shape will be `(m, n)`. If `k` and `p`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each `[k, p)` pair.
dtype : {'float16','float32', 'float64'}
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`k.context` when `k` is an NDArray.
out : NDArray
out : NDArray, optional
Store output to an existing NDArray.
@@ -403,21 +403,21 @@ def generalized_negative_binomial(mu=1, alpha=1, shape=_Null, dtype=_Null, ctx=N
Parameters
----------
mu : float or NDArray
mu : float or NDArray, optional
Mean of the negative binomial distribution.
alpha : float or NDArray
alpha : float or NDArray, optional
Alpha (dispersion) parameter of the negative binomial distribution.
shape : int or tuple of ints
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `mu` and
`alpha` are scalars, output shape will be `(m, n)`. If `mu` and `alpha`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each `[mu, alpha)` pair.
dtype : {'float16','float32', 'float64'}
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`mu.context` when `mu` is an NDArray.
out : NDArray
out : NDArray, optional
Store output to an existing NDArray.
@@ -455,17 +455,17 @@ def multinomial(data, shape=_Null, get_prob=False, out=None, dtype='int32', **kw
`k` is the number of possible outcomes of each multinomial distribution.
For example, data with shape `(m, n, k)` specifies `m*n` multinomial
distributions each with `k` possible outcomes.
shape : int or tuple of ints
shape : int or tuple of ints, optional
The number of samples to draw from each distribution. If shape is empty
one sample will be drawn from each distribution.
get_prob : bool
get_prob : bool, optional
If true, a second array containing log likelihood of the drawn
samples will also be returned.
This is usually used for reinforcement learning, where you can provide
reward as head gradient w.r.t. this array to estimate gradient.
out : NDArray
out : NDArray, optional
Store output to an existing NDArray.
dtype : str or numpy.dtype
dtype : str or numpy.dtype, optional
Data type of the sample output array. The default is int32.
Note that the data type of the log likelihood array is the same with that of `data`.
@@ -500,7 +500,7 @@ def shuffle(data, **kwargs):
----------
data : NDArray
Input data array.
out : NDArray
out : NDArray, optional
Array to store the result.
Examples
@@ -518,3 +518,45 @@ def shuffle(data, **kwargs):
<NDArray 2x3 @cpu(0)>
"""
return _internal._shuffle(data, **kwargs)


def randint(low, high, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs):
"""Draw random samples from a discrete uniform distribution.
Samples are uniformly distributed over the half-open interval *[low, high)*
(includes *low*, but excludes *high*).
Parameters
----------
low : int, required
Lower boundary of the output interval. All values generated will be
greater than or equal to low.
high : int, required
Upper boundary of the output interval. All values generated will be
less than high.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `low` and
`high` are scalars, output shape will be `(m, n)`.
dtype : {'int32', 'int64'}, optional
Data type of output samples. Default is 'int32'
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`low.context` when `low` is an NDArray.
out : NDArray, optional
Store output to an existing NDArray.
Examples
--------
>>> mx.nd.random.randint(5, 100)
[ 90]
<NDArray 1 @cpu(0)>
>>> mx.nd.random.randint(-10, 2, ctx=mx.gpu(0))
[ -8]
<NDArray 1 @gpu(0)>
>>> mx.nd.random.randint(-10, 10, shape=(2,))
[ -5 4]
<NDArray 2 @cpu(0)>
"""
return _random_helper(_internal._random_randint, None,
[low, high], shape, dtype, ctx, out, kwargs)
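
Per the commit message, a unit test checks the extremes and the rough uniformity of the generated samples. A hedged sketch of such a check (illustrative only; the helper name, sample count, and tolerance below are assumptions, not the repository's actual test) might be:

import mxnet as mx
import numpy as np

def check_randint_extremes_and_uniformity(low=-50, high=50, samples=100000):
    # All samples must fall in [low, high), and each value should occur with
    # roughly equal frequency once enough samples are drawn.
    out = mx.nd.random.randint(low, high, shape=(samples,), dtype='int64').asnumpy()
    assert out.min() >= low and out.max() < high
    counts = np.bincount((out - low).astype(np.int64), minlength=high - low)
    expected = samples / (high - low)
    assert np.all(np.abs(counts - expected) < 0.2 * expected)  # loose uniformity bound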