This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

[MXNET-1029] Feature request: randint operator #12749

Merged: 35 commits on Nov 27, 2018
Changes from all commits (35 commits)
b1e7563
randint operator add along with add optional tag to params
ChaiBapchya Oct 11, 2018
89ad2ee
register param
ChaiBapchya Oct 11, 2018
3dabb6d
lint space issue
ChaiBapchya Oct 11, 2018
40adfde
randn issue fix
ChaiBapchya Oct 11, 2018
9827e1e
uniform_int_distribution doesn't support int8, uint8 fix
ChaiBapchya Oct 13, 2018
3363e56
dtype ftype
ChaiBapchya Oct 13, 2018
df14294
ftype to dtype - invalid template arg
ChaiBapchya Oct 13, 2018
2e24870
fix template arg issue
ChaiBapchya Oct 15, 2018
693622c
test with int dtype for windows
ChaiBapchya Oct 15, 2018
c135e3a
removed int8,uint8 from test
ChaiBapchya Oct 16, 2018
d106ff3
gpu implementation
ChaiBapchya Oct 16, 2018
ef9e1f1
gpu engine state diff
ChaiBapchya Oct 16, 2018
ad574dc
removed gpu support
ChaiBapchya Oct 16, 2018
e3f6afc
empty commit
ChaiBapchya Oct 17, 2018
70cb9af
temporary fix : batchnorm flaky test skip
ChaiBapchya Oct 17, 2018
bf47cde
removed randn symbol specific code since other PR is on it
ChaiBapchya Oct 18, 2018
c3abb3a
revert ndarray/randn for compatibility
ChaiBapchya Oct 18, 2018
4bef3af
added unit test for checking extremes and uniform distribution for su…
ChaiBapchya Oct 19, 2018
c201fb3
increased the high val
ChaiBapchya Oct 29, 2018
634b685
Merge branch 'master' into randint and incorporated changes made by c…
ChaiBapchya Nov 2, 2018
761d48c
Merge branch 'master' into randint
ChaiBapchya Nov 2, 2018
8511303
int32 to int64 support, indentation fix, check for optype correctly b…
ChaiBapchya Nov 3, 2018
b100935
Merge branch 'master' into randint
ChaiBapchya Nov 3, 2018
d7aa9bd
gpu support, revert finfertype using template specialization, remove …
ChaiBapchya Nov 6, 2018
a6acc30
Merge branch 'master' into randint, itype (check if integral), made h…
ChaiBapchya Nov 6, 2018
21d5356
fix for invalid template arg by checking for int32,int64
ChaiBapchya Nov 8, 2018
1254fa7
gpu randint in random_generator
ChaiBapchya Nov 8, 2018
9f16408
Merge branch 'master' into randint and syntax,lint fix, discrete_unif…
ChaiBapchya Nov 8, 2018
274366b
sample_uniform issue and param, removed old flaky test skip line
ChaiBapchya Nov 12, 2018
e7e622c
replaced discrete_uniform function by rand_int64 for consistency
ChaiBapchya Nov 13, 2018
ce3849d
formula update and removed itype
ChaiBapchya Nov 14, 2018
494e416
change ctx to include gpu, randint samepl_op.cu typo
ChaiBapchya Nov 19, 2018
ccebcec
trigger ci
ChaiBapchya Nov 20, 2018
f583ac0
doc fix, check fix, whitespace remove
ChaiBapchya Nov 21, 2018
e3be157
added the without dtype testcase
ChaiBapchya Nov 27, 2018
16 changes: 14 additions & 2 deletions include/mxnet/random_generator.h
@@ -31,6 +31,7 @@

#if MXNET_USE_CUDA
#include <curand_kernel.h>
#include <math.h>
#endif // MXNET_USE_CUDA

namespace mxnet {
@@ -54,7 +55,6 @@ class RandGenerator<cpu, DType> {
public:
typedef typename std::conditional<std::is_floating_point<DType>::value,
DType, double>::type FType;

explicit Impl(RandGenerator<cpu, DType> *gen, int state_idx)
: engine_(gen->states_ + state_idx) {}

@@ -63,6 +63,10 @@ class RandGenerator<cpu, DType> {

MSHADOW_XINLINE int rand() { return engine_->operator()(); }

MSHADOW_XINLINE int64_t rand_int64() {
return static_cast<int64_t>(engine_->operator()() << 31) + engine_->operator()();
Comment by ChaiBapchya (Contributor, Author):
@leezu here I implement rand_int64() by calling mt19937's rand() twice to get two 32-bit values.

According to this video: https://channel9.msdn.com/Events/GoingNative/2013/rand-Considered-Harmful
is this the right way to obtain a truly uniform random integer distribution?

Reply (Contributor):
Yes, because you're concatenating two 32-bit numbers, where every bit is uniformly random. Thus the resulting 64 bits are uniformly random.
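
For illustration, a minimal Python sketch of the construction being discussed (not part of the PR; note that a clean concatenation shifts the high word by 32, while the merged code shifts by 31, so the two words overlap by one bit):

import random

def rand_int64(rng):
    # Two independent 32-bit draws; every bit of each word is uniform.
    hi = rng.getrandbits(32)
    lo = rng.getrandbits(32)
    # Concatenating them yields 64 uniformly random bits in [0, 2**64).
    return (hi << 32) | lo

rng = random.Random(0)  # stand-in for the std::mt19937 engine used above
print(rand_int64(rng))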

}

MSHADOW_XINLINE FType uniform() {
typedef typename std::conditional<std::is_integral<DType>::value,
std::uniform_int_distribution<DType>,
@@ -78,7 +82,7 @@ class RandGenerator<cpu, DType> {

private:
std::mt19937 *engine_;
};
}; // class RandGenerator<cpu, DType>::Impl

static void AllocState(RandGenerator<cpu, DType> *inst) {
inst->states_ = new std::mt19937[kNumRandomStates];
@@ -137,6 +141,10 @@ class RandGenerator<gpu, DType> {
return curand(&state_);
}

MSHADOW_FORCE_INLINE __device__ int64_t rand_int64() {
return static_cast<int64_t>(curand(&state_) << 31) + curand(&state_);
}

MSHADOW_FORCE_INLINE __device__ float uniform() {
return static_cast<float>(1.0) - curand_uniform(&state_);
}
@@ -189,6 +197,10 @@ class RandGenerator<gpu, double> {
return curand(&state_);
}

MSHADOW_FORCE_INLINE __device__ int64_t rand_int64() {
return static_cast<int64_t>(curand(&state_) << 31) + curand(&state_);
}

MSHADOW_FORCE_INLINE __device__ double uniform() {
return static_cast<float>(1.0) - curand_uniform_double(&state_);
}
134 changes: 88 additions & 46 deletions python/mxnet/ndarray/random.py
@@ -59,23 +59,23 @@ def uniform(low=0, high=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwarg

Parameters
----------
low : float or NDArray
low : float or NDArray, optional
Lower boundary of the output interval. All values generated will be
greater than or equal to low. The default value is 0.
high : float or NDArray
high : float or NDArray, optional
Upper boundary of the output interval. All values generated will be
less than high. The default value is 1.0.
shape : int or tuple of ints
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `low` and
`high` are scalars, output shape will be `(m, n)`. If `low` and `high`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each `[low, high)` pair.
dtype : {'float16','float32', 'float64'}
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`low.context` when `low` is an NDArray.
out : NDArray
out : NDArray, optional
Store output to an existing NDArray.
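
A short sketch of the shape rule described above (assuming mxnet is installed; the values are illustrative):

import mxnet as mx

# low/high as NDArrays of shape (1, 2): one [low, high) pair per entry.
low = mx.nd.array([[0.0, 2.0]])
high = mx.nd.array([[1.0, 3.0]])
# shape=(3, 4) draws 3*4 samples per pair, so the output shape is (1, 2, 3, 4).
out = mx.nd.random.uniform(low, high, shape=(3, 4))
print(out.shape)  # (1, 2, 3, 4)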


@@ -111,21 +111,21 @@ def normal(loc=0, scale=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwarg

Parameters
----------
loc : float or NDArray
loc : float or NDArray, optional
Mean (centre) of the distribution.
scale : float or NDArray
scale : float or NDArray, optional
Standard deviation (spread or width) of the distribution.
shape : int or tuple of ints
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `loc` and
`scale` are scalars, output shape will be `(m, n)`. If `loc` and `scale`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each `(loc, scale)` pair.
dtype : {'float16','float32', 'float64'}
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`loc.context` when `loc` is an NDArray.
out : NDArray
out : NDArray, optional
Store output to an existing NDArray.


@@ -170,7 +170,7 @@ def randn(*shape, **kwargs):
`scale` are scalars, output shape will be `(m, n)`. If `loc` and `scale`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each `[loc, scale)` pair.
dtype : {'float16','float32', 'float64'}
dtype : {'float16', 'float32', 'float64'}
Data type of output samples. Default is 'float32'
ctx : Context
Device context of output. Default is current context. Overridden by
@@ -212,19 +212,19 @@ def poisson(lam=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs):

Parameters
----------
lam : float or NDArray
lam : float or NDArray, optional
Expectation of interval, should be >= 0.
shape : int or tuple of ints
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `lam` is
a scalar, output shape will be `(m, n)`. If `lam`
is an NDArray with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each entry in `lam`.
dtype : {'float16','float32', 'float64'}
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`lam.context` when `lam` is an NDArray.
out : NDArray
out : NDArray, optional
Store output to an existing NDArray.


@@ -259,19 +259,19 @@ def exponential(scale=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs)

Parameters
----------
scale : float or NDArray
scale : float or NDArray, optional
The scale parameter, \beta = 1/\lambda.
shape : int or tuple of ints
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `scale` is
a scalar, output shape will be `(m, n)`. If `scale`
is an NDArray with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each entry in `scale`.
dtype : {'float16','float32', 'float64'}
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`scale.context` when `scale` is an NDArray.
out : NDArray
out : NDArray, optional
Store output to an existing NDArray.


@@ -302,22 +302,22 @@ def gamma(alpha=1, beta=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwarg

Parameters
----------
alpha : float or NDArray
alpha : float or NDArray, optional
The shape of the gamma distribution. Should be greater than zero.
beta : float or NDArray
beta : float or NDArray, optional
The scale of the gamma distribution. Should be greater than zero.
Default is equal to 1.
shape : int or tuple of ints
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `alpha` and
`beta` are scalars, output shape will be `(m, n)`. If `alpha` and `beta`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each `(alpha, beta)` pair.
dtype : {'float16','float32', 'float64'}
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`alpha.context` when `alpha` is an NDArray.
out : NDArray
out : NDArray, optional
Store output to an existing NDArray.


@@ -352,21 +352,21 @@ def negative_binomial(k=1, p=1, shape=_Null, dtype=_Null, ctx=None,

Parameters
----------
k : float or NDArray
k : float or NDArray, optional
Limit of unsuccessful experiments, > 0.
p : float or NDArray
p : float or NDArray, optional
Failure probability in each experiment, >= 0 and <=1.
shape : int or tuple of ints
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `k` and
`p` are scalars, output shape will be `(m, n)`. If `k` and `p`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each `(k, p)` pair.
dtype : {'float16','float32', 'float64'}
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`k.context` when `k` is an NDArray.
out : NDArray
out : NDArray, optional
Store output to an existing NDArray.


@@ -403,21 +403,21 @@ def generalized_negative_binomial(mu=1, alpha=1, shape=_Null, dtype=_Null, ctx=N

Parameters
----------
mu : float or NDArray
mu : float or NDArray, optional
Mean of the negative binomial distribution.
alpha : float or NDArray
alpha : float or NDArray, optional
Alpha (dispersion) parameter of the negative binomial distribution.
shape : int or tuple of ints
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `mu` and
`alpha` are scalars, output shape will be `(m, n)`. If `mu` and `alpha`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each `(mu, alpha)` pair.
dtype : {'float16','float32', 'float64'}
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`mu.context` when `mu` is an NDArray.
out : NDArray
out : NDArray, optional
Store output to an existing NDArray.


@@ -455,17 +455,17 @@ def multinomial(data, shape=_Null, get_prob=False, out=None, dtype='int32', **kw
`k` is the number of possible outcomes of each multinomial distribution.
For example, data with shape `(m, n, k)` specifies `m*n` multinomial
distributions each with `k` possible outcomes.
shape : int or tuple of ints
shape : int or tuple of ints, optional
The number of samples to draw from each distribution. If shape is empty,
one sample will be drawn from each distribution.
get_prob : bool
get_prob : bool, optional
If true, a second array containing log likelihood of the drawn
samples will also be returned.
This is usually used for reinforcement learning, where you can provide
the reward as the head gradient w.r.t. this array to estimate the gradient.
out : NDArray
out : NDArray, optional
Store output to an existing NDArray.
dtype : str or numpy.dtype
dtype : str or numpy.dtype, optional
Data type of the sample output array. The default is int32.
Note that the data type of the log likelihood array is the same as that of `data`.
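
A small usage sketch of `get_prob` (assuming mxnet; the probabilities are illustrative):

import mxnet as mx

# Two multinomial distributions over k=2 outcomes; each row sums to 1.
probs = mx.nd.array([[0.1, 0.9], [0.7, 0.3]])
# Draw 5 samples per distribution and also return their log likelihoods,
# e.g. to feed a reward back as the head gradient in reinforcement learning.
samples, logp = mx.nd.random.multinomial(probs, shape=(5,), get_prob=True)
print(samples.shape, logp.shape)  # (2, 5) (2, 5)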

@@ -500,7 +500,7 @@ def shuffle(data, **kwargs):
----------
data : NDArray
Input data array.
out : NDArray
out : NDArray, optional
Array to store the result.

Examples
@@ -518,3 +518,45 @@ def shuffle(data, **kwargs):
<NDArray 2x3 @cpu(0)>
"""
return _internal._shuffle(data, **kwargs)


def randint(low, high, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs):
"""Draw random samples from a discrete uniform distribution.

Samples are uniformly distributed over the half-open interval *[low, high)*
(includes *low*, but excludes *high*).

Parameters
----------
low : int, required
Lower boundary of the output interval. All values generated will be
greater than or equal to low.
high : int, required
Upper boundary of the output interval. All values generated will be
less than high.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `low` and
`high` are scalars, output shape will be `(m, n)`.
dtype : {'int32', 'int64'}, optional
Data type of output samples. Default is 'int32'
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`low.context` when `low` is an NDArray.
out : NDArray, optional
Store output to an existing NDArray.


Examples
--------
>>> mx.nd.random.randint(5, 100)
[ 90]
<NDArray 1 @cpu(0)>
>>> mx.nd.random.randint(-10, 2, ctx=mx.gpu(0))
[ -8]
<NDArray 1 @gpu(0)>
>>> mx.nd.random.randint(-10, 10, shape=(2,))
[ -5 4]
<NDArray 2 @cpu(0)>
"""
return _random_helper(_internal._random_randint, None,
[low, high], shape, dtype, ctx, out, kwargs)
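
In the spirit of this PR's unit test (checking the extremes and the half-open interval), a quick sketch of how the new operator can be sanity-checked (assuming a build that includes this PR):

import mxnet as mx

low, high = -10, 10
samples = mx.nd.random.randint(low, high, shape=(10000,), dtype='int64').asnumpy()

# Every draw lies in the half-open interval [low, high).
assert samples.min() >= low and samples.max() < high
# With 10000 draws over 20 possible values, both extremes should appear.
assert (samples == low).any() and (samples == high - 1).any()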