This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

remove numpy changes to gluon peripherals
haojin2 committed Jul 31, 2019
1 parent 91d64d2 commit d9f1d21
Showing 8 changed files with 58 additions and 198 deletions.
33 changes: 8 additions & 25 deletions python/mxnet/gluon/data/dataloader.py
@@ -38,8 +38,6 @@

 from . import sampler as _sampler
 from ... import nd, context
-from ...util import is_np_shape, is_np_array, set_np
-from ... import numpy as _mx_np  # pylint: disable=reimported

 if sys.platform == 'darwin' or sys.platform == 'win32':
     def rebuild_ndarray(*args):
@@ -131,33 +129,27 @@ def __init__(self, *args, **kwargs):
 def default_batchify_fn(data):
     """Collate data into batch."""
     if isinstance(data[0], nd.NDArray):
-        return _mx_np.stack(data) if is_np_array() else nd.stack(*data)
+        return nd.stack(*data)
     elif isinstance(data[0], tuple):
         data = zip(*data)
         return [default_batchify_fn(i) for i in data]
     else:
         data = np.asarray(data)
-        array_fn = _mx_np.array if is_np_array() else nd.array
-        return array_fn(data, dtype=data.dtype)
+        return nd.array(data, dtype=data.dtype)


 def default_mp_batchify_fn(data):
     """Collate data into batch. Use shared memory for stacking."""
     if isinstance(data[0], nd.NDArray):
-        empty_fn = _mx_np.empty if is_np_array() else nd.empty
-        out = empty_fn((len(data),) + data[0].shape, dtype=data[0].dtype,
-                       ctx=context.Context('cpu_shared', 0))
-        if is_np_array():
-            return _mx_np.stack(data, out=out)
-        else:
-            return nd.stack(*data, out=out)
+        out = nd.empty((len(data),) + data[0].shape, dtype=data[0].dtype,
+                       ctx=context.Context('cpu_shared', 0))
+        return nd.stack(*data, out=out)
     elif isinstance(data[0], tuple):
         data = zip(*data)
         return [default_mp_batchify_fn(i) for i in data]
     else:
         data = np.asarray(data)
-        array_fn = _mx_np.array if is_np_array() else nd.array
-        return array_fn(data, dtype=data.dtype,
-                        ctx=context.Context('cpu_shared', 0))
+        return nd.array(data, dtype=data.dtype,
+                        ctx=context.Context('cpu_shared', 0))


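For context, a minimal sketch (mine, not part of the commit) of what the surviving default_batchify_fn does with a list of NDArray samples; the sample values and shapes are illustrative assumptions:

import mxnet as mx
from mxnet.gluon.data.dataloader import default_batchify_fn

# three samples of shape (2,) are stacked along a new batch axis
samples = [mx.nd.array([1, 2]), mx.nd.array([3, 4]), mx.nd.array([5, 6])]
print(default_batchify_fn(samples).shape)  # expected: (3, 2)
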
@@ -393,20 +385,14 @@ def __len__(self):
         return len(self._batch_sampler)


-def _thread_worker_initializer(active_shape, active_array):
-    """Initializer for ThreadPool."""
-    set_np(shape=active_shape, array=active_array)
-
-
 _worker_dataset = None
-def _worker_initializer(dataset, active_shape, active_array):
+def _worker_initializer(dataset):
     """Initializer for processing pool."""
     # global dataset is per-process based and only available in worker processes
     # this is only necessary to handle MXIndexedRecordIO because otherwise dataset
     # can be passed as argument
     global _worker_dataset
     _worker_dataset = dataset
-    set_np(shape=active_shape, array=active_array)

 def _worker_fn(samples, batchify_fn, dataset=None):
     """Function for processing data in worker process."""
@@ -573,13 +559,10 @@ def __init__(self, dataset, batch_size=None, shuffle=False, sampler=None,
         self._prefetch = max(0, int(prefetch) if prefetch is not None else 2 * self._num_workers)
         if self._num_workers > 0:
             if self._thread_pool:
-                self._worker_pool = ThreadPool(self._num_workers,
-                                               initializer=_thread_worker_initializer,
-                                               initargs=(is_np_shape(), is_np_array()))
+                self._worker_pool = ThreadPool(self._num_workers)
             else:
                 self._worker_pool = multiprocessing.Pool(
-                    self._num_workers, initializer=_worker_initializer,
-                    initargs=[self._dataset, is_np_shape(), is_np_array()])
+                    self._num_workers, initializer=_worker_initializer, initargs=[self._dataset])
         if batchify_fn is None:
             if num_workers > 0:
                 self._batchify_fn = default_mp_batchify_fn
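End-to-end usage of the pool wiring above (an illustrative sketch, not from the diff; dataset contents are made up):

import mxnet as mx
from mxnet.gluon.data import ArrayDataset, DataLoader

dataset = ArrayDataset(mx.nd.arange(10).reshape(10, 1), mx.nd.arange(10))
loader = DataLoader(dataset, batch_size=4, num_workers=2)  # workers run _worker_initializer
for data, label in loader:
    print(data.shape, label.shape)  # (4, 1) (4,) for full batches, (2, 1) (2,) for the last
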
12 changes: 3 additions & 9 deletions python/mxnet/gluon/data/vision/datasets.py
@@ -31,8 +31,6 @@
 from .. import dataset
 from ...utils import download, check_sha1, _get_repo_file_url
 from .... import nd, image, recordio, base
-from .... import numpy as _mx_np  # pylint: disable=reimported
-from ....util import is_np_array


 class MNIST(dataset._DownloadedDataset):
@@ -83,16 +81,13 @@ def _get_data(self):
         with gzip.open(label_file, 'rb') as fin:
             struct.unpack(">II", fin.read(8))
             label = np.frombuffer(fin.read(), dtype=np.uint8).astype(np.int32)
-            if is_np_array():
-                label = _mx_np.array(label, dtype=label.dtype)

         with gzip.open(data_file, 'rb') as fin:
             struct.unpack(">IIII", fin.read(16))
             data = np.frombuffer(fin.read(), dtype=np.uint8)
             data = data.reshape(len(label), 28, 28, 1)

-        array_fn = _mx_np.array if is_np_array() else nd.array
-        self._data = array_fn(data, dtype=data.dtype)
+        self._data = nd.array(data, dtype=data.dtype)
         self._label = label

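Aside on the struct.unpack calls above (standard IDX file layout, not something introduced by this commit): the label file starts with a big-endian magic number and an item count, which the loader reads and discards. A sketch of parsing one such file, with a hypothetical local path:

import gzip
import struct
import numpy as np

# hypothetical local copy of an MNIST label file in IDX1 format
with gzip.open('train-labels-idx1-ubyte.gz', 'rb') as fin:
    magic, count = struct.unpack(">II", fin.read(8))  # magic is 2049 for label files
    label = np.frombuffer(fin.read(), dtype=np.uint8)
assert len(label) == count
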
@@ -188,9 +183,8 @@ def _get_data(self):
         data = np.concatenate(data)
         label = np.concatenate(label)

-        array_fn = _mx_np.array if is_np_array() else nd.array
-        self._data = array_fn(data, dtype=data.dtype)
-        self._label = array_fn(label, dtype=label.dtype) if is_np_array() else label
+        self._data = nd.array(data, dtype=data.dtype)
+        self._label = label


 class CIFAR100(CIFAR10):
25 changes: 0 additions & 25 deletions python/mxnet/gluon/data/vision/transforms.py
@@ -23,7 +23,6 @@
 from ...nn import Sequential, HybridSequential
 from .... import image
 from ....base import numeric_types
-from ....util import is_np_array


 class Compose(Sequential):
@@ -93,8 +92,6 @@ def __init__(self, dtype='float32'):
         self._dtype = dtype

     def hybrid_forward(self, F, x):
-        if is_np_array():
-            F = F.npx
         return F.cast(x, self._dtype)


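For orientation (my sketch, not part of the commit): in these hybrid_forward methods, F is the mx.nd module when the block runs imperatively and mx.sym once hybridized, which is why the deleted branches had to reroute F to the npx namespace in numpy mode. The surviving classic path in action:

import mxnet as mx
from mxnet.gluon.data.vision import transforms

caster = transforms.Cast('float16')       # hybrid_forward receives F = mx.nd here
print(caster(mx.nd.zeros((2, 3))).dtype)  # expected: float16
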
@@ -137,8 +134,6 @@ def __init__(self):
         super(ToTensor, self).__init__()

     def hybrid_forward(self, F, x):
-        if is_np_array():
-            F = F.npx
         return F.image.to_tensor(x)


@@ -192,8 +187,6 @@ def __init__(self, mean=0.0, std=1.0):
         self._std = std

     def hybrid_forward(self, F, x):
-        if is_np_array():
-            F = F.npx
         return F.image.normalize(x, self._mean, self._std)


@@ -376,8 +369,6 @@ def __init__(self, size, keep_ratio=False, interpolation=1):
         self._interpolation = interpolation

     def hybrid_forward(self, F, x):
-        if is_np_array():
-            F = F.npx
         return F.image.resize(x, self._size, self._keep, self._interpolation)

@@ -394,8 +385,6 @@ class RandomFlipLeftRight(HybridBlock):
         super(RandomFlipLeftRight, self).__init__()

     def hybrid_forward(self, F, x):
-        if is_np_array():
-            F = F.npx
         return F.image.random_flip_left_right(x)


@@ -413,8 +402,6 @@ def __init__(self):
         super(RandomFlipTopBottom, self).__init__()

     def hybrid_forward(self, F, x):
-        if is_np_array():
-            F = F.npx
         return F.image.random_flip_top_bottom(x)


@@ -440,8 +427,6 @@ def __init__(self, brightness):
         self._args = (max(0, 1-brightness), 1+brightness)

     def hybrid_forward(self, F, x):
-        if is_np_array():
-            F = F.npx
         return F.image.random_brightness(x, *self._args)


@@ -467,8 +452,6 @@ def __init__(self, contrast):
         self._args = (max(0, 1-contrast), 1+contrast)

     def hybrid_forward(self, F, x):
-        if is_np_array():
-            F = F.npx
         return F.image.random_contrast(x, *self._args)


@@ -494,8 +477,6 @@ def __init__(self, saturation):
         self._args = (max(0, 1-saturation), 1+saturation)

     def hybrid_forward(self, F, x):
-        if is_np_array():
-            F = F.npx
         return F.image.random_saturation(x, *self._args)


@@ -521,8 +502,6 @@ def __init__(self, hue):
         self._args = (max(0, 1-hue), 1+hue)

     def hybrid_forward(self, F, x):
-        if is_np_array():
-            F = F.npx
         return F.image.random_hue(x, *self._args)


@@ -557,8 +536,6 @@ def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
         self._args = (brightness, contrast, saturation, hue)

     def hybrid_forward(self, F, x):
-        if is_np_array():
-            F = F.npx
         return F.image.random_color_jitter(x, *self._args)


@@ -582,6 +559,4 @@ def __init__(self, alpha):
         self._alpha = alpha

     def hybrid_forward(self, F, x):
-        if is_np_array():
-            F = F.npx
         return F.image.random_lighting(x, self._alpha)
80 changes: 18 additions & 62 deletions python/mxnet/gluon/loss.py
@@ -29,7 +29,6 @@
 from .. import ndarray
 from ..base import numeric_types
 from .block import HybridBlock
-from ..util import is_np_array


 def _apply_weighting(F, loss, weight=None, sample_weight=None):
@@ -54,10 +53,7 @@ def _apply_weighting(F, loss, weight=None, sample_weight=None):
         Weighted loss
     """
     if sample_weight is not None:
-        if is_np_array():
-            loss = loss * sample_weight
-        else:
-            loss = F.broadcast_mul(loss, sample_weight)
+        loss = F.broadcast_mul(loss, sample_weight)

     if weight is not None:
         assert isinstance(weight, numeric_types), "weight must be a number"
@@ -68,11 +64,7 @@ def _apply_weighting(F, loss, weight=None, sample_weight=None):

 def _reshape_like(F, x, y):
     """Reshapes x to the same shape as y."""
-    if F is ndarray:
-        return x.reshape(y.shape)
-    elif is_np_array():
-        F = F.npx
-    return F.reshape_like(x, y)
+    return x.reshape(y.shape) if F is ndarray else F.reshape_like(x, y)


 class Loss(HybridBlock):
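Quick illustration (mine, not the repo's) of the broadcast_mul path that _apply_weighting keeps: a per-sample weight column masks whole rows of an element-wise loss. Values are made up:

import mxnet as mx

loss = mx.nd.ones((4, 3))                                # element-wise loss
sample_weight = mx.nd.array([1, 0, 1, 0]).reshape(4, 1)  # per-sample mask
print(mx.nd.broadcast_mul(loss, sample_weight).sum(axis=1))  # expected: [3. 0. 3. 0.]
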
@@ -144,15 +136,9 @@ def __init__(self, weight=1., batch_axis=0, **kwargs):

     def hybrid_forward(self, F, pred, label, sample_weight=None):
         label = _reshape_like(F, label, pred)
-        loss = F.np.square(label - pred) if is_np_array() else F.square(label - pred)
+        loss = F.square(label - pred)
         loss = _apply_weighting(F, loss, self._weight / 2, sample_weight)
-        if is_np_array():
-            if F is ndarray:
-                return F.np.mean(loss, axis=tuple(range(1, loss.ndim)))
-            else:
-                return F.npx.batch_flatten(loss).mean(axis=1)
-        else:
-            return F.mean(loss, axis=self._batch_axis, exclude=True)
+        return F.mean(loss, axis=self._batch_axis, exclude=True)


 class L1Loss(Loss):
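A numeric sanity check of the restored L2Loss path (an illustrative example, not from the commit):

import mxnet as mx
from mxnet.gluon.loss import L2Loss

pred = mx.nd.array([[0., 2.]])
label = mx.nd.array([[1., 0.]])
# weight/2 * mean((label - pred)^2) over non-batch axes = 0.5 * (1 + 4) / 2 = 1.25
print(L2Loss()(pred, label))  # expected: [1.25]
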
@@ -258,45 +244,27 @@ def __init__(self, from_sigmoid=False, weight=None, batch_axis=0, **kwargs):

     def hybrid_forward(self, F, pred, label, sample_weight=None, pos_weight=None):
         label = _reshape_like(F, label, pred)
-        if is_np_array():
-            relu_fn = F.npx.relu
-            act_fn = F.npx.activation
-            abs_fn = F.np.abs
-            mul_fn = F.np.multiply
-            log_fn = F.np.log
-        else:
-            relu_fn = F.relu
-            act_fn = F.Activation
-            abs_fn = F.abs
-            mul_fn = F.broadcast_mul
-            log_fn = F.log
         if not self._from_sigmoid:
             if pos_weight is None:
                 # We use the stable formula: max(x, 0) - x * z + log(1 + exp(-abs(x)))
-                loss = relu_fn(pred) - pred * label + \
-                    act_fn(-abs_fn(pred), act_type='softrelu')
+                loss = F.relu(pred) - pred * label + \
+                    F.Activation(-F.abs(pred), act_type='softrelu')
             else:
                 # We use the stable formula: x - x * z + (1 + z * pos_weight - z) * \
                 #    (log(1 + exp(-abs(x))) + max(-x, 0))
-                log_weight = 1 + mul_fn(pos_weight - 1, label)
-                loss = pred - pred * label + log_weight *\
-                    (act_fn(-abs_fn(pred), act_type='softrelu') + relu_fn(-pred))
+                log_weight = 1 + F.broadcast_mul(pos_weight - 1, label)
+                loss = pred - pred * label + log_weight * \
+                    (F.Activation(-F.abs(pred), act_type='softrelu') + F.relu(-pred))
         else:
             eps = 1e-12
             if pos_weight is None:
-                loss = -(log_fn(pred + eps) * label
-                         + log_fn(1. - pred + eps) * (1. - label))
+                loss = -(F.log(pred + eps) * label
+                         + F.log(1. - pred + eps) * (1. - label))
             else:
-                loss = -(mul_fn(log_fn(pred + eps) * label, pos_weight)
-                         + log_fn(1. - pred + eps) * (1. - label))
+                loss = -(F.broadcast_mul(F.log(pred + eps) * label, pos_weight)
+                         + F.log(1. - pred + eps) * (1. - label))
         loss = _apply_weighting(F, loss, self._weight, sample_weight)
-        if is_np_array():
-            if F is ndarray:
-                return F.np.mean(loss, axis=tuple(range(1, loss.ndim)))
-            else:
-                return F.npx.batch_flatten(loss).mean(axis=1)
-        else:
-            return F.mean(loss, axis=self._batch_axis, exclude=True)
+        return F.mean(loss, axis=self._batch_axis, exclude=True)


 SigmoidBCELoss = SigmoidBinaryCrossEntropyLoss
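Aside on the "stable formula" comments above (standard log-sum-exp algebra, not from the commit): for a logit x and target z, the naive loss -z*log(sigmoid(x)) - (1-z)*log(1-sigmoid(x)) rearranges to max(x, 0) - x*z + log(1 + exp(-abs(x))), which never exponentiates a large positive number. A quick check with made-up values:

import numpy as np

def naive(x, z):
    s = 1.0 / (1.0 + np.exp(-x))
    return -(z * np.log(s) + (1 - z) * np.log(1 - s))

def stable(x, z):
    return np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))

print(naive(3.0, 1.0), stable(3.0, 1.0))  # both ~0.04859; only stable() survives x = 1000
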
@@ -373,27 +341,15 @@ def __init__(self, axis=-1, sparse_label=True, from_logits=False, weight=None,
         self._from_logits = from_logits

     def hybrid_forward(self, F, pred, label, sample_weight=None):
-        if is_np_array():
-            log_softmax = F.npx.log_softmax
-            pick = F.npx.pick
-        else:
-            log_softmax = F.log_softmax
-            pick = F.pick
         if not self._from_logits:
-            pred = log_softmax(pred, self._axis)
+            pred = F.log_softmax(pred, self._axis)
         if self._sparse_label:
-            loss = -pick(pred, label, axis=self._axis, keepdims=True)
+            loss = -F.pick(pred, label, axis=self._axis, keepdims=True)
         else:
             label = _reshape_like(F, label, pred)
-            loss = -(pred * label).sum(axis=self._axis, keepdims=True)
+            loss = -F.sum(pred * label, axis=self._axis, keepdims=True)
         loss = _apply_weighting(F, loss, self._weight, sample_weight)
-        if is_np_array():
-            if F is ndarray:
-                return loss.mean(axis=tuple(range(1, loss.ndim)))
-            else:
-                return F.npx.batch_flatten(loss).mean(axis=1)
-        else:
-            return loss.mean(axis=self._batch_axis, exclude=True)
+        return F.mean(loss, axis=self._batch_axis, exclude=True)


 SoftmaxCELoss = SoftmaxCrossEntropyLoss
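And a check of the restored sparse-label branch (illustrative values, mine):

import mxnet as mx
from mxnet.gluon.loss import SoftmaxCrossEntropyLoss

pred = mx.nd.array([[10., 0., 0.]])  # unnormalized scores
label = mx.nd.array([0])             # sparse class index
print(SoftmaxCrossEntropyLoss()(pred, label))  # ~[9.1e-05]: log_softmax, then -pick
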
[Diffs for the remaining 4 of the 8 changed files not shown]
