MXNET-336 [Perl] Major Gluon update for Perl API. (apache#11414)
* MXNET-336
Major Gluon update towards parity with Python's API.
Miscellaneous bugfixes and improvements.
New Engine API.
Module::reshape moved to C++ backend.
Examples were updated to work on multi-gpu boxes.

* Fixed random seed for flaky tests.

* Removed redundant row.

* Fixed learning rate.
sergeykolychev authored Jun 30, 2018
1 parent da36391 commit 911b232
Showing 55 changed files with 3,937 additions and 734 deletions.
7 changes: 7 additions & 0 deletions perl-package/AI-MXNet/Changes
@@ -1,5 +1,12 @@
Revision history for Perl extension AI::MXNet

1.3 Tue Jun 26 20:57:40 PDT 2018
- Major Gluon update towards parity with Python's API.
- Miscellaneous bugfixes and improvements.
- New Engine API.
- Module::reshape moved to C++ backend.
- Examples were updated to work on multi-gpu boxes

1.23 Thu Apr 19 15:38:10 PDT 2018
- Support for image operations on symbols and ndarrays.

4 changes: 4 additions & 0 deletions perl-package/AI-MXNet/MANIFEST
@@ -25,6 +25,7 @@ lib/AI/MXNet/Contrib.pm
lib/AI/MXNet/Contrib/NDArray.pm
lib/AI/MXNet/Contrib/Symbol.pm
lib/AI/MXNet/CudaModule.pm
lib/AI/MXNet/Engine.pm
lib/AI/MXNet/Executor.pm
lib/AI/MXNet/Executor/Group.pm
lib/AI/MXNet/Function/Parameters.pm
@@ -38,6 +39,7 @@ lib/AI/MXNet/Gluon/Data/Vision.pm
lib/AI/MXNet/Gluon/Loss.pm
lib/AI/MXNet/Gluon/Mouse.pm
lib/AI/MXNet/Gluon/NN.pm
lib/AI/MXNet/Gluon/NN/Activation.pm
lib/AI/MXNet/Gluon/NN/BasicLayers.pm
lib/AI/MXNet/Gluon/NN/ConvLayers.pm
lib/AI/MXNet/Gluon/Parameter.pm
@@ -97,10 +99,12 @@ t/test_autograd.t
t/test_base.t
t/test_conv.t
t/test_cuda_module.t
t/test_engine.t
t/test_executor.t
t/test_gluon.t
t/test_gluon_data.t
t/test_gluon_rnn.t
t/test_gluon_trainer.t
t/test_infer_shape.t
t/test_init.t
t/test_io.t
6 changes: 3 additions & 3 deletions perl-package/AI-MXNet/META.json
@@ -30,8 +30,8 @@
},
"runtime" : {
"requires" : {
"AI::MXNetCAPI" : "1.2",
"AI::NNVMCAPI" : "1.2",
"AI::MXNetCAPI" : "1.3",
"AI::NNVMCAPI" : "1.3",
"Function::Parameters" : "1.0705",
"Hash::Ordered" : "0.012",
"GraphViz" : "2.14",
@@ -45,5 +45,5 @@
}
},
"release_status" : "stable",
"version" : "1.23"
"version" : "1.3"
}
6 changes: 3 additions & 3 deletions perl-package/AI-MXNet/META.yml
@@ -17,12 +17,12 @@ no_index:
- t
- inc
requires:
AI::MXNetCAPI: '1.2'
AI::NNVMCAPI: '1.2'
AI::MXNetCAPI: '1.3'
AI::NNVMCAPI: '1.3'
Function::Parameters: '1.0705'
Hash::Ordered: '0.012'
GraphViz: '2.14'
Mouse: v2.1.0
PDL: '2.007'
PDL::CCS: '1.23.4'
version: '1.23'
version: '1.3'
2 changes: 1 addition & 1 deletion perl-package/AI-MXNet/README
@@ -1,5 +1,5 @@
This archive contains the distribution AI-MXNet,
version 1.23:
version 1.3:

Perl interface to MXNet machine learning library

11 changes: 6 additions & 5 deletions perl-package/AI-MXNet/examples/char_lstm.pl
@@ -233,22 +233,23 @@ package main;
initializer => mx->init->Xavier(factor_type => "in", magnitude => 2.34),
num_epoch => $num_epoch,
batch_end_callback => mx->callback->Speedometer($batch_size, $disp_batches),
($chkp_epoch ? (epoch_end_callback => [mx->rnn->do_rnn_checkpoint($stack, $chkp_prefix, $chkp_epoch), \&sample]) : ())
($chkp_epoch ? (epoch_end_callback => [mx->callback->module_checkpoint($model, $chkp_prefix, $chkp_epoch), \&sample]) : ())
);

my $chkp = 1;
sub sample {
return if not $sample_size;
$model->reshape(data_shapes=>[['data',[1, $seq_size]]], label_shapes=>[['softmax_label',[1, $seq_size]]]);
my $inference_model = mx->mod->Module->load($chkp_prefix, $chkp++);
$inference_model->bind(data_shapes=>[['data',[1, $seq_size]]], label_shapes=>[['softmax_label',[1, $seq_size]]]);
my $input = mx->nd->array($fdata->slice([0, $seq_size-1]))->reshape([1, $seq_size]);
$| = 1;
for (0..$sample_size-1)
{
$model->forward(mx->io->DataBatch(data=>[$input]), is_train => 0);
my $prob = $model->get_outputs(0)->[0][0]->at($seq_size-1)->aspdl;
$inference_model->forward(mx->io->DataBatch(data=>[$input]), is_train => 0);
my $prob = $inference_model->get_outputs(0)->[0][0]->at($seq_size-1)->aspdl;
my $next_char = Math::Random::Discrete->new($prob->reshape(-1)->unpdl, [0..scalar(keys %vocabulary)-1])->rand;
print "$reverse_vocab{$next_char}";
$input->at(0)->slice([0, $seq_size-2]) .= $input->at(0)->slice([1, $seq_size-1])->copy;
$input->at(0)->at($seq_size-1) .= $next_char;
}
$model->reshape(data_shapes=>[['data',[$batch_size, $seq_size]]], label_shapes=>[['softmax_label',[$batch_size, $seq_size]]]);
}
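With this change the sampling callback no longer reshapes the training module in place; each epoch checkpoint written by mx->callback->module_checkpoint is reloaded as a separate inference module bound for a single sequence. A minimal sketch of that pattern (the prefix and epoch number below are placeholders):

    my $prefix    = 'char_lstm';                       # placeholder checkpoint prefix
    my $inference = mx->mod->Module->load($prefix, 1); # load the epoch-1 checkpoint
    $inference->bind(
        data_shapes  => [['data',          [1, $seq_size]]],
        label_shapes => [['softmax_label', [1, $seq_size]]],
    );
    # $inference->forward(...) can now run with batch size 1, independent of the training module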
4 changes: 2 additions & 2 deletions perl-package/AI-MXNet/examples/gluon/mnist.pl
@@ -48,7 +48,7 @@
$net->add(nn->Dense(10));
});
$net->hybridize() if $hybridize;
$net->load_params('mnist.params') if $load_params;
$net->load_parameters('mnist.params') if $load_params;
# data

sub transformer
@@ -130,7 +130,7 @@ sub train
my ($val_name, $val_acc) = test($ctx);
print "[Epoch $epoch] Validation: $val_name=$val_acc\n"
}
$net->save_params('mnist.params');
$net->save_parameters('mnist.params');
}

train($epochs, $cuda ? mx->gpu(0) : mx->cpu);
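The example now uses the renamed Gluon persistence methods, load_parameters and save_parameters, in place of load_params and save_params. A minimal sketch of the renamed pair, assuming the same imports the example already relies on (use AI::MXNet qw(mx); use AI::MXNet::Gluon::NN qw(nn)):

    my $layer = nn->Dense(10, in_units => 20);    # in_units given so parameter shapes are known up front
    $layer->initialize();
    $layer->save_parameters('dense.params');      # formerly save_params
    $layer->load_parameters('dense.params');      # formerly load_params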
8 changes: 7 additions & 1 deletion perl-package/AI-MXNet/lib/AI/MXNet.pm
@@ -50,7 +50,8 @@ use AI::MXNet::AutoGrad;
use AI::MXNet::Gluon;
use AI::MXNet::NDArray::Sparse;
use AI::MXNet::Symbol::Sparse;
our $VERSION = '1.22';
use AI::MXNet::Engine;
our $VERSION = '1.3';

sub import
{
@@ -76,6 +77,7 @@ sub import
sub Context { shift; AI::MXNet::Context->new(\@_) }
sub context { 'AI::MXNet::Context' }
sub cpu { AI::MXNet::Context->cpu(\$_[1]//0) }
sub cpu_pinned { AI::MXNet::Context->cpu_pinned(\$_[1]//0) }
sub gpu { AI::MXNet::Context->gpu(\$_[1]//0) }
sub kv { 'AI::MXNet::KVStore' }
sub recordio { 'AI::MXNet::RecordIO' }
@@ -92,8 +94,10 @@ sub import
sub contrib { 'AI::MXNet::Contrib' }
sub linalg { 'AI::MXNet::LinAlg' }
sub autograd { 'AI::MXNet::AutoGrad' }
sub engine { 'AI::MXNet::Engine' }
sub name { '$short_name' }
sub rtc { '$short_name' }
sub gluon { 'AI::MXNet::Gluon' }
sub CudaModule { shift; AI::MXNet::CudaModule->new(\@_) }
sub AttrScope { shift; AI::MXNet::Symbol::AttrScope->new(\@_) }
*AI::MXNet::Symbol::AttrScope::current = sub { \$${short_name}::AttrScope; };
@@ -106,6 +110,8 @@ sub import
*AI::MXNet::Context::current_context = sub { \$${short_name}::Context; };
*AI::MXNet::Context::set_current = sub { \$${short_name}::Context = \$_[1]; };
\$${short_name}::Context = AI::MXNet::Context->new(device_type => 'cpu', device_id => 0);
package nd;
\@nd::ISA = ('AI::MXNet::NDArray');
1;
EOP
eval $short_name_package;
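The import above also wires up mx->cpu_pinned, mx->engine and mx->gluon accessors and aliases the bare nd package to AI::MXNet::NDArray. A minimal usage sketch:

    use AI::MXNet qw(mx);
    my $pinned = mx->cpu_pinned(0);    # pinned-memory CPU context, device id 0
    my $engine = mx->engine;           # resolves to the 'AI::MXNet::Engine' class
    my $gluon  = mx->gluon;            # resolves to the 'AI::MXNet::Gluon' class
    my $ones   = nd->ones([2, 3]);     # nd->... now dispatches to AI::MXNet::NDArray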
2 changes: 1 addition & 1 deletion perl-package/AI-MXNet/lib/AI/MXNet/AutoLoad.pm
@@ -27,7 +27,7 @@ sub AUTOLOAD
my $sub = "_${prefix}_$name";
{
no strict 'refs';
*{"$class::$name"} = sub { shift; $real_class->$sub(@_); };
*{"${class}::$name"} = sub { shift; $real_class->$sub(@_); };
}
goto $class->can($name);
}
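For context on the one-character fix above: the braces make the boundary between the interpolated $class and the literal '::' explicit; in the unbraced form the interpolation scanner can absorb the trailing '::' into the variable name, so the generated sub lands in the wrong glob. A standalone illustration of the braced form:

    my $class = 'Foo';
    my $name  = 'bar';
    no strict 'refs';
    *{"${class}::$name"} = sub { return 42 };    # unambiguously installs &Foo::bar
    print Foo::bar(), "\n";                      # prints 42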
17 changes: 12 additions & 5 deletions perl-package/AI-MXNet/lib/AI/MXNet/Base.pm
@@ -21,8 +21,8 @@ use warnings;
use PDL;
use PDL::Types ();
use PDL::CCS::Nd;
use AI::MXNetCAPI 1.2;
use AI::NNVMCAPI 1.2;
use AI::MXNetCAPI 1.3;
use AI::NNVMCAPI 1.3;
use AI::MXNet::Types;
use Time::HiRes;
use Scalar::Util qw(blessed);
@@ -169,9 +169,16 @@ sub zip

sub enumerate
{
my ($sub, @arrays) = @_;
my $len = @{ $arrays[0] };
zip($sub, [0..$len-1], @arrays);
if('CODE' eq ref $_[0])
{
# continue supporting the callback style
my $code = shift;
my $len = @{ $_[0] };
$code->(@$_) for AI::MXNetCAPI::py_zip([0..$len-1], map { \@$_ } @_);
return;
}
my $len = @{ $_[0] };
return AI::MXNetCAPI::py_zip([0..$len-1], map { \@$_ } @_);
}

=head2 product
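enumerate() above now supports two calling styles: the original callback form and a new form that simply returns the zipped [index, element, ...] tuples. A small sketch, assuming enumerate() is among AI::MXNet::Base's default exports:

    use AI::MXNet::Base;                 # assumption: enumerate() is exported by default
    my @letters = ('a', 'b', 'c');
    # callback style (unchanged)
    enumerate(sub { my ($i, $l) = @_; print "$i => $l\n" }, \@letters);
    # list style (new): each element is an [index, value] array ref
    for my $pair (enumerate(\@letters))
    {
        my ($i, $l) = @$pair;
        print "$i => $l\n";
    }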
22 changes: 15 additions & 7 deletions perl-package/AI-MXNet/lib/AI/MXNet/CachedOp.pm
@@ -32,10 +32,17 @@ has 'handle' => (is => 'ro', isa => 'CachedOpHandle', required => 1);
around BUILDARGS => sub {
my $orig = shift;
my $class = shift;
my ($sym) = @_;
my ($sym, $flags) = @_;
for my $key (keys %$flags)
{
$flags->{ $key } = "(" .join(", ", map { defined($_) ? $_ : 'None' } @{ $flags->{ $key } }) .")"
if ref $flags->{ $key } eq 'ARRAY';
}
my $handle = check_call(
AI::MXNetCAPI::CreateCachedOp(
$sym->handle
AI::MXNetCAPI::CreateCachedOpEx(
$sym->handle,
scalar(keys %{ $flags//{} }),
$flags//{},
)
);
return $class->$orig(handle => $handle);
@@ -84,8 +91,8 @@ sub call
{
$out = [];
}
my $output = check_call(
AI::MXNetCAPI::InvokeCachedOp(
my ($output, $stypes) = check_call(
AI::MXNetCAPI::InvokeCachedOpEx(
$self->handle,
scalar(@args),
[map { $_->handle } @args],
@@ -95,11 +102,12 @@
return $original_output if defined $original_output;
if(@$output == 1)
{
return AI::MXNet::NDArray->_ndarray_cls($output->[0]);
return AI::MXNet::NDArray->_ndarray_cls($output->[0], 1, $stypes->[0]);
}
else
{
return [map { AI::MXNet::NDArray->_ndarray_cls($_) } @$output];
my $i = 0;
return [map { AI::MXNet::NDArray->_ndarray_cls($_, 1, $stypes->[$i++]) } @$output];
}
}

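The constructor now accepts an optional hash ref of flags (array-ref values are stringified into tuple form before the CreateCachedOpEx call), and call() now threads the returned storage types through _ndarray_cls so sparse outputs come back as the right subclass. A minimal sketch; the flag name below is an illustrative assumption, not taken from this diff:

    use AI::MXNet qw(mx);
    my $sym = mx->sym->exp(mx->sym->Variable('data'));
    my $op  = AI::MXNet::CachedOp->new($sym, { data_indices => [0] });   # flags hash ref is optional
    my $out = $op->call(mx->nd->ones([2, 2]));                           # single output: exp() of the input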
44 changes: 44 additions & 0 deletions perl-package/AI-MXNet/lib/AI/MXNet/Context.pm
@@ -19,6 +19,7 @@ package AI::MXNet::Context;
use strict;
use warnings;
use Mouse;
use AI::MXNet::Base;
use AI::MXNet::Types;
use AI::MXNet::Function::Parameters;
use constant devtype2str => { 1 => 'cpu', 2 => 'gpu', 3 => 'cpu_pinned' };
@@ -111,6 +112,28 @@ method cpu(Int $device_id=0)
return $self->new(device_type => 'cpu', device_id => $device_id);
}

=head2 cpu_pinned
Returns a CPU pinned memory context. Copying from CPU pinned memory to GPU
is faster than from normal CPU memory.
Parameters
----------
device_id : int, optional
The device id of the device. `device_id` is not needed for CPU.
This is included to make interface compatible with GPU.
Returns
-------
context : Context
The corresponding CPU pinned memory context.
=cut

method cpu_pinned(Int $device_id=0)
{
return $self->new(device_type => 'cpu_pinned', device_id => $device_id);
}

=head2 gpu
Returns a GPU context.
@@ -139,6 +162,27 @@ method gpu(Int $device_id=0)
$default_ctx : AI::MXNet::Context
=cut


=head2 num_gpus
Query CUDA for the number of GPUs present.
Raises
------
Will raise an exception on any CUDA error.
Returns
-------
count : int
The number of GPUs.
=cut

method num_gpus()
{
return scalar(check_call(AI::MXNetCAPI::GetGPUCount()));
}

method current_ctx()
{
return $AI::MXNet::current_ctx;
Expand Down