Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Add cpp_package build option in CI #5844

Merged
merged 4 commits into from
Apr 18, 2017
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions Jenkinsfile
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,7 @@ stage('Build') {
init_git()
def flag = """ \
USE_PROFILER=1 \
USE_CPP_PACKAGE=1 \
USE_BLAS=openblas \
-j\$(nproc)
"""
Expand All @@ -99,6 +100,7 @@ USE_BLAS=openblas \
USE_CUDA=1 \
USE_CUDA_PATH=/usr/local/cuda \
USE_CUDNN=1 \
USE_CPP_PACKAGE=1 \
-j\$(nproc)
"""
make('gpu', flag)
Expand Down Expand Up @@ -126,6 +128,7 @@ USE_MKL2017_EXPERIMENTAL=1 \
USE_CUDA=1 \
USE_CUDA_PATH=/usr/local/cuda \
USE_CUDNN=1 \
USE_CPP_PACKAGE=1 \
-j\$(nproc)
"""
make('mklml_gpu', flag)
Expand Down
7 changes: 4 additions & 3 deletions cpp-package/example/charRNN.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -194,7 +194,8 @@ class Shuffler {

class BucketSentenceIter : public DataIter {
Shuffler* random;
int batch, current, end, sequence_length;
int batch, current, end;
unsigned int sequence_length;
Context device;
vector<vector<mx_float>> sequences;
vector<wchar_t> index2chars;
Expand Down Expand Up @@ -582,7 +583,7 @@ void predict(wstring* ptext, int sequence_length, const string param_file,
LoadCheckpoint(param_file, exe);

mx_float index;
wchar_t next;
wchar_t next = 0;
vector<mx_float> softmax;
softmax.resize(input_dim);
for (auto c : *ptext) {
Expand Down Expand Up @@ -642,7 +643,7 @@ void predictWithBuiltInRNNOp(wstring* ptext, int sequence_length, const string p
LoadCheckpoint(param_file, exe);

mx_float index;
wchar_t next;
wchar_t next = 0;
vector<mx_float> softmax;
softmax.resize(input_dim);
for (auto c : *ptext) {
Expand Down
43 changes: 1 addition & 42 deletions cpp-package/include/mxnet-cpp/op_suppl.h
Original file line number Diff line number Diff line change
Expand Up @@ -119,48 +119,7 @@ inline Symbol Crop(const std::string& symbol_name,


/*!
 * \brief Slice input equally along specified axis.
 * \param data input symbol.
 * \param num_outputs Number of outputs to be sliced.
 * \param axis Dimension along which to slice.
 * \param squeeze_axis If true AND the sliced dimension becomes 1, squeeze that dimension.
 * \return new symbol
 */
inline Symbol SliceChannel(Symbol data,
                           int num_outputs,
                           int axis = 1,
                           bool squeeze_axis = false) {
  // Configure the "SliceChannel" operator, attach the input via the trailing
  // operator() call, and create an auto-named symbol (no explicit name).
  return Operator("SliceChannel")
    .SetParam("num_outputs", num_outputs)
    .SetParam("axis", axis)
    .SetParam("squeeze_axis", squeeze_axis) (data)
    .CreateSymbol();
}


/*!
 * \brief Slice input equally along specified axis.
 * \param symbol_name name of the resulting symbol.
 * \param data input symbol.
 * \param num_outputs Number of outputs to be sliced.
 * \param axis Dimension along which to slice.
 * \param squeeze_axis If true AND the sliced dimension becomes 1, squeeze that dimension.
 * \return new symbol
 */
inline Symbol SliceChannel(const std::string& symbol_name,
                           Symbol data,
                           int num_outputs,
                           int axis = 1,
                           bool squeeze_axis = false) {
  // Same as the unnamed overload above, but the created symbol is given the
  // caller-supplied name.
  return Operator("SliceChannel")
    .SetParam("num_outputs", num_outputs)
    .SetParam("axis", axis)
    .SetParam("squeeze_axis", squeeze_axis) (data)
    .CreateSymbol(symbol_name);
}

/*!
* \breif Apply activation function to input.
* \brief Apply activation function to input.
* Softmax Activation is only available with CUDNN on GPU and will be
* computed at each location across channel if input is 4D.
* \param symbol_name name of the resulting symbol.
Expand Down
8 changes: 4 additions & 4 deletions src/operator/tensor/matrix_op-inl.h
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,10 @@ struct ReshapeParam : public dmlc::Parameter<ReshapeParam> {
DMLC_DECLARE_FIELD(shape)
.set_default(nnvm::Tuple<int>())
.describe("The target shape");
DMLC_DECLARE_FIELD(keep_highest).set_default(false)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

dont' change this. Change code in cpp-package that's using this.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Because keep_highest is deprecated?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

yes

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

ok

.describe("(Deprecated! Use ``shape`` instead.) Whether keep the highest dim unchanged."
"If set to true, then the first dim in target_shape is ignored,"
"and always fixed as input");
DMLC_DECLARE_FIELD(reverse)
.set_default(false)
.describe("If true then the special values are inferred from right to left");
Expand All @@ -40,10 +44,6 @@ struct ReshapeParam : public dmlc::Parameter<ReshapeParam> {
.describe("(Deprecated! Use ``shape`` instead.) "
"Target new shape. One and only one dim can be 0, "
"in which case it will be inferred from the rest of dims");
DMLC_DECLARE_FIELD(keep_highest).set_default(false)
.describe("(Deprecated! Use ``shape`` instead.) Whether keep the highest dim unchanged."
"If set to true, then the first dim in target_shape is ignored,"
"and always fixed as input");
}
};

Expand Down
10 changes: 5 additions & 5 deletions src/operator/tensor/multisample_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ inline bool MultiSampleOpShape(const nnvm::NodeAttrs& attrs,
// Get shape to be sampled for each parameter set.
const MultiSampleParam& param = nnvm::get<MultiSampleParam>(attrs.parsed);
TShape sshape = param.shape;
for ( int i = 0; i < sshape.ndim(); ++i ) {
for ( size_t i = 0; i < sshape.ndim(); ++i ) {
CHECK_GT((int)sshape[i], 0) << "shape parameter must be non-zero within each dimension";
}
// Examine output shape whether it is already defined.
Expand All @@ -57,10 +57,10 @@ inline bool MultiSampleOpShape(const nnvm::NodeAttrs& attrs,
tshape = TShape(tshape.begin(), tshape.begin()+(tshape.ndim()-sshape.ndim()));
}
// Shape assignment/checking for inputs.
for ( int i = 0; i < in_attrs->size(); ++i ) {
for ( size_t i = 0; i < in_attrs->size(); ++i ) {
if ( !shape_assign(&tshape, (*in_attrs)[i])) return false;
}
for ( int i = 0; i < in_attrs->size(); ++i ) {
for ( size_t i = 0; i < in_attrs->size(); ++i ) {
SHAPE_ASSIGN_CHECK(*in_attrs, i, tshape);
}
if ( tshape.ndim() > 0 ) {
Expand All @@ -84,10 +84,10 @@ inline bool MultiSampleOpType(const nnvm::NodeAttrs& attrs,

// All inputs must have same type.
int dtype = -1;
for ( int i = 0; i < in_attrs->size(); ++i ) {
for ( size_t i = 0; i < in_attrs->size(); ++i ) {
if (!type_assign(&dtype, (*in_attrs)[i])) return false;
}
for ( int i = 0; i < in_attrs->size(); ++i ) {
for ( size_t i = 0; i < in_attrs->size(); ++i ) {
TYPE_ASSIGN_CHECK(*in_attrs, i, dtype);
}
if (-1 == dtype) return false;
Expand Down