Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Permalink
Merge pull request #3 from dmlc/master
Browse files Browse the repository at this point in the history
merge dmlc/master
  • Loading branch information
mli committed Sep 15, 2015
2 parents ceefa96 + f4207b5 commit 0f9a849
Show file tree
Hide file tree
Showing 30 changed files with 633 additions and 310 deletions.
1 change: 1 addition & 0 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ env:
- TASK=python CXX=g++
- TASK=python3 CXX=g++
- TASK=python_naive CXX=g++
- TASK=python_perdev CXX=g++
- TASK=cpp_unittest CXX=g++

# dependent apt packages
Expand Down
2 changes: 1 addition & 1 deletion dmlc-core
81 changes: 80 additions & 1 deletion include/mxnet/base.h
Original file line number Diff line number Diff line change
@@ -1,12 +1,13 @@
/*!
* Copyright (c) 2015 by Contributors
* \file base.h
* \brief configuation of mxnet
* \brief configuration of mxnet as well as basic data structure.
*/
#ifndef MXNET_BASE_H_
#define MXNET_BASE_H_

#include <dmlc/base.h>
#include <dmlc/io.h>
#include <dmlc/type_traits.h>
#include <dmlc/parameter.h>
#include <mshadow/tensor.h>
Expand Down Expand Up @@ -62,6 +63,84 @@ typedef mshadow::default_real_t real_t;
typedef mshadow::TShape TShape;
/*! \brief storage container type */
typedef mshadow::TBlob TBlob;

/*! \brief Context information about the execution environment. */
struct Context {
  /*! \brief the device type we run the op on; can be cpu::kDevMask or gpu::kDevMask */
  int32_t dev_mask;
  /*! \brief device id we are going to run it on */
  int32_t dev_id;
  /*! \brief default constructor: CPU device 0 */
  Context() : dev_mask(cpu::kDevMask), dev_id(0) {}
  /*!
   * \brief constructor of context
   * \param dev_mask the device mask
   * \param dev_id the device id
   */
  Context(int dev_mask, int dev_id)
      : dev_mask(dev_mask), dev_id(dev_id) {}
  /*!
   * \brief check if current context equals another one
   * \param b another context to compare
   * \return whether dev mask and id are same
   */
  inline bool operator==(const Context &b) const {
    return dev_mask == b.dev_mask && dev_id == b.dev_id;
  }
  /*!
   * \brief check if current context not equals another one
   * \param b another context to compare
   * \return whether they are not the same
   */
  inline bool operator!=(const Context &b) const {
    return !(*this == b);
  }
  /*!
   * \brief save the content into binary stream
   * \param strm the output stream to write to
   */
  void Save(dmlc::Stream *strm) const {
    // fixed-width fields are written raw, so the on-disk layout is 8 bytes
    strm->Write(&dev_mask, sizeof(dev_mask));
    strm->Write(&dev_id, sizeof(dev_id));
  }
  /*!
   * \brief load the content from binary stream
   * \param strm the input stream to read from
   * \return whether the load is successful
   */
  bool Load(dmlc::Stream *strm) {
    // Read returns the number of bytes actually read; a short read means
    // the stream ended or is corrupt, so fail instead of using garbage.
    if (strm->Read(&dev_mask, sizeof(int32_t)) != sizeof(int32_t)) return false;
    if (strm->Read(&dev_id, sizeof(int32_t)) != sizeof(int32_t)) return false;
    return true;
  }
  /*! \brief the maximal device mask, cpu = 1, gpu = 2 */
  static const int32_t kMaxDevMask = 2;
  /*!
   * \brief A dedicated ID for pinned cpu memory.
   * Any normal CPU ID should be less than this number.
   */
  static const int32_t kPinnedMemoryID = 16;
};

/*!
 * \brief execution time context.
 * Carries the per-run state (the device stream) an operator needs
 * when it actually executes.
 */
struct RunContext {
  /*!
   * \brief the stream of the device, can be NULL or Stream<gpu>* in GPU mode
   */
  void *stream;
  /*!
   * \brief get mshadow stream from Context
   * \return the mshadow stream
   * \tparam xpu the device type of the stream
   */
  template<typename xpu>
  inline mshadow::Stream<xpu>* get_stream() const {
    // The stream is stored type-erased as void*; the caller supplies the
    // device type so we can cast it back to the concrete stream type.
    typedef mshadow::Stream<xpu> StreamType;
    return static_cast<StreamType*>(stream);
  }
};
} // namespace mxnet

//! \cond Doxygen_Suppress
Expand Down
131 changes: 0 additions & 131 deletions include/mxnet/context.h

This file was deleted.

8 changes: 4 additions & 4 deletions include/mxnet/engine.h
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@
#endif
#include <vector>
#include "./base.h"
#include "./context.h"

namespace mxnet {
/*! \brief namespace of engine internal types. */
Expand All @@ -28,13 +27,14 @@ typedef Opr* OprHandle;
} // namespace engine

#if DMLC_USE_CXX11

/*! \brief Function property, used to hint what action is pushed to engine. */
enum class FnProperty {
  /*! \brief Normal (compute) operation */
  kNormal,
  /*! \brief Copy operation between CPU and GPU */
  kCopy,
  /*! \brief Copy operation from GPU to other devices */
  kCopyFromGPU,
  /*! \brief Copy operation from CPU to other devices */
  kCopyToGPU,
  /*! \brief Asynchronous function call */
  kAsync
}; // enum class FnProperty
Expand Down
2 changes: 1 addition & 1 deletion include/mxnet/kvstore.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
#if DMLC_USE_CXX11
#include <functional>
#endif // DMLC_USE_CXX11
#include "ndarray.h"
#include "./ndarray.h"

namespace mxnet {

Expand Down
3 changes: 1 addition & 2 deletions include/mxnet/ndarray.h
Original file line number Diff line number Diff line change
Expand Up @@ -13,10 +13,9 @@
#include <dmlc/registry.h>
#include <memory>
#include "./base.h"
#include "./context.h"
#include "./storage.h"
#include "./context.h"
#include "./engine.h"

// check c++11
#if DMLC_USE_CXX11 == 0
#error "cxx11 was required for ndarray module"
Expand Down
10 changes: 7 additions & 3 deletions include/mxnet/operator.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
#include <string>
#include <utility>
#include "./base.h"
#include "./context.h"
#include "./resource.h"

namespace mxnet {
/*! \brief operation request type to Forward and Backward */
Expand Down Expand Up @@ -230,18 +230,22 @@ class OperatorProperty {
* \brief Declare additional resource required in forward pass.
* These additional resources will be presented in OpContext.requested
* in the same order of the returned Resource.
* \param in_shape The input shape to the operator, corresponds to shapes of in_data.
* \return Additional resource request
*/
virtual std::vector<ResourceRequest> ForwardResource() const {
virtual std::vector<ResourceRequest> ForwardResource(
const std::vector<TShape> &in_shape) const {
return std::vector<ResourceRequest>();
}
/*!
* \brief Declare additional resource required in backward pass.
* These additional resources will be presented in OpContext.requested
* in the same order of the returned Resource.
* \param in_shape The input shape to the operator, corresponds to shapes of in_data.
* \return Additional resource request
*/
virtual std::vector<ResourceRequest> BackwardResource() const {
virtual std::vector<ResourceRequest> BackwardResource(
const std::vector<TShape> &in_shape) const {
return std::vector<ResourceRequest>();
}
/*!
Expand Down
Loading

0 comments on commit 0f9a849

Please sign in to comment.