This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

WIP: Operator tuning #8670

Closed
wants to merge 1 commit into from
10 changes: 10 additions & 0 deletions CMakeLists.txt
@@ -35,6 +35,7 @@ mxnet_option(USE_LAPACK "Build with lapack support" ON IF NOT MSVC)
mxnet_option(USE_MKL_IF_AVAILABLE "Use MKL if found" ON)
mxnet_option(USE_MKLML_MKL "Use MKLML variant of MKL (if MKL found)" ON IF USE_MKL_IF_AVAILABLE AND UNIX AND (NOT APPLE))
mxnet_option(USE_MKL_EXPERIMENTAL "Use experimental MKL (if MKL enabled and found)" OFF)
mxnet_option(USE_OPERATOR_TUNING "Enable auto-tuning of operators" ON)
mxnet_option(USE_GPERFTOOLS "Build with GPerfTools support (if found)" ON)
mxnet_option(USE_JEMALLOC "Build with Jemalloc support" ON)
mxnet_option(USE_PROFILER "Build with Profiler support" OFF)
@@ -353,6 +354,10 @@ if(USE_PLUGINS_WARPCTC)
list(APPEND CUDA ${PLUGINS_CUSRC})
endif()

if(USE_OPERATOR_TUNING)
add_definitions(-DMXNET_USE_OPERATOR_TUNING=1)
endif()

if(USE_PLUGIN_CAFFE)
if(NOT USE_CUDA)
set(CPU_ONLY ON)
@@ -471,6 +476,11 @@ else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc")
endif()

if(USE_OPERATOR_TUNING)
add_executable(mxnet-tblgen tools/tblgen/tblgen.cc)
target_link_libraries(mxnet-tblgen ${mxnet_LINKER_LIBS})
endif()

set(MXNET_INSTALL_TARGETS mxnet)
if(${CMAKE_SYSTEM_NAME} STREQUAL "Darwin" AND USE_MXNET_LIB_NAMING)
add_library(mxnet MODULE ${SOURCE})
4 changes: 4 additions & 0 deletions Makefile
@@ -131,6 +131,10 @@ ifeq ($(USE_MKL2017), 1)
LDFLAGS += -liomp5
endif

ifeq ($(USE_OPERATOR_TUNING), 1)
CFLAGS += -DMXNET_USE_OPERATOR_TUNING=1
endif

# verify existence of separate lapack library when using blas/openblas/atlas
# switch off lapack support in case it can't be found
# issue covered with this
6 changes: 6 additions & 0 deletions make/config.mk
@@ -153,6 +153,12 @@ LIBJVM=$(JAVA_HOME)/jre/lib/amd64/server
# sudo apt-get install -y libcurl4-openssl-dev
USE_S3 = 0

#----------------------------
# performance settings
#----------------------------
# Use operator tuning
USE_OPERATOR_TUNING = 1

# Use gperftools if found
USE_GPERFTOOLS = 1

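
The three build files above all feed the same compile-time switch: when USE_OPERATOR_TUNING is enabled, MXNET_USE_OPERATOR_TUNING=1 is passed to the compiler. Below is a minimal, hypothetical C++ sketch (not taken from this PR) of how a dispatch path might consume such a definition; the function name and threshold values are illustrative assumptions only.

#include <cstddef>

namespace example {

// Decide whether a CPU kernel is large enough to be worth running with threads.
inline bool UseParallelKernel(std::size_t num_elements) {
#if MXNET_USE_OPERATOR_TUNING
  // With tuning enabled, a tuned per-operator threshold (placeholder value here)
  // would decide when threading overhead pays off.
  const std::size_t tuned_threshold = 4096;
  return num_elements >= tuned_threshold;
#else
  // Without tuning, fall back to a fixed rule.
  return num_elements >= 1024;
#endif
}

}  // namespace example
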
94 changes: 69 additions & 25 deletions src/operator/mshadow_op.h
@@ -29,6 +29,7 @@
#include "math.h"
#include "math_functions-inl.h"
#include "special_functions-inl.h"
#include "./mxnet_op.h"

#ifdef __CUDACC__
#include <cuda_fp16.h>
@@ -38,6 +39,24 @@ namespace mxnet {
namespace op {
namespace mshadow_op {

/*!
* \brief Use the 'MXNET_TUNABLE_MSHADOW_OP_FWD_AND_BWD' macro outside of the mshadow_op namespace
* See mxnet_op.h for a description of 'MXNET_TUNABLE_MSHADOW_OP_FWD_AND_BWD'
*
* \note An entry for the operator must also be added in operator_tune.cc, which will register it
* for auto-tuning and also hold its workload weight
*/
#define MSHADOW_OP_DECLARE_TUNABLE_FWD_AND_BWD(__op$) \
} MXNET_TUNABLE_MSHADOW_OP_FWD_AND_BWD(mshadow_op::__op$) namespace mshadow_op { // NOLINT(*)
/*!
* \brief Use the 'MXNET_TUNABLE_MSHADOW_OP_BACKWARD' macro outside of the mshadow_op namespace
* See mxnet_op.h for a description of 'MXNET_TUNABLE_MSHADOW_OP_BACKWARD'
*
* \note An entry for the operator must also be added in operator_tune.cc, which will register it
* for auto-tuning and also hold its workload weight
*/
#define MSHADOW_OP_DECLARE_TUNABLE_BACKWARD(__op$) \
} MXNET_TUNABLE_MSHADOW_OP_BACKWARD(mshadow_op::__op$) namespace mshadow_op { // NOLINT(*)
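
// Illustrative usage (not part of this diff): a hand-written functor defined
// inside namespace mshadow_op can be marked tunable with the macro above.
// 'example_identity' is a hypothetical operator name; as the notes above say,
// it would also need an entry in operator_tune.cc to carry its workload weight.
struct example_identity {
  template<typename DType>
  MSHADOW_XINLINE static DType Map(DType a) {
    return a;
  }
};
MSHADOW_OP_DECLARE_TUNABLE_FWD_AND_BWD(example_identity)
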
#ifdef __CUDA_ARCH__
__constant__ const float PI = 3.14159265358979323846;
#else
@@ -48,36 +67,41 @@ using std::enable_if;
using std::is_unsigned;

#define MXNET_UNARY_MATH_OP(name, expr) \
struct name { \
template<typename DType> \
MSHADOW_XINLINE static DType Map(DType a) { \
return DType(expr); \
} \
}
struct name { \
template<typename DType> \
MSHADOW_XINLINE static DType Map(DType a) { \
return DType(expr); \
} \
}; \
MSHADOW_OP_DECLARE_TUNABLE_FWD_AND_BWD(name)


#define MXNET_UNARY_MATH_OP_NC(name, expr) \
struct name { \
template<typename DType> \
MSHADOW_XINLINE static DType Map(DType a) { \
return (expr); \
} \
}
struct name { \
template<typename DType> \
MSHADOW_XINLINE static DType Map(DType a) { \
return (expr); \
} \
}; \
MSHADOW_OP_DECLARE_TUNABLE_FWD_AND_BWD(name)

#define MXNET_BINARY_MATH_OP(name, expr) \
struct name { \
template<typename DType> \
MSHADOW_XINLINE static DType Map(DType a, DType b) { \
return DType(expr); \
} \
}
struct name { \
template<typename DType> \
MSHADOW_XINLINE static DType Map(DType a, DType b) { \
return DType(expr); \
} \
}; \
MSHADOW_OP_DECLARE_TUNABLE_FWD_AND_BWD(name)

#define MXNET_BINARY_MATH_OP_NC(name, expr) \
struct name { \
template<typename DType> \
MSHADOW_XINLINE static DType Map(DType a, DType b) { \
return (expr); \
} \
}
struct name { \
template<typename DType> \
MSHADOW_XINLINE static DType Map(DType a, DType b) { \
return (expr); \
} \
}; \
MSHADOW_OP_DECLARE_TUNABLE_FWD_AND_BWD(name)

#define MXNET_SIMPLE_UNARY_MATH_OP(name) MXNET_UNARY_MATH_OP(name, math::name(a))
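
// Illustrative sketch (not part of this diff): because the math-op macros now
// end with MSHADOW_OP_DECLARE_TUNABLE_FWD_AND_BWD, a single invocation both
// defines the functor and declares it tunable. 'example_cube' is a hypothetical
// operator name and would still need an operator_tune.cc entry.
MXNET_UNARY_MATH_OP_NC(example_cube, a * a * a);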

@@ -133,6 +157,7 @@ struct softrelu {
}
}
};
MSHADOW_OP_DECLARE_TUNABLE_FWD_AND_BWD(softrelu)

MXNET_UNARY_MATH_OP(softrelu_grad, -math::expm1(-a));

@@ -153,6 +178,7 @@ struct log10_grad {
return DType(0.4342944819f / static_cast<float>(a));
}
};
MSHADOW_OP_DECLARE_TUNABLE_BACKWARD(log10_grad)

template<>
MSHADOW_XINLINE double log10_grad::Map<double>(double a) {
@@ -168,6 +194,7 @@ struct log2_grad {
return DType(1.442695041f / static_cast<float>(a));
}
};
MSHADOW_OP_DECLARE_TUNABLE_BACKWARD(log2_grad)

template<>
MSHADOW_XINLINE double log2_grad::Map<double>(double a) {
@@ -262,6 +289,7 @@ struct sign {
return DType(0);
}
};
MSHADOW_OP_DECLARE_TUNABLE_FWD_AND_BWD(sign)

MXNET_UNARY_MATH_OP_NC(sign_grad, DType(0));

@@ -332,6 +360,7 @@ struct rint {
return DType((af - floor) <= (ceil - af) ? floor : ceil);
}
};
MSHADOW_OP_DECLARE_TUNABLE_FWD_AND_BWD(rint)

/*! \brief used to round number to integer nearest to 0 */
struct fix {
@@ -342,6 +371,7 @@ struct fix {
return DType((floor > 0 ? floor : -floor) < (ceil > 0 ? ceil : -ceil) ? floor : ceil);
}
};
MSHADOW_OP_DECLARE_TUNABLE_FWD_AND_BWD(fix)

/*! \brief used for generate gradient of MAE loss*/
MXNET_BINARY_MATH_OP_NC(minus_sign, a - b > DType(0) ? DType(1) : -DType(1));
@@ -404,6 +434,7 @@ struct mod {
}
}
};
MSHADOW_OP_DECLARE_TUNABLE_FWD_AND_BWD(mod)

template<>
MSHADOW_XINLINE mshadow::half::half2_t mod::Map<mshadow::half::half2_t>
@@ -418,6 +449,8 @@ struct mod_grad {
return DType(0);
}
};
MSHADOW_OP_DECLARE_TUNABLE_BACKWARD(mod_grad)

template<>
MSHADOW_XINLINE double mod_grad::Map<double>(double a, double b) {
return 1.0;
@@ -453,6 +486,8 @@ struct mod_rgrad {
return DType(0);
}
};
MSHADOW_OP_DECLARE_TUNABLE_BACKWARD(mod_rgrad)

template<>
MSHADOW_XINLINE double mod_rgrad::Map<double>(double a, double b) {
return -::floor(a/b);
@@ -516,6 +551,7 @@ struct rmod {
}
}
};
MSHADOW_OP_DECLARE_TUNABLE_FWD_AND_BWD(rmod)

template<>
MSHADOW_XINLINE mshadow::half::half2_t rmod::Map<mshadow::half::half2_t>
@@ -530,6 +566,8 @@ struct rmod_grad {
return DType(0);
}
};
MSHADOW_OP_DECLARE_TUNABLE_BACKWARD(rmod_grad)

template<>
MSHADOW_XINLINE double rmod_grad::Map<double>(double a, double b) {
return -::floor(b/a);
@@ -571,6 +609,7 @@ struct clip {
}
}
};
MSHADOW_OP_DECLARE_TUNABLE_FWD_AND_BWD(clip)

/***** gamma ******/

@@ -584,6 +623,7 @@ struct gamma_grad {
return DType(math::tgamma(af) * special_functions::cephes::psi<float>(af));
}
};
MSHADOW_OP_DECLARE_TUNABLE_BACKWARD(gamma_grad)

template<>
MSHADOW_XINLINE double gamma_grad::Map<double>(double a) {
@@ -601,6 +641,7 @@ struct gammaln_grad {
return DType(special_functions::cephes::psi<float>(a));
}
};
MSHADOW_OP_DECLARE_TUNABLE_BACKWARD(gammaln_grad)

template<>
MSHADOW_XINLINE double gammaln_grad::Map<double>(double a) {
@@ -632,6 +673,7 @@ struct smooth_l1_loss {
}
}
}; // struct smooth_l1_loss
MSHADOW_OP_DECLARE_TUNABLE_FWD_AND_BWD(smooth_l1_loss)

/* The derivative of smooth l1 loss is
* f'(x) = sigma^2 * x, |x| < 1 / sigma^2
@@ -653,6 +695,7 @@ struct smooth_l1_gradient {
}
}
}; // struct smooth_l1_derivative
MSHADOW_OP_DECLARE_TUNABLE_BACKWARD(smooth_l1_gradient)

/*! \brief product reducer */
struct product {
@@ -754,6 +797,7 @@ struct nansum_grad {
return isnan_typed::IsNan(a) ? DType(0) : DType(1);
}
};
MSHADOW_OP_DECLARE_TUNABLE_BACKWARD(nansum_grad)

/*! \brief product reducer that ignores NaN values in the input */
struct nanprod {
@@ -790,7 +834,7 @@ struct nanprod_grad {
return isnan_typed::IsNan(a) ? DType(0) : b / a;
}
};

MSHADOW_OP_DECLARE_TUNABLE_BACKWARD(nanprod_grad)
} // namespace mshadow_op
} // namespace op
} // namespace mxnet