From 576e4ac9656e1fc3c2b21eb8c6e649207c36698c Mon Sep 17 00:00:00 2001
From: Kellen Sunderland
Date: Sun, 26 Aug 2018 09:46:38 +0200
Subject: [PATCH] [MXNET-860] Avoid implicit double conversions

This avoids some implicit conversions to double and should also unify
behaviour across C++11-compliant compilers.
---
 src/io/image_det_aug_default.cc                |  2 +-
 src/operator/contrib/adaptive_avg_pooling.cc   |  4 ++--
 src/operator/contrib/multi_proposal.cc         |  4 ++--
 src/operator/contrib/multibox_detection.cc     |  4 ++--
 src/operator/contrib/proposal.cc               |  4 ++--
 src/operator/contrib/roi_align.cc              |  8 ++++----
 src/operator/correlation-inl.h                 |  8 ++++----
 src/operator/image/image_random-inl.h          |  8 ++++----
 src/operator/nn/batch_norm.cc                  |  2 +-
 src/operator/nn/lrn-inl.h                      |  6 +++---
 src/operator/nn/mkldnn/mkldnn_batch_norm-inl.h |  2 +-
 src/operator/nn/pooling.cc                     | 12 ++++++------
 src/operator/pooling_v1-inl.h                  | 10 +++++-----
 src/operator/roi_pooling.cc                    | 16 ++++++++--------
 src/operator/spatial_transformer.cc            |  8 ++++----
 src/operator/tensor/elemwise_binary_op-inl.h   |  8 ++++----
 src/operator/tensor/elemwise_binary_op.h       |  4 ++--
 17 files changed, 55 insertions(+), 55 deletions(-)
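Note (illustrative, not part of the patch to apply): the change is about C++
overload resolution. The C library declares only double exp(double), double
fabs(double), and so on, so an unqualified call such as exp(dw) with a float
argument promotes to double, computes in double precision, and converts the
result back. <cmath> declares float, double, and long double overloads in
namespace std, so std::exp(dw) stays in float throughout. A minimal sketch,
assuming nothing beyond the standard headers (file and variable names are
made up):

    // sketch.cc -- illustrative only, not part of this patch
    #include <math.h>   // C declarations: double exp(double), ...
    #include <cmath>    // C++ overload set in namespace std
    #include <cstdio>

    int main() {
      float dw = 0.1f;
      // Unqualified: may resolve to the C function double exp(double),
      // promoting dw to double (which overloads are visible at global
      // scope varies by implementation).
      float w_old = exp(dw);
      // Qualified: always the float overload, no double round trip.
      float w_new = std::exp(dw);
      std::printf("%.9g %.9g\n", w_old, w_new);
      return 0;
    }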
diff --git a/src/io/image_det_aug_default.cc b/src/io/image_det_aug_default.cc
index 79e19318366e..15169d8f760d 100644
--- a/src/io/image_det_aug_default.cc
+++ b/src/io/image_det_aug_default.cc
@@ -560,7 +560,7 @@ class DefaultImageDetAugmenter : public ImageAugmenter {
         }
         cv::cvtColor(res, res, CV_HLS2BGR);
       }
-      if (fabs(c) > 1e-3) {
+      if (std::fabs(c) > 1e-3) {
         cv::Mat tmp = res;
         tmp.convertTo(res, -1, c + 1.f, 0);
       }
diff --git a/src/operator/contrib/adaptive_avg_pooling.cc b/src/operator/contrib/adaptive_avg_pooling.cc
index 00ab36605bf4..a65f5fe8d436 100644
--- a/src/operator/contrib/adaptive_avg_pooling.cc
+++ b/src/operator/contrib/adaptive_avg_pooling.cc
@@ -26,8 +26,8 @@
 // #include "elemwise_op_common.h"
 #include "../elemwise_op_common.h"
 
-#define START_IND(a, b, c) static_cast<int>(floor(static_cast<float>(a * c) / b))
-#define END_IND(a, b, c) static_cast<int>(ceil(static_cast<float>((a + 1) * c) / b))
+#define START_IND(a, b, c) static_cast<int>(std::floor(static_cast<float>(a * c) / b))
+#define END_IND(a, b, c) static_cast<int>(std::ceil(static_cast<float>((a + 1) * c) / b))
 
 namespace mxnet {
 namespace op {
diff --git a/src/operator/contrib/multi_proposal.cc b/src/operator/contrib/multi_proposal.cc
index 3f5796e844a8..e77a0b5aeba1 100644
--- a/src/operator/contrib/multi_proposal.cc
+++ b/src/operator/contrib/multi_proposal.cc
@@ -67,8 +67,8 @@ inline void BBoxTransformInv(const mshadow::Tensor& boxes,
       float pred_ctr_x = dx * width + ctr_x;
       float pred_ctr_y = dy * height + ctr_y;
-      float pred_w = exp(dw) * width;
-      float pred_h = exp(dh) * height;
+      float pred_w = std::exp(dw) * width;
+      float pred_h = std::exp(dh) * height;
 
       float pred_x1 = pred_ctr_x - 0.5 * (pred_w - 1.0);
       float pred_y1 = pred_ctr_y - 0.5 * (pred_h - 1.0);
diff --git a/src/operator/contrib/multibox_detection.cc b/src/operator/contrib/multibox_detection.cc
index e5a7dd8fb638..f92460e9e5e9 100644
--- a/src/operator/contrib/multibox_detection.cc
+++ b/src/operator/contrib/multibox_detection.cc
@@ -62,8 +62,8 @@ inline void TransformLocations(DType *out, const DType *anchors,
     DType ph = loc_pred[3];
     DType ox = px * vx * aw + ax;
     DType oy = py * vy * ah + ay;
-    DType ow = exp(pw * vw) * aw / 2;
-    DType oh = exp(ph * vh) * ah / 2;
+    DType ow = std::exp(pw * vw) * aw / 2;
+    DType oh = std::exp(ph * vh) * ah / 2;
     out[0] = clip ? std::max(DType(0), std::min(DType(1), ox - ow)) : (ox - ow);
     out[1] = clip ? std::max(DType(0), std::min(DType(1), oy - oh)) : (oy - oh);
     out[2] = clip ? std::max(DType(0), std::min(DType(1), ox + ow)) : (ox + ow);
diff --git a/src/operator/contrib/proposal.cc b/src/operator/contrib/proposal.cc
index fa713bba825f..935372d34dbe 100644
--- a/src/operator/contrib/proposal.cc
+++ b/src/operator/contrib/proposal.cc
@@ -63,8 +63,8 @@ inline void BBoxTransformInv(const mshadow::Tensor& boxes,
       float pred_ctr_x = dx * width + ctr_x;
       float pred_ctr_y = dy * height + ctr_y;
-      float pred_w = exp(dw) * width;
-      float pred_h = exp(dh) * height;
+      float pred_w = std::exp(dw) * width;
+      float pred_h = std::exp(dh) * height;
 
       float pred_x1 = pred_ctr_x - 0.5 * (pred_w - 1.0);
       float pred_y1 = pred_ctr_y - 0.5 * (pred_h - 1.0);
diff --git a/src/operator/contrib/roi_align.cc b/src/operator/contrib/roi_align.cc
index 22611273cf50..65ee626a17ca 100644
--- a/src/operator/contrib/roi_align.cc
+++ b/src/operator/contrib/roi_align.cc
@@ -182,9 +182,9 @@ void ROIAlignForward(
       // We use roi_bin_grid to sample the grid and mimic integral
       int roi_bin_grid_h = (sampling_ratio > 0)
           ? sampling_ratio
-          : ceil(roi_height / pooled_height);  // e.g., = 2
+          : std::ceil(roi_height / pooled_height);  // e.g., = 2
       int roi_bin_grid_w =
-          (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
+          (sampling_ratio > 0) ? sampling_ratio : std::ceil(roi_width / pooled_width);
 
       // We do average (integral) pooling inside a bin
       const T count = roi_bin_grid_h * roi_bin_grid_w;  // e.g. = 4
@@ -357,9 +357,9 @@ void ROIAlignBackward(
       // We use roi_bin_grid to sample the grid and mimic integral
       int roi_bin_grid_h = (sampling_ratio > 0)
           ? sampling_ratio
-          : ceil(roi_height / pooled_height);  // e.g., = 2
+          : std::ceil(roi_height / pooled_height);  // e.g., = 2
       int roi_bin_grid_w =
-          (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
+          (sampling_ratio > 0) ? sampling_ratio : std::ceil(roi_width / pooled_width);
 
       // We do average (integral) pooling inside a bin
       const T count = roi_bin_grid_h * roi_bin_grid_w;  // e.g. = 4
diff --git a/src/operator/correlation-inl.h b/src/operator/correlation-inl.h
index 7266a0a91844..9dca44e55121 100644
--- a/src/operator/correlation-inl.h
+++ b/src/operator/correlation-inl.h
@@ -98,9 +98,9 @@ class CorrelationOp : public Operator {
     border_size_ = param_.max_displacement + kernel_radius_;
     stride1 = param_.stride1;
     stride2 = param_.stride2;
-    top_width_ = ceil(static_cast<float>(paddedbottomwidth - border_size_ * 2)\
+    top_width_ = std::ceil(static_cast<float>(paddedbottomwidth - border_size_ * 2)\
     / static_cast<float>(stride1));
-    top_height_ = ceil(static_cast<float>(paddedbottomheight - border_size_ * 2)\
+    top_height_ = std::ceil(static_cast<float>(paddedbottomheight - border_size_ * 2)\
     / static_cast<float>(stride1));
     neighborhood_grid_radius_ = param_.max_displacement / stride2;
     neighborhood_grid_width_ = neighborhood_grid_radius_ * 2 + 1;
@@ -211,9 +211,9 @@ void Init(const std::vector<std::pair<std::string, std::string> >& kwargs) overr
     border_size_ = param_.max_displacement + kernel_radius_;
     stride1 = param_.stride1;
     stride2 = param_.stride2;
-    top_width_ = ceil(static_cast<float>(paddedbottomwidth - border_size_ * 2)\
+    top_width_ = std::ceil(static_cast<float>(paddedbottomwidth - border_size_ * 2)\
    / static_cast<float>(stride1));
-    top_height_ = ceil(static_cast<float>(paddedbottomheight - border_size_ * 2)\
+    top_height_ = std::ceil(static_cast<float>(paddedbottomheight - border_size_ * 2)\
    / static_cast<float>(stride1));
    neighborhood_grid_radius_ = param_.max_displacement / stride2;
    neighborhood_grid_width_ = neighborhood_grid_radius_ * 2 + 1;
diff --git a/src/operator/image/image_random-inl.h b/src/operator/image/image_random-inl.h
index 47beca1d506d..c64ed28ecc2d 100644
--- a/src/operator/image/image_random-inl.h
+++ b/src/operator/image/image_random-inl.h
@@ -418,10 +418,10 @@ void RGB2HLSConvert(const float& src_r,
   float diff;
 
   vmax = vmin = r;
-  vmax = fmax(vmax, g);
-  vmax = fmax(vmax, b);
-  vmin = fmin(vmin, g);
-  vmin = fmin(vmin, b);
+  vmax = std::fmax(vmax, g);
+  vmax = std::fmax(vmax, b);
+  vmin = std::fmin(vmin, g);
+  vmin = std::fmin(vmin, b);
 
   diff = vmax - vmin;
   l = (vmax + vmin) * 0.5f;
diff --git a/src/operator/nn/batch_norm.cc b/src/operator/nn/batch_norm.cc
index 4ea494d64e47..f28f5d7a436d 100644
--- a/src/operator/nn/batch_norm.cc
+++ b/src/operator/nn/batch_norm.cc
@@ -33,7 +33,7 @@
 #endif
 
 /*! \brief inverse standard deviation <-> variance */
-#define VARIANCE_TO_INVSTD(__var$, __eps$) (1.0/sqrt((__var$) + DType(__eps$)))
+#define VARIANCE_TO_INVSTD(__var$, __eps$) (1.0/std::sqrt((__var$) + DType(__eps$)))
 #define INVSTD_TO_VARIANCE(__invstd$, __eps$) ((1.0 / ((__invstd$) * (__invstd$))) - (__eps$))
 
 namespace mxnet {
diff --git a/src/operator/nn/lrn-inl.h b/src/operator/nn/lrn-inl.h
index 630449598128..7bd914485335 100644
--- a/src/operator/nn/lrn-inl.h
+++ b/src/operator/nn/lrn-inl.h
@@ -61,9 +61,9 @@ struct LRNParam : public dmlc::Parameter<LRNParam> {
   bool operator==(const LRNParam& other) const {
     return (this->nsize == other.nsize &&
-            fabs(this->alpha - other.alpha) < 1e-6 &&
-            fabs(this->beta - other.beta) < 1e-6 &&
-            fabs(this->knorm - other.knorm) < 1e-6);
+            std::fabs(this->alpha - other.alpha) < 1e-6 &&
+            std::fabs(this->beta - other.beta) < 1e-6 &&
+            std::fabs(this->knorm - other.knorm) < 1e-6);
   }
 };  // struct LRNParam
 
diff --git a/src/operator/nn/mkldnn/mkldnn_batch_norm-inl.h b/src/operator/nn/mkldnn/mkldnn_batch_norm-inl.h
index 9046836e8e75..4e3451c36919 100644
--- a/src/operator/nn/mkldnn/mkldnn_batch_norm-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_batch_norm-inl.h
@@ -34,7 +34,7 @@
 #include "./mkldnn_ops-inl.h"
 #include "./mkldnn_base-inl.h"
 
-#define VARIANCE_TO_INVSTD(__var$, __eps$) (1.0/sqrt((__var$) + DType(__eps$)))
+#define VARIANCE_TO_INVSTD(__var$, __eps$) (1.0/std::sqrt((__var$) + DType(__eps$)))
 #define INVSTD_TO_VARIANCE(__invstd$, __eps$) ((1.0 / ((__invstd$) * (__invstd$))) - (__eps$))
 namespace mxnet {
 namespace op {
diff --git a/src/operator/nn/pooling.cc b/src/operator/nn/pooling.cc
index c133b63623af..558722edb202 100644
--- a/src/operator/nn/pooling.cc
+++ b/src/operator/nn/pooling.cc
@@ -127,7 +127,7 @@ static bool PoolingShape(const nnvm::NodeAttrs &attrs,
           (dshape[2] + 2 * param.pad[0] - param.kernel[0]) /
               param.stride[0];
     } else {
-      oshape[2] = 1 + static_cast<int>(ceil(
+      oshape[2] = 1 + static_cast<int>(std::ceil(
                           static_cast<float>(dshape[2] + 2 * param.pad[0] -
                                              param.kernel[0]) /
                           param.stride[0]));
@@ -157,11 +157,11 @@ static bool PoolingShape(const nnvm::NodeAttrs &attrs,
           (dshape[3] + 2 * param.pad[1] - param.kernel[1]) /
              param.stride[1];
     } else {
-      oshape[2] = 1 + static_cast<int>(ceil(
+      oshape[2] = 1 + static_cast<int>(std::ceil(
                           static_cast<float>(dshape[2] + 2 * param.pad[0] -
                                              param.kernel[0]) /
                           param.stride[0]));
-      oshape[3] = 1 + static_cast<int>(ceil(
+      oshape[3] = 1 + static_cast<int>(std::ceil(
                           static_cast<float>(dshape[3] + 2 * param.pad[1] -
                                              param.kernel[1]) /
                           param.stride[1]));
@@ -192,15 +192,15 @@ static bool PoolingShape(const nnvm::NodeAttrs &attrs,
           (dshape[4] + 2 * param.pad[2] - param.kernel[2]) /
              param.stride[2];
     } else {
-      oshape[2] = 1 + static_cast<int>(ceil(
+      oshape[2] = 1 + static_cast<int>(std::ceil(
                           static_cast<float>(dshape[2] + 2 * param.pad[0] -
                                              param.kernel[0]) /
                           param.stride[0]));
-      oshape[3] = 1 + static_cast<int>(ceil(
+      oshape[3] = 1 + static_cast<int>(std::ceil(
                           static_cast<float>(dshape[3] + 2 * param.pad[1] -
                                              param.kernel[1]) /
                           param.stride[1]));
-      oshape[4] = 1 + static_cast<int>(ceil(
+      oshape[4] = 1 + static_cast<int>(std::ceil(
                           static_cast<float>(dshape[4] + 2 * param.pad[2] -
                                              param.kernel[2]) /
                           param.stride[2]));
diff --git a/src/operator/pooling_v1-inl.h b/src/operator/pooling_v1-inl.h
index 0a663265cbe7..8942ddc0d716 100644
--- a/src/operator/pooling_v1-inl.h
+++ b/src/operator/pooling_v1-inl.h
@@ -273,10 +273,10 @@ class PoolingV1Prop : public OperatorProperty {
         oshape[3] = 1 + (dshape[3] + 2 * param_.pad[1] - param_.kernel[1]) /
                             param_.stride[1];
       } else {
-        oshape[2] = 1 + static_cast<int>(ceil(static_cast<float>(
+        oshape[2] = 1 + static_cast<int>(std::ceil(static_cast<float>(
                             dshape[2] + 2 * param_.pad[0] - param_.kernel[0]) /
                             param_.stride[0]));
-        oshape[3] = 1 + static_cast<int>(ceil(static_cast<float>(
+        oshape[3] = 1 + static_cast<int>(std::ceil(static_cast<float>(
                             dshape[3] + 2 * param_.pad[1] - param_.kernel[1]) /
                             param_.stride[1]));
       }
@@ -296,13 +296,13 @@ class PoolingV1Prop : public OperatorProperty {
         oshape[4] = 1 + (dshape[4] + 2 * param_.pad[2] - param_.kernel[2]) /
                             param_.stride[2];
       } else {
-        oshape[2] = 1 + static_cast<int>(ceil(static_cast<float>(
+        oshape[2] = 1 + static_cast<int>(std::ceil(static_cast<float>(
                             dshape[2] + 2 * param_.pad[0] - param_.kernel[0]) /
                             param_.stride[0]));
-        oshape[3] = 1 + static_cast<int>(ceil(static_cast<float>(
+        oshape[3] = 1 + static_cast<int>(std::ceil(static_cast<float>(
                             dshape[3] + 2 * param_.pad[1] - param_.kernel[1]) /
                             param_.stride[1]));
-        oshape[4] = 1 + static_cast<int>(ceil(static_cast<float>(
+        oshape[4] = 1 + static_cast<int>(std::ceil(static_cast<float>(
                             dshape[4] + 2 * param_.pad[2] - param_.kernel[2]) /
                             param_.stride[2]));
       }
diff --git a/src/operator/roi_pooling.cc b/src/operator/roi_pooling.cc
index acff1f97dcce..124d811c46a0 100644
--- a/src/operator/roi_pooling.cc
+++ b/src/operator/roi_pooling.cc
@@ -66,10 +66,10 @@ inline void ROIPoolForward(const Tensor<cpu, 4, Dtype> &out,
     Dtype *top_data_n = top_data + n * out_size;
     Dtype *argmax_data_n = argmax_data + n * max_idx_size;
     int roi_batch_ind = bottom_rois_n[0];
-    int roi_start_w = round(bottom_rois_n[1] * spatial_scale_);
-    int roi_start_h = round(bottom_rois_n[2] * spatial_scale_);
-    int roi_end_w = round(bottom_rois_n[3] * spatial_scale_);
-    int roi_end_h = round(bottom_rois_n[4] * spatial_scale_);
+    int roi_start_w = std::round(bottom_rois_n[1] * spatial_scale_);
+    int roi_start_h = std::round(bottom_rois_n[2] * spatial_scale_);
+    int roi_end_w = std::round(bottom_rois_n[3] * spatial_scale_);
+    int roi_end_h = std::round(bottom_rois_n[4] * spatial_scale_);
 
     assert(roi_batch_ind >= 0);
     assert(static_cast<index_t>(roi_batch_ind) < data.size(0) /* batch size */);
@@ -171,10 +171,10 @@ inline void ROIPoolBackwardAcc(const Tensor<cpu, 4, Dtype> &in_grad,
         continue;
       }
 
-      int roi_start_w = round(offset_bottom_rois[1] * spatial_scale_);
-      int roi_start_h = round(offset_bottom_rois[2] * spatial_scale_);
-      int roi_end_w = round(offset_bottom_rois[3] * spatial_scale_);
-      int roi_end_h = round(offset_bottom_rois[4] * spatial_scale_);
+      int roi_start_w = std::round(offset_bottom_rois[1] * spatial_scale_);
+      int roi_start_h = std::round(offset_bottom_rois[2] * spatial_scale_);
+      int roi_end_w = std::round(offset_bottom_rois[3] * spatial_scale_);
+      int roi_end_h = std::round(offset_bottom_rois[4] * spatial_scale_);
 
       bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
                      h >= roi_start_h && h <= roi_end_h);
diff --git a/src/operator/spatial_transformer.cc b/src/operator/spatial_transformer.cc
index 13937290d90d..8c6779df1b7a 100644
--- a/src/operator/spatial_transformer.cc
+++ b/src/operator/spatial_transformer.cc
@@ -51,8 +51,8 @@ inline void BilinearSamplingForward(const Tensor<cpu, 4, DType> &output,
           const index_t grid_index = n * o_h * o_w * 2 + h * o_w + w;
           const DType y_real = (*(grid + grid_index + o_h * o_w) + 1) * (i_h - 1) / 2;
           const DType x_real = (*(grid + grid_index) + 1) * (i_w - 1) / 2;
-          const auto top_left_y = static_cast<int>(floor(y_real));
-          const auto top_left_x = static_cast<int>(floor(x_real));
+          const auto top_left_y = static_cast<int>(std::floor(y_real));
+          const auto top_left_x = static_cast<int>(std::floor(x_real));
           const DType top_left_y_w = 1.0 - (y_real - top_left_y);
           const DType top_left_x_w = 1.0 - (x_real - top_left_x);
           const int data_index = n * i_c * i_h * i_w + c * i_h * i_w +
@@ -99,8 +99,8 @@ inline void BilinearSamplingBackward(const Tensor<cpu, 4, DType> &input_grad,
           const index_t grid_src_index = n * o_h * o_w * 2 + h * o_w + w;
           const DType y_real = (*(grid_src + grid_src_index + o_h * o_w) + 1) * (i_h - 1) / 2;
           const DType x_real = (*(grid_src + grid_src_index) + 1) * (i_w - 1) / 2;
-          const auto top_left_y = static_cast<int>(floor(y_real));
-          const auto top_left_x = static_cast<int>(floor(x_real));
+          const auto top_left_y = static_cast<int>(std::floor(y_real));
+          const auto top_left_x = static_cast<int>(std::floor(x_real));
           const DType top_left_y_w = 1.0 - (y_real - top_left_y);
           const DType top_left_x_w = 1.0 - (x_real - top_left_x);
           for (index_t c = 0; c < static_cast<index_t>(o_c); ++c) {
diff --git a/src/operator/tensor/elemwise_binary_op-inl.h b/src/operator/tensor/elemwise_binary_op-inl.h
index 1b1a1d20077d..72a02ff5fd83 100644
--- a/src/operator/tensor/elemwise_binary_op-inl.h
+++ b/src/operator/tensor/elemwise_binary_op-inl.h
@@ -68,13 +68,13 @@ void ElemwiseBinaryOp::RspRspOp(mshadow::Stream<cpu> *s,
   if (rhs_is_dense) {
     // For right-side dense, in order to have sparse output, lhs input zero should
     // always output zero
-    CHECK(fabs(static_cast<float>(OP::Map(DType(0), DType(99)))) < 1e-4f);
+    CHECK(std::fabs(static_cast<float>(OP::Map(DType(0), DType(99)))) < 1e-4f);
     CHECK(!is_dense_result);  // Currently not handled
   }
   if (lhs_is_dense) {
     // For left-side dense, in order to have sparse output, lhs input zero should
     // always output zero
-    CHECK(fabs(static_cast<float>(OP::Map(DType(99), DType(0)))) < 1e-4f);
+    CHECK(std::fabs(static_cast<float>(OP::Map(DType(99), DType(0)))) < 1e-4f);
     CHECK(!is_dense_result);  // Currently not handled
   }
 
@@ -102,10 +102,10 @@
       CHECK_EQ(is_dense_result, false);
       if (lhs_in_place) {
         // For in-place, zero L-value must always be zero output
-        DCHECK(fabs(static_cast<float>(OP::Map(DType(0), DType(99)))) < DType(1e-3));
+        DCHECK(std::fabs(static_cast<float>(OP::Map(DType(0), DType(99)))) < DType(1e-3));
       } else {
         // For in-place, zero R-value must always be zero output
-        DCHECK(fabs(static_cast<float>(OP::Map(DType(99), DType(0)))) < DType(1e-3));
+        DCHECK(std::fabs(static_cast<float>(OP::Map(DType(99), DType(0)))) < DType(1e-3));
       }
     }
   }
diff --git a/src/operator/tensor/elemwise_binary_op.h b/src/operator/tensor/elemwise_binary_op.h
index cb1db0ec632e..9b451fa69357 100644
--- a/src/operator/tensor/elemwise_binary_op.h
+++ b/src/operator/tensor/elemwise_binary_op.h
@@ -646,7 +646,7 @@ class ElemwiseBinaryOp : public OpBase {
       if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
         CHECK_EQ(outputs[0].storage_type(), in_stype);
         // rsp -> rsp, _. op requires 0-input returns 0-output
-        DCHECK_LT(fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
+        DCHECK_LT(std::fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
         UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]});
       } else {
         LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
@@ -657,7 +657,7 @@
       if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
         CHECK_EQ(outputs[0].storage_type(), in_stype);
         // rsp -> _, rsp. op requires 0-input returns 0-output
-        DCHECK_LT(fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
+        DCHECK_LT(std::fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
         UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]});
       } else {
         LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
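Note (illustrative, not part of the patch to apply): the pooling-shape hunks
above also show why the operand is cast to float before std::ceil. Under the
"full" pooling convention the output size rounds the window count up, but
with integer operands the division would truncate before ceil ever ran. A
minimal sketch with made-up sizes:

    // shape_sketch.cc -- illustrative only, not part of this patch
    #include <cmath>
    #include <cstdio>

    int main() {
      int dshape2 = 7, pad0 = 0, kernel0 = 2, stride0 = 2;
      // "valid" convention: integer division truncates (floor).
      int floor_out = 1 + (dshape2 + 2 * pad0 - kernel0) / stride0;  // 1 + 5/2 = 3
      // "full" convention: divide in float, then round up with std::ceil.
      int ceil_out = 1 + static_cast<int>(std::ceil(
          static_cast<float>(dshape2 + 2 * pad0 - kernel0) / stride0));  // 1 + 3 = 4
      std::printf("%d %d\n", floor_out, ceil_out);  // prints: 3 4
      return 0;
    }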