diff --git a/src/operator/quantization/mkldnn/mkldnn_quantized_fully_connected.cc b/src/operator/quantization/mkldnn/mkldnn_quantized_fully_connected.cc
index 71daf2ec2c16..cf3d789e2882 100644
--- a/src/operator/quantization/mkldnn/mkldnn_quantized_fully_connected.cc
+++ b/src/operator/quantization/mkldnn/mkldnn_quantized_fully_connected.cc
@@ -74,7 +74,7 @@ void MKLDNNQuantizedFullyConnectedForward(const nnvm::NodeAttrs &attrs,
     int32_t *quantized_bias_ptr = quantized_bias.data().dptr<int32_t>();
     size_t bias_size = bias.shape().Size();
     #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
-    for (size_t i = 0; i < bias_size; ++i) {
+    for (index_t i = 0; i < static_cast<index_t>(bias_size); ++i) {
       quantized_bias_ptr[i] = bias_ptr[i] * bias_int32_rescale;
     }
   }
diff --git a/src/operator/subgraph/mkldnn/mkldnn_fc.cc b/src/operator/subgraph/mkldnn/mkldnn_fc.cc
index 0ec05a2af087..857a27d9a134 100644
--- a/src/operator/subgraph/mkldnn/mkldnn_fc.cc
+++ b/src/operator/subgraph/mkldnn/mkldnn_fc.cc
@@ -156,7 +156,7 @@ void SgMKLDNNFCOp::Forward(const OpContext &ctx,
     int32_t *quantized_bias_ptr = cached_bias_.data().dptr<int32_t>();
     size_t bias_size = bias.shape().Size();
     #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
-    for (size_t i = 0; i < bias_size; ++i) {
+    for (index_t i = 0; i < static_cast<index_t>(bias_size); ++i) {
       quantized_bias_ptr[i] = bias_ptr[i] * bias_int32_rescale;
     }
   }
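
The sketch below is not part of the diff; it is a minimal, standalone illustration of the pattern being changed, assuming the motivation is that MSVC only implements OpenMP 2.0, whose `parallel for` requires a signed integral loop variable, so a `size_t` index fails to compile there. The `index_t` alias, the input values, and the rescale factor are all made up for illustration; MXNet's real `index_t` comes from mshadow.

// hypothetical standalone example; compile with e.g. g++ -fopenmp example.cc
#include <cstdint>
#include <cstdio>
#include <vector>

using index_t = std::int64_t;  // assumption: a signed type, standing in for mshadow::index_t

int main() {
  const std::vector<std::int8_t> bias = {1, -2, 3, -4, 5, -6, 7, -8};
  std::vector<std::int32_t> quantized_bias(bias.size());
  const float bias_int32_rescale = 256.0f;  // hypothetical rescale factor

  const std::size_t bias_size = bias.size();
  // A signed loop variable keeps the pragma valid under OpenMP 2.0 (MSVC);
  // the explicit cast avoids a signed/unsigned comparison in the loop bound.
#pragma omp parallel for
  for (index_t i = 0; i < static_cast<index_t>(bias_size); ++i) {
    quantized_bias[i] = static_cast<std::int32_t>(bias[i] * bias_int32_rescale);
  }

  for (std::size_t i = 0; i < bias_size; ++i) {
    std::printf("%d ", static_cast<int>(quantized_bias[i]));
  }
  std::printf("\n");
  return 0;
}

Without OpenMP enabled the pragma is simply ignored, so the same loop also builds and runs serially; only the index type matters for the MSVC build.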