This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Revert "Improve FC perf when no_bias=False" #15099

Merged: 1 commit, May 30, 2019
14 changes: 1 addition & 13 deletions src/operator/nn/fully_connected-inl.h
@@ -36,7 +36,6 @@
 #include "../elemwise_op_common.h"
 #include "../linalg.h"
 #include "../../common/utils.h"
-#include "../tensor/broadcast_reduce_op.h"

namespace mxnet {
namespace op {
@@ -170,18 +169,7 @@ void FCBackward(const OpContext &ctx, const FullyConnectedParam &param,
   // gradient of bias
   if (!param.no_bias) {
     Tensor<xpu, 1, DType> gbias = in_grad[fullc::kBias].get<xpu, 1, DType>(s);
-    TBlob grad_blob = TBlob(grad);
-    TBlob gbias_blob = TBlob(gbias);
-    mxnet::TShape x(1, 0);
-    mxnet::TShape small;
-    if (shape_assign(&gbias_blob.shape_, Shape2(param.num_hidden, 1))) {
-      small = gbias_blob.shape_;
-    } else {
-      small = ReduceAxesShapeImpl(grad_blob.shape_, dmlc::optional<mxnet::TShape>(x), true, false);
-    }
-    ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false,
-                          mshadow_op::identity>(ctx, {grad_blob}, {req[fullc::kBias]},
-                                                {in_grad[fullc::kBias]}, small);
+    Assign(gbias, req[fullc::kBias], sum_rows(grad));
   }
   // gradient of data
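For reference, the restored sum_rows path and the reverted ReduceAxesComputeImpl path compute the same quantity: the bias gradient of a fully connected layer is the output gradient summed over the batch axis. A minimal numpy sketch of that reduction (illustrative shapes only, not the MXNet API):

    import numpy as np

    # Hypothetical sizes for illustration.
    batch, num_hidden = 5, 10
    grad = np.random.rand(batch, num_hidden).astype(np.float32)  # dL/dY from the layer above

    # Bias gradient: reduce the output gradient over the batch axis.
    # This is what sum_rows(grad) produces in the restored code path.
    gbias = grad.sum(axis=0)    # shape: (num_hidden,)
    assert gbias.shape == (num_hidden,)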
2 changes: 2 additions & 0 deletions src/operator/nn/fully_connected.cc
@@ -316,9 +316,11 @@ NNVM_REGISTER_OP(_backward_FullyConnected)
   const FullyConnectedParam& params = nnvm::get<FullyConnectedParam>(attrs.parsed);
   return params.no_bias ? 2 : 3;
 })
+#if MXNET_USE_MKLDNN == 1
 .set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& n) {
   return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
 })
+#endif
 .set_attr<nnvm::TIsBackward>("TIsBackward", true)
 .set_attr<nnvm::FInplaceOption>("FInplaceOption", [](const NodeAttrs& attrs){
   return std::vector<std::pair<int, int> >{{1, 0}};
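This guard makes the backward op request temporary workspace only in MKLDNN builds, presumably because the MKLDNN backward path uses scratch space; plain builds now skip the request. Whether a given binary was built with MKLDNN can be checked from Python; a short sketch, assuming the runtime feature-detection API available in MXNet 1.5-era builds:

    from mxnet.runtime import Features

    # Query the build-time feature flags compiled into libmxnet.
    features = Features()
    print(features.is_enabled('MKLDNN'))  # True only for MKLDNN-enabled builds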
21 changes: 0 additions & 21 deletions tests/python/unittest/test_operator.py
@@ -696,27 +696,6 @@ def test_symbol_pow():
     check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir])
 
-
-@with_seed()
-def test_fully_connected():
-    data = mx.sym.var("data")
-    fc_weight = mx.sym.var("weight")
-    fc_bias = mx.sym.var("bias")
-    fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc')
-    data = mx.nd.random.uniform(shape=(5, 5, 5, 13), dtype=np.float32)
-    fc_weight = mx.nd.random.uniform(shape=(10, 325), dtype=np.float32)
-    fc_bias = mx.nd.random.uniform(shape=(10), dtype=np.float32)
-    fc_bias2 = mx.nd.random.uniform(shape=(10, 1), dtype=np.float32)
-    data_np = data.asnumpy().reshape(5, 325)
-    fc_weight_np = np.transpose(fc_weight.asnumpy())
-    fc_bias_np = fc_bias.asnumpy()
-    res = np.dot(data_np, fc_weight_np) + fc_bias.asnumpy()
-    check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res})
-    check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np},
-                           numeric_eps=1e-2, rtol=1e-4, atol=1e-2)
-    # TODO: Fix Bug #15032 when bias has ndim > 1
-    #check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res})
-
 
 @with_seed()
 def test_pow_fn():
     shape = (3, 4)
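The deleted test pinned down the FullyConnected forward contract that both the reverted and the restored kernels must satisfy: the (5, 5, 5, 13) input is flattened to (batch, 325), multiplied by the transposed (10, 325) weight, and offset by the (10,) bias. A reference computation in plain numpy, using the shapes from the deleted test:

    import numpy as np

    data = np.random.rand(5, 5, 5, 13).astype(np.float32)
    weight = np.random.rand(10, 325).astype(np.float32)   # (num_hidden, input_dim)
    bias = np.random.rand(10).astype(np.float32)

    # FullyConnected forward: flatten all but the batch axis, then Y = X W^T + b.
    x = data.reshape(5, -1)          # (5, 325)
    out = x.dot(weight.T) + bias     # (5, 10)
    assert out.shape == (5, 10)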