add_fc_convert_layers_name (#37157)
Wangzheee committed Nov 16, 2021
1 parent 36dd295 commit fa9d497
Showing 1 changed file with 23 additions and 4 deletions.
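The change gives every TensorRT layer created by the FC op converter an explicit, human-readable name, so engine build logs and profiler output can be traced back to the originating Paddle op. All of the added calls use TensorRT's nvinfer1::ILayer::setName and follow one format. A minimal sketch of the pattern, assuming a layer pointer and an output_name string like those in the diff below:

    // Naming format used throughout this commit:
    //   "<converter step>: <TensorRT layer kind> (Output: <op output tensor>)"
    layer->setName(
        ("fc_op_reshape_before_fc: Shuffle (Output: " + output_name + ")")
            .c_str());  // safe with a temporary: setName copies the name string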
27 changes: 23 additions & 4 deletions paddle/fluid/inference/tensorrt/convert/fc_op.cc
@@ -34,8 +34,8 @@ namespace tensorrt {
 class FcOpConverter : public OpConverter {
  public:
   nvinfer1::ILayer* reshape_before_fc(nvinfer1::ITensor* before_fc,
-                                      nvinfer1::Dims x_dim,
-                                      int x_num_col_dims) {
+                                      nvinfer1::Dims x_dim, int x_num_col_dims,
+                                      std::string output_name) {
     // add shuffle before fc
     nvinfer1::Dims reshape_before_fc_dim;
     reshape_before_fc_dim.nbDims = x_num_col_dims + 3;
@@ -57,6 +57,9 @@ class FcOpConverter : public OpConverter {
     auto* reshape_before_fc_layer =
         TRT_ENGINE_ADD_LAYER(engine_, Shuffle, *before_fc);
     reshape_before_fc_layer->setReshapeDimensions(reshape_before_fc_dim);
+    reshape_before_fc_layer->setName(
+        ("fc_op_reshape_before_fc: Shuffle (Output: " + output_name + ")")
+            .c_str());
     return reshape_before_fc_layer;
   }

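For context on the function just modified: reshape_before_fc pads the FC input to x_num_col_dims + 3 dimensions because TensorRT's FullyConnected layer requires at least three trailing axes. The flattening logic itself sits in the lines collapsed above; as a rough illustration, assuming a 3-D input and x_num_col_dims = 2:

    // Illustrative shapes only (the exact logic is in the collapsed lines):
    //   x_dim = {B, S, H}, x_num_col_dims = 2
    //   reshape_before_fc_dim.nbDims = 2 + 3 = 5
    //   input is reshaped to {B, S, H, 1, 1} before the FC layer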
@@ -164,27 +167,42 @@ class FcOpConverter : public OpConverter {
       auto* fc_layer_int8 =
           TRT_ENGINE_ADD_LAYER(engine_, Convolution, *inputs, n_output,
                                nv_ksize, weight.get(), bias.get());
+      fc_layer_int8->setName(
+          ("fc_op_int8_conv1x1: Convolution (Output: " + output_name + ")")
+              .c_str());
       engine_->SetTensorDynamicRange(fc_layer_int8->getOutput(0), out_scale);
       auto* fc_after_reshape_int8 = reshape_after_fc(
           fc_layer_int8->getOutput(0), x_dim, x_num_col_dims);
       if (activation_type == "relu") {
+        fc_after_reshape_int8->setName(
+            ("fc_op_int8_reshape_after_fc: Shuffle (Output: " + output_name +
+             ")")
+                .c_str());
         nvinfer1::IActivationLayer* relu_layer_int8 = TRT_ENGINE_ADD_LAYER(
             engine_, Activation, *(fc_after_reshape_int8->getOutput(0)),
             nvinfer1::ActivationType::kRELU);
         RreplenishLayerAndOutput(relu_layer_int8, "relu_after_fc_shuffle",
                                  {output_name}, test_mode);
       } else {
-        RreplenishLayerAndOutput(fc_after_reshape_int8, "shuffle_after_fc",
+        RreplenishLayerAndOutput(fc_after_reshape_int8,
+                                 "fc_op_int8_reshape_after_fc: Shuffle",
                                  {output_name}, test_mode);
       }
     } else {
       // add fc layer
       auto* fc_layer_float =
           TRT_ENGINE_ADD_LAYER(engine_, FullyConnected, *inputs, n_output,
                                weight.get(), bias.get());
+      fc_layer_float->setName(
+          ("fc_op_float: FullyConnected (Output: " + output_name + ")")
+              .c_str());
       auto* fc_after_reshape_float = reshape_after_fc(
           fc_layer_float->getOutput(0), x_dim, x_num_col_dims);
       if (activation_type == "relu") {
+        fc_after_reshape_float->setName(
+            ("fc_op_float_reshape_after_fc: Shuffle (Output: " + output_name +
+             ")")
+                .c_str());
         nvinfer1::IActivationLayer* relu_layer_float = TRT_ENGINE_ADD_LAYER(
             engine_, Activation, *(fc_after_reshape_float->getOutput(0)),
             nvinfer1::ActivationType::kRELU);
@@ -234,7 +252,8 @@ class FcOpConverter : public OpConverter {
                 "converter expects x_dim.nbDims > x_num_col_dims, but "
                 "x_dim.nbDims : %d, x_num_col_dims : %d.",
                 x_dim.nbDims, x_num_col_dims));
-    auto* reshape_before_fc_layer = reshape_before_fc(X, x_dim, x_num_col_dims);
+    auto* reshape_before_fc_layer =
+        reshape_before_fc(X, x_dim, x_num_col_dims, output_name);
     auto* reshape_itensor = reshape_before_fc_layer->getOutput(0);
     if (enable_int8) {
       engine_->SetTensorDynamicRange(reshape_itensor, in_scale);
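Taken together, the diff applies a single convention across the float path (FullyConnected) and the int8 path, where the FC is lowered to a 1x1 Convolution (hence the "fc_op_int8_conv1x1" tag). Below is a self-contained sketch of that convention factored into a helper; SetLayerName is a hypothetical name, not part of the commit:

    #include <string>
    #include "NvInfer.h"

    // Tags a layer as "<stage>: <LayerKind> (Output: <tensor>)", mirroring the
    // names added in this diff so engine logs map back to the Paddle op output.
    inline void SetLayerName(nvinfer1::ILayer* layer, const std::string& stage,
                             const std::string& kind,
                             const std::string& output_name) {
      layer->setName(
          (stage + ": " + kind + " (Output: " + output_name + ")").c_str());
    }

    // Usage matching the float branch above:
    //   SetLayerName(fc_layer_float, "fc_op_float", "FullyConnected", output_name);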

1 comment on commit fa9d497

@paddle-bot-old

Congratulations! Your pull request passed all required CI checks. You can ask the reviewer(s) to approve and merge. 🎉
