update paddle-lite subgraph
jiweibo committed Oct 24, 2019
1 parent d2c20df commit adf8c95
Showing 3 changed files with 85 additions and 3 deletions.
@@ -226,7 +226,11 @@ void LiteSubgraphPass::SetUpEngine(
paddle::lite_api::PrecisionType precision_type =
enable_int8 ? PRECISION(kInt8) : PRECISION(kFloat);
paddle::lite::Place prefer_place = {target_type, precision_type};

std::set<std::string> param_names_set(repetitive_params.begin(),
repetitive_params.end());
const_cast<std::vector<std::string>&>(repetitive_params)
.assign(param_names_set.begin(), param_names_set.end());
serialize_params(&config.param, scope, repetitive_params);
config.model = program->Proto()->SerializeAsString();
config.prefer_place = prefer_place;
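The four added lines dedupe repetitive_params before serialization, so any parameter listed more than once is serialized just once; the const_cast is needed because the pass receives the list as a const reference. Below is a minimal standalone sketch of the same set round-trip idiom, with made-up parameter names standing in for the pass's actual state:

#include <iostream>
#include <set>
#include <string>
#include <vector>

int main() {
  // Stand-in for repetitive_params, which may name the same weight twice.
  std::vector<std::string> params = {"conv1_w", "fc_b", "conv1_w"};

  // Round-trip through std::set: drops duplicates and sorts the names.
  std::set<std::string> unique_names(params.begin(), params.end());
  params.assign(unique_names.begin(), unique_names.end());

  for (const auto& name : params) std::cout << name << "\n";
  // Prints "conv1_w" then "fc_b": each parameter survives exactly once.
  return 0;
}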
paddle/fluid/inference/lite/tensor_utils.cc: 4 changes (4 additions, 0 deletions)
@@ -46,6 +46,8 @@ PrecisionType GetLitePrecisionType(framework::proto::VarType::Type type) {
return PrecisionType::kFloat;
case framework::proto::VarType_Type_INT8:
return PrecisionType::kInt8;
case framework::proto::VarType_Type_INT32:
return PrecisionType::kInt32;
default:
LOG(FATAL) << "Error precision type.";
return PrecisionType::kUnk;
@@ -59,6 +61,8 @@ framework::proto::VarType::Type GetNativePrecisionType(
return framework::proto::VarType_Type_FP32;
case PrecisionType::kInt8:
return framework::proto::VarType_Type_INT8;
case PrecisionType::kInt32:
return framework::proto::VarType_Type_INT32;
default:
LOG(FATAL) << "Error precision type.";
return static_cast<framework::proto::VarType::Type>(-1);
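The two new cases extend the precision mapping between fluid and Lite to 32-bit integers in both directions. A self-contained sketch of the round-trip property the paired switches provide; the enum stubs below are simplified stand-ins, not the real framework::proto::VarType or Lite PrecisionType definitions:

#include <cassert>

// Simplified stand-ins for the two enums bridged by tensor_utils.cc.
enum class NativeType { FP32, INT8, INT32 };
enum class LiteType { kFloat, kInt8, kInt32 };

LiteType ToLite(NativeType t) {
  switch (t) {
    case NativeType::FP32:  return LiteType::kFloat;
    case NativeType::INT8:  return LiteType::kInt8;
    case NativeType::INT32: return LiteType::kInt32;  // mirrors the added case
  }
  return LiteType::kFloat;  // unreachable for the enum above
}

NativeType ToNative(LiteType t) {
  switch (t) {
    case LiteType::kFloat:  return NativeType::FP32;
    case LiteType::kInt8:   return NativeType::INT8;
    case LiteType::kInt32:  return NativeType::INT32;  // mirrors the added case
  }
  return NativeType::FP32;  // unreachable for the enum above
}

int main() {
  // With both cases present, an int32 type survives the round trip intact.
  assert(ToNative(ToLite(NativeType::INT32)) == NativeType::INT32);
  return 0;
}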
paddle/fluid/inference/lite/test_predictor.cc: 78 changes (76 additions, 2 deletions)
@@ -23,15 +23,89 @@
#include "paddle/fluid/platform/enforce.h"

int main() {
LOG(INFO) << "Hello World!";
LOG(INFO) << "leaky_relu";
paddle::AnalysisConfig config;
config.SetModel("/shixiaowei02/Paddle_lite/xingzhaolong/leaky_relu_model");
// config.SetModel("/Paddle/models/lite/leaky_relu");
config.SwitchUseFeedFetchOps(false);
- config.EnableUseGpu(10, 1);
+ config.EnableUseGpu(10, 0);
config.EnableLiteEngine(paddle::AnalysisConfig::Precision::kFloat32);
config.pass_builder()->TurnOnDebug();

auto predictor = CreatePaddlePredictor(config);
PADDLE_ENFORCE_NOT_NULL(predictor.get());

const int batch_size = 1;
const int channels = 1;
const int height = 3;
const int width = 3;
// float *data = new float[batch_size * channels * height * width];
float data[batch_size * channels * height * width] = {0.5, -0.5, 0, -0, 1,
-1, 2, -2, 3};

auto input_names = predictor->GetInputNames();
auto input_t = predictor->GetInputTensor(input_names[0]);
input_t->Reshape({batch_size, channels, height, width});
input_t->copy_from_cpu(data);

CHECK(predictor->ZeroCopyRun());

std::vector<float> out_data;
auto output_names = predictor->GetOutputNames();
auto output_t = predictor->GetOutputTensor(output_names[0]);
std::vector<int> output_shape = output_t->shape();
int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1,
std::multiplies<int>());
LOG(INFO) << "out_num is " << out_num;
out_data.resize(out_num);
output_t->copy_to_cpu(out_data.data());
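// A possible verification step, sketched here as comments (it is not part of
// this commit): assuming the model is a single LeakyReLU layer with negative
// slope alpha, each output should equal x for x >= 0 and alpha * x otherwise.
// alpha = 0.02 matches Paddle's leaky_relu default but is an assumption here.
// const float alpha = 0.02f;
// for (int i = 0; i < out_num; ++i) {
//   float expect = data[i] >= 0 ? data[i] : alpha * data[i];
//   CHECK_LT(std::fabs(out_data[i] - expect), 1e-5);
// }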
return 0;

/*
// for yolov3
LOG(INFO) << "yolo_v3";
paddle::AnalysisConfig config;
config.SetModel("/Paddle/models/lite/yolov3_infer/__model__",
"/Paddle/models/lite/yolov3_infer/__params__");
config.SwitchUseFeedFetchOps(false);
config.EnableUseGpu(10, 1);
config.EnableLiteEngine(paddle::AnalysisConfig::Precision::kFloat32);
config.pass_builder()->TurnOnDebug();
auto predictor = CreatePaddlePredictor(config);
PADDLE_ENFORCE_NOT_NULL(predictor.get());
const int batch_size = 1;
const int channels = 3;
const int height = 608;
const int width = 608;
// float *data = new float[batch_size * channels * height * width];
float data[batch_size * channels * height * width];
memset(data, 0, sizeof(float) * batch_size * channels * height * width);
auto input_names = predictor->GetInputNames();
LOG(INFO) << input_names[0];
LOG(INFO) << input_names[1];
auto input_image = predictor->GetInputTensor(input_names[0]);
input_image->Reshape({batch_size, channels, height, width});
input_image->copy_from_cpu(data);
int im_size_data[2] = {608, 608};
auto input_size = predictor->GetInputTensor(input_names[1]);
input_size->Reshape({1, 2});
input_size->copy_from_cpu(im_size_data);
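// im_size is an int32 tensor; carrying it across the fluid/Lite boundary
// relies on the kInt32 <-> INT32 precision mapping added in tensor_utils.cc
// above.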
CHECK(predictor->ZeroCopyRun());
std::vector<float> out_data;
auto output_names = predictor->GetOutputNames();
auto output_t = predictor->GetOutputTensor(output_names[0]);
std::vector<int> output_shape = output_t->shape();
int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1,
std::multiplies<int>());
LOG(INFO) << "out_num is " << out_num;
out_data.resize(out_num);
output_t->copy_to_cpu(out_data.data());
return 0;
*/
}
