diff --git a/paddle/fluid/inference/analysis/argument.h b/paddle/fluid/inference/analysis/argument.h index 60d670ce41839..82b5ec09521a2 100644 --- a/paddle/fluid/inference/analysis/argument.h +++ b/paddle/fluid/inference/analysis/argument.h @@ -196,8 +196,7 @@ struct Argument { DECL_ARGUMENT_FIELD(lite_passes_filter, LitePassesFilter, std::vector<std::string>); - DECL_ARGUMENT_FIELD(lite_ops_filter, LiteOpsFilter, - std::vector<std::string>); + DECL_ARGUMENT_FIELD(lite_ops_filter, LiteOpsFilter, std::vector<std::string>); DECL_ARGUMENT_FIELD(lite_precision_mode, LitePrecisionMode, AnalysisConfig::Precision); diff --git a/paddle/fluid/inference/analysis/ir_pass_manager.cc b/paddle/fluid/inference/analysis/ir_pass_manager.cc index 3580b89a1874a..1ff77eb243eb9 100644 --- a/paddle/fluid/inference/analysis/ir_pass_manager.cc +++ b/paddle/fluid/inference/analysis/ir_pass_manager.cc @@ -127,7 +127,8 @@ void IRPassManager::CreatePasses(Argument *argument, new framework::ProgramDesc *(&argument->main_program())); } if (pass_name == "lite_subgraph_pass") { - bool enable_int8 = argument->lite_precision_mode() == AnalysisConfig::Precision::kInt8; + bool enable_int8 = + argument->lite_precision_mode() == AnalysisConfig::Precision::kInt8; pass->Set("program", new framework::ProgramDesc *(&argument->main_program())); pass->Set("lite_ops_filter", diff --git a/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc b/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc index 2fcc7e8220b12..2d1ec7a9d28e2 100644 --- a/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc +++ b/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc @@ -21,16 +21,16 @@ #include #include -#include #include +#include -#include "paddle/fluid/inference/lite/op_teller.h" #include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/inference/lite/op_teller.h" #include "paddle/fluid/inference/utils/singleton.h" +#include "paddle/fluid/framework/ir/graph_pattern_detector.h" #include 
"paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.h" #include "paddle/fluid/inference/analysis/ir_passes/subgraph_detector.h" -#include "paddle/fluid/framework/ir/graph_pattern_detector.h" #include "paddle/fluid/string/pretty_log.h" #include "paddle/fluid/inference/lite/engine.h" @@ -43,8 +43,8 @@ using framework::ir::Node; namespace lite { -std::string UniqueKey(const std::vector &engine_inputs, - const std::vector &engine_outputs, +std::string UniqueKey(const std::vector& engine_inputs, + const std::vector& engine_outputs, const std::string& id) { std::string engine_hash_key = ""; for (auto name : engine_inputs) { @@ -60,8 +60,8 @@ std::string UniqueKey(const std::vector &engine_inputs, std::vector IOVarsFilter(const std::vector& nodes) { std::set names; - for (const auto& node: nodes) { - if (node->IsVar() && !node->Var()->Persistable()) { + for (const auto& node : nodes) { + if (node->IsVar() && !node->Var()->Persistable()) { names.insert(node->Name()); } } @@ -75,19 +75,20 @@ void StrToBinaryFile(const std::string& path, const std::string& str) { } void ModifyHostProgram(framework::ProgramDesc* host_program, - framework::BlockDesc* host_sub_block, - const std::unordered_set& io_var_nodes, - const std::vector& subgraph_ops) { - for (auto *var_node: io_var_nodes) { + framework::BlockDesc* host_sub_block, + const std::unordered_set& io_var_nodes, + const std::vector& subgraph_ops) { + for (auto* var_node : io_var_nodes) { auto* sub_block_var = host_sub_block->Var(var_node->Name()); sub_block_var->Proto()->CopyFrom(*var_node->Var()->Proto()); } - for (auto *op_desc : subgraph_ops) { + for (auto* op_desc : subgraph_ops) { auto* sub_block_op = host_sub_block->AppendOp(); sub_block_op->CopyFrom(*op_desc); if (op_desc->HasAttr("sub_block")) { int32_t global_sub_id = host_sub_block->ID(); - auto *op_sub_block = host_program->MutableBlock(op_desc->GetBlockAttrId("sub_block")); + auto* op_sub_block = + 
host_program->MutableBlock(op_desc->GetBlockAttrId("sub_block")); op_sub_block->Proto()->set_parent_idx(global_sub_id); } } @@ -97,21 +98,22 @@ void ModifyHostProgram(framework::ProgramDesc* host_program, // (initial) -> proto::desc (flush) -> framework::desc (final). // Ir::Graph is limited to changing the main block, so the sub block // needs to be processed here. -void ModifyEngineProgram(Node *merged_node, - framework::ProgramDesc* host_program, - framework::ProgramDesc* engine_program, - framework::BlockDesc* host_sub_block, - const std::unordered_set& io_var_nodes, - const std::vector& subgraph_ops) { - +void ModifyEngineProgram(Node* merged_node, + framework::ProgramDesc* host_program, + framework::ProgramDesc* engine_program, + framework::BlockDesc* host_sub_block, + const std::unordered_set& io_var_nodes, + const std::vector& subgraph_ops) { // 1. Fill the main block of lite program. - framework::BlockDesc* engine_global_block = engine_program->MutableBlock(framework::kRootBlockIndex); + framework::BlockDesc* engine_global_block = + engine_program->MutableBlock(framework::kRootBlockIndex); PrependFeedOps(engine_global_block, IOVarsFilter(merged_node->inputs)); - for (auto *var_node: io_var_nodes) { - framework::VarDesc* sub_block_var = engine_global_block->Var(var_node->Name()); + for (auto* var_node : io_var_nodes) { + framework::VarDesc* sub_block_var = + engine_global_block->Var(var_node->Name()); sub_block_var->Proto()->CopyFrom(*var_node->Var()->Proto()); } - for (auto *op_desc : subgraph_ops) { + for (auto* op_desc : subgraph_ops) { auto* sub_block_op = engine_global_block->AppendOp(); sub_block_op->CopyFrom(*op_desc); } @@ -123,18 +125,19 @@ void ModifyEngineProgram(Node *merged_node, sub_blocks_map[host_sub_block->ID()] = framework::kRootBlockIndex; std::function&)> append_sub_blocks; append_sub_blocks = [&](const std::vector& ops) { - for (auto *op_desc : ops) { + for (auto* op_desc : ops) { if (op_desc->HasAttr("sub_block")) { int32_t 
host_op_sub_id = op_desc->GetBlockAttrId("sub_block"); if (copied_host_ids.count(host_op_sub_id)) continue; size_t engine_block_size = engine_program->Size(); auto* host_op_sub_block = host_program->MutableBlock(host_op_sub_id); - auto* engine_op_sub_block = engine_program->AppendBlock(*(op_desc->Block())); - for (auto* var: host_op_sub_block->AllVars()) { + auto* engine_op_sub_block = + engine_program->AppendBlock(*(op_desc->Block())); + for (auto* var : host_op_sub_block->AllVars()) { auto* engine_var = engine_op_sub_block->Var(var->Name()); engine_var->Proto()->CopyFrom(*var->Proto()); } - for (auto* op: host_op_sub_block->AllOps()) { + for (auto* op : host_op_sub_block->AllOps()) { auto* engine_op = engine_op_sub_block->AppendOp(); engine_op->Proto()->CopyFrom(*op->Proto()); } @@ -145,7 +148,7 @@ void ModifyEngineProgram(Node *merged_node, }; append_sub_blocks(subgraph_ops); for (size_t i = 0; i < engine_program->Size(); i++) { - for (auto *op_desc : engine_program->Block(i).AllOps()) { + for (auto* op_desc : engine_program->Block(i).AllOps()) { if (op_desc->HasAttr("sub_block")) { int32_t id = op_desc->GetBlockAttrId("sub_block"); op_desc->SetAttr("sub_block", sub_blocks_map[id]); @@ -154,26 +157,27 @@ void ModifyEngineProgram(Node *merged_node, } } -void OrganizeProgram(Node *merged_node, - framework::ProgramDesc* host_program, - framework::ProgramDesc* engine_program, - std::vector *repetitive_params) { - std::vector& subgraph = *Agent(merged_node).subgraph(); +void OrganizeProgram(Node* merged_node, framework::ProgramDesc* host_program, + framework::ProgramDesc* engine_program, + std::vector* repetitive_params) { + std::vector& subgraph = *Agent(merged_node).subgraph(); PADDLE_ENFORCE(!subgraph.empty()); - const framework::BlockDesc &host_global_block = host_program->Block(framework::kRootBlockIndex); - framework::BlockDesc* host_sub_block = host_program->AppendBlock(host_global_block); + const framework::BlockDesc& host_global_block = + 
host_program->Block(framework::kRootBlockIndex); + framework::BlockDesc* host_sub_block = + host_program->AppendBlock(host_global_block); string::PrettyLogDetail("--- detect a sub-graph with %d nodes", subgraph.size()); - std::unordered_set io_var_nodes = GetRelatedIOVarNodes(subgraph); - for (const auto* node: io_var_nodes) { + std::unordered_set io_var_nodes = GetRelatedIOVarNodes(subgraph); + for (const auto* node : io_var_nodes) { LOG(INFO) << "IO Variable Name: " << node->Name(); } std::vector subgraph_ops; - for (auto *op_node : subgraph) { + for (auto* op_node : subgraph) { subgraph_ops.push_back(op_node->Op()); } @@ -181,33 +185,35 @@ void OrganizeProgram(Node *merged_node, ModifyEngineProgram(merged_node, host_program, engine_program, host_sub_block, io_var_nodes, subgraph_ops); *repetitive_params = ExtractParameters(io_var_nodes); - for (const auto& param: *repetitive_params) { + for (const auto& param : *repetitive_params) { LOG(INFO) << "Repetitive param: " << param; } host_program->Flush(); engine_program->Flush(); } -} // namespace lite +} // namespace lite -void LiteSubgraphPass::SetUpEngine(framework::ProgramDesc* program, - const std::vector& repetitive_params, - const std::string& unique_key, bool dump_model) const { +void LiteSubgraphPass::SetUpEngine( + framework::ProgramDesc* program, + const std::vector& repetitive_params, + const std::string& unique_key, bool dump_model) const { inference::lite::EngineConfig config; - auto *scope = param_scope(); + auto* scope = param_scope(); // When the pass is started, only the persistent variables of the // main block are read. Fluid seems to allow persistence variables // in the sub block, but they are controlled by context, so the // support is suspended here. 
- auto serialize_params = [] (std::string* str, framework::Scope* scope, - const std::vector& params) { + auto serialize_params = [](std::string* str, framework::Scope* scope, + const std::vector& params) { std::ostringstream os; platform::CPUDeviceContext ctx; - for (const auto& param: params) { + for (const auto& param : params) { LOG(INFO) << "Serialize param: " << param; - PADDLE_ENFORCE_NOT_NULL(scope->FindVar(param), "Block should already have a '%s' variable", - param); + PADDLE_ENFORCE_NOT_NULL(scope->FindVar(param), + "Block should already have a '%s' variable", + param); auto* tensor = scope->FindVar(param)->GetMutable(); framework::SerializeToStream(os, *tensor, ctx); } @@ -217,7 +223,8 @@ void LiteSubgraphPass::SetUpEngine(framework::ProgramDesc* program, bool use_gpu = Get("use_gpu"); bool enable_int8 = Get("enable_int8"); lite_api::TargetType target_type = use_gpu ? TARGET(kCUDA) : TARGET(kHost); - paddle::lite_api::PrecisionType precision_type = enable_int8 ? PRECISION(kInt8) : PRECISION(kFloat); + paddle::lite_api::PrecisionType precision_type = + enable_int8 ? 
PRECISION(kInt8) : PRECISION(kFloat); paddle::lite::Place prefer_place = {target_type, precision_type}; serialize_params(&config.param, scope, repetitive_params); @@ -233,40 +240,41 @@ void LiteSubgraphPass::SetUpEngine(framework::ProgramDesc* program, lite::StrToBinaryFile("./model.bin", config.model); lite::StrToBinaryFile("./param.bin", config.param); } - inference::Singleton::Global() - .Create(unique_key, config); + inference::Singleton::Global().Create( + unique_key, config); } void LiteSubgraphPass::BuildOperator( - Node *merged_node, framework::ProgramDesc* global_program, - std::vector *repetitive_params) const { - + Node* merged_node, framework::ProgramDesc* global_program, + std::vector* repetitive_params) const { framework::ProgramDesc engine_program; const std::string id = std::to_string(Get("predictor_id")); - const std::vector input_names = lite::IOVarsFilter(merged_node->inputs); - const std::vector output_names = lite::IOVarsFilter(merged_node->outputs); + const std::vector input_names = + lite::IOVarsFilter(merged_node->inputs); + const std::vector output_names = + lite::IOVarsFilter(merged_node->outputs); const std::string unique_key = lite::UniqueKey(input_names, output_names, id); - lite::OrganizeProgram(merged_node, global_program, &engine_program, repetitive_params); + lite::OrganizeProgram(merged_node, global_program, &engine_program, + repetitive_params); SetUpEngine(&engine_program, *repetitive_params, unique_key); - auto *op_desc = merged_node->Op(); + auto* op_desc = merged_node->Op(); op_desc->SetInput("Xs", input_names); op_desc->SetOutput("Ys", output_names); op_desc->SetType("lite_engine"); op_desc->SetAttr("engine_key", unique_key); } -void LiteSubgraphPass::ApplyImpl( - framework::ir::Graph *graph) const { - +void LiteSubgraphPass::ApplyImpl(framework::ir::Graph* graph) const { framework::ir::FusePassBase::Init("lite_subgraph_pass", graph); - framework::ProgramDesc* global_program = Get("program"); + framework::ProgramDesc* 
global_program = + Get("program"); - auto &lite_ops_filter = Get>("lite_ops_filter"); + auto& lite_ops_filter = Get>("lite_ops_filter"); - auto teller = [&lite_ops_filter](const Node *node) { + auto teller = [&lite_ops_filter](const Node* node) { if (!node->IsOp() || !node->Op()) return false; else if (node->Op()->Type() == "feed" || node->Op()->Type() == "fetch") @@ -274,24 +282,25 @@ void LiteSubgraphPass::ApplyImpl( else if (std::find(lite_ops_filter.begin(), lite_ops_filter.end(), node->Op()->Type()) != lite_ops_filter.end()) return false; - return inference::lite::OpTeller::Global().Tell(node->Op()->Type(), *node->Op()); + return inference::lite::OpTeller::Global().Tell(node->Op()->Type(), + *node->Op()); }; SubGraphFuser fuser(graph, teller, 0 /* min_subgraph_size */, "lite_engine"); fuser(); std::vector repetitive_params; - for (auto *node : graph->Nodes()) { + for (auto* node : graph->Nodes()) { if (node->IsOp() && !Agent(node).subgraph()->empty()) { BuildOperator(node, global_program, &repetitive_params); - std::unordered_set nodes2remove( + std::unordered_set nodes2remove( Agent(node).subgraph()->begin(), Agent(node).subgraph()->end()); framework::ir::GraphSafeRemoveNodes(graph, nodes2remove); } } - std::unordered_set nodes2remove; - for (auto *node : graph->Nodes()) { + std::unordered_set nodes2remove; + for (auto* node : graph->Nodes()) { if (node->IsOp() && Agent(node).deleted()) { nodes2remove.insert(node); } diff --git a/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.h b/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.h index 9248333ea3fbc..e79a64f0f72cf 100644 --- a/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.h +++ b/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.h @@ -27,11 +27,12 @@ namespace analysis { class LiteSubgraphPass : public framework::ir::FusePassBase { public: - void ApplyImpl(framework::ir::Graph *graph) const override; + void ApplyImpl(framework::ir::Graph* graph) const 
override; private: - void BuildOperator(framework::ir::Node *merged_node, framework::ProgramDesc* global_program, - std::vector *repetitive_params) const; + void BuildOperator(framework::ir::Node* merged_node, + framework::ProgramDesc* global_program, + std::vector* repetitive_params) const; void SetUpEngine(framework::ProgramDesc* program, const std::vector& repetitive_params, diff --git a/paddle/fluid/inference/analysis/ir_passes/subgraph_util.cc b/paddle/fluid/inference/analysis/ir_passes/subgraph_util.cc index 17a76ffe86f7b..021b24a2ef947 100644 --- a/paddle/fluid/inference/analysis/ir_passes/subgraph_util.cc +++ b/paddle/fluid/inference/analysis/ir_passes/subgraph_util.cc @@ -68,14 +68,14 @@ std::unordered_set GetRelatedIOVarNodes( return io_nodes; } -void PrependFeedOps(framework::BlockDesc* global_block, - const std::vector& feed_target_names, +void PrependFeedOps(framework::BlockDesc *global_block, + const std::vector &feed_target_names, std::string feed_holder_name) { - framework::VarDesc* feed_var = global_block->Var(feed_holder_name); + framework::VarDesc *feed_var = global_block->Var(feed_holder_name); feed_var->SetType(paddle::framework::proto::VarType::FEED_MINIBATCH); feed_var->SetPersistable(true); for (size_t i = 0; i < feed_target_names.size(); i++) { - framework::OpDesc* feed_op = global_block->AppendOp(); + framework::OpDesc *feed_op = global_block->AppendOp(); feed_op->SetType("feed"); feed_op->SetInput("X", {feed_holder_name}); feed_op->SetOutput("Out", {feed_target_names[i]}); @@ -83,14 +83,14 @@ void PrependFeedOps(framework::BlockDesc* global_block, } } -void PrependFetchOps(framework::BlockDesc* global_block, - const std::vector& fetch_target_names, - std::string fetch_holder_name) { - framework::VarDesc* fetch_var = global_block->Var(fetch_holder_name); +void PrependFetchOps(framework::BlockDesc *global_block, + const std::vector &fetch_target_names, + std::string fetch_holder_name) { + framework::VarDesc *fetch_var = 
global_block->Var(fetch_holder_name); fetch_var->SetType(paddle::framework::proto::VarType::FETCH_LIST); fetch_var->SetPersistable(true); for (size_t i = 0; i < fetch_target_names.size(); i++) { - framework::OpDesc* fetch_op = global_block->AppendOp(); + framework::OpDesc *fetch_op = global_block->AppendOp(); fetch_op->SetType("fetch"); fetch_op->SetInput("X", {fetch_target_names[i]}); fetch_op->SetOutput("Out", {fetch_holder_name}); diff --git a/paddle/fluid/inference/analysis/ir_passes/subgraph_util.h b/paddle/fluid/inference/analysis/ir_passes/subgraph_util.h index 02d7743ca7268..0635dcc9486e0 100644 --- a/paddle/fluid/inference/analysis/ir_passes/subgraph_util.h +++ b/paddle/fluid/inference/analysis/ir_passes/subgraph_util.h @@ -37,13 +37,13 @@ std::vector ExtractParameters( std::unordered_set GetRelatedIOVarNodes( const std::vector &nodes); -void PrependFeedOps(framework::BlockDesc* global_block, - const std::vector& feed_target_names, +void PrependFeedOps(framework::BlockDesc *global_block, + const std::vector &feed_target_names, std::string feed_holder_name = "feed"); -void PrependFetchOps(framework::BlockDesc* global_block, - const std::vector& fetch_target_names, - std::string fetch_holder_name = "fetch"); +void PrependFetchOps(framework::BlockDesc *global_block, + const std::vector &fetch_target_names, + std::string fetch_holder_name = "fetch"); void RenameAndGetOutputs( const std::vector &subgraph_nodes, diff --git a/paddle/fluid/inference/api/analysis_config.cc b/paddle/fluid/inference/api/analysis_config.cc index 138654066cfa1..cdc3917a3c54c 100644 --- a/paddle/fluid/inference/api/analysis_config.cc +++ b/paddle/fluid/inference/api/analysis_config.cc @@ -468,9 +468,10 @@ void AnalysisConfig::EnableAnakinEngine( Update(); } -void AnalysisConfig::EnableLiteEngine(AnalysisConfig::Precision precision_mode, - const std::vector& passes_filter, - const std::vector& ops_filter) { +void AnalysisConfig::EnableLiteEngine( + AnalysisConfig::Precision 
precision_mode, + const std::vector &passes_filter, + const std::vector &ops_filter) { use_lite_ = true; lite_precision_mode_ = precision_mode; lite_passes_filter_ = passes_filter; diff --git a/paddle/fluid/inference/api/paddle_analysis_config.h b/paddle/fluid/inference/api/paddle_analysis_config.h index 3efa5291dd677..1641356d12f39 100644 --- a/paddle/fluid/inference/api/paddle_analysis_config.h +++ b/paddle/fluid/inference/api/paddle_analysis_config.h @@ -164,9 +164,10 @@ struct AnalysisConfig { std::vector passes_filter = {}, std::vector ops_filter = {}); - void EnableLiteEngine(AnalysisConfig::Precision precision_mode = Precision::kFloat32, - const std::vector& passes_filter = {}, - const std::vector& ops_filter = {}); + void EnableLiteEngine( + AnalysisConfig::Precision precision_mode = Precision::kFloat32, + const std::vector& passes_filter = {}, + const std::vector& ops_filter = {}); /** A boolean state indicating whether the Anakin sub-graph engine is used. */ diff --git a/paddle/fluid/inference/api/paddle_pass_builder.cc b/paddle/fluid/inference/api/paddle_pass_builder.cc index 276c780912208..af855b6a69a56 100644 --- a/paddle/fluid/inference/api/paddle_pass_builder.cc +++ b/paddle/fluid/inference/api/paddle_pass_builder.cc @@ -118,8 +118,7 @@ GpuPassStrategy::GpuPassStrategy() : PassStrategy({}) { "conv_elementwise_add2_act_fuse_pass", // "conv_elementwise_add_fuse_pass", // #endif // - "transpose_flatten_concat_fuse_pass", - "lite_subgraph_pass", + "transpose_flatten_concat_fuse_pass", "lite_subgraph_pass", // following pass should be located in the last, since it will // work on all fused ops. "runtime_context_cache_pass" diff --git a/paddle/fluid/inference/lite/model_optimize_tool.cc b/paddle/fluid/inference/lite/model_optimize_tool.cc index 4e613db5a24c6..b6f1cbddb603f 100644 --- a/paddle/fluid/inference/lite/model_optimize_tool.cc +++ b/paddle/fluid/inference/lite/model_optimize_tool.cc @@ -13,7 +13,7 @@ // limitations under the License. 
#include "lite/api/paddle_api.h" -//extern paddle::lite::Predictor; +// extern paddle::lite::Predictor; namespace paddle { namespace lite { @@ -23,10 +23,10 @@ void Main() { auto predictor = std::move(lite_api::CreatePaddlePredictor(config)); } -} // namespace lite_api +} // namespace lite } // namespace paddle int main(int argc, char** argv) { paddle::lite::Main(); return 0; -} \ No newline at end of file +} diff --git a/paddle/fluid/inference/lite/op_teller.cc b/paddle/fluid/inference/lite/op_teller.cc index 1504df984dbd2..4d92a1ce7350c 100644 --- a/paddle/fluid/inference/lite/op_teller.cc +++ b/paddle/fluid/inference/lite/op_teller.cc @@ -131,25 +131,25 @@ struct SimpleOpTeller : public Teller { } private: - std::unordered_set<std::string> ops_ {}; + std::unordered_set<std::string> ops_{}; }; struct SingleBlockOpTeller : public Teller { - SingleBlockOpTeller() { - ops_.insert("while"); - } + SingleBlockOpTeller() { ops_.insert("while"); } bool operator()(const std::string& op_type, const framework::OpDesc& op_desc) override { if (ops_.count(op_type)) { SimpleOpTeller supported; const int id = op_desc.GetBlockAttrId("sub_block"); - const framework::BlockDesc& block_desc = op_desc.Block()->Program()->Block(id); - const std::vector<framework::OpDesc*>& ops_sub_block = block_desc.AllOps(); - for (auto* op: ops_sub_block) { + const framework::BlockDesc& block_desc = + op_desc.Block()->Program()->Block(id); + const std::vector<framework::OpDesc*>& ops_sub_block = + block_desc.AllOps(); + for (auto* op : ops_sub_block) { if (!supported(op->Type(), *op) && !this->operator()(op->Type(), *op)) { return false; - }; + } } return true; } @@ -160,7 +160,6 @@ struct SingleBlockOpTeller : public Teller { std::unordered_set<std::string> ops_; }; - bool OpTeller::Tell(const std::string& op_type, const framework::OpDesc& desc) { for (auto& teller : tellers_) { if ((*teller)(op_type, desc)) return true; @@ -176,5 +175,3 @@ OpTeller::OpTeller() { } // namespace lite } // namespace inference } // namespace paddle - - diff --git 
a/paddle/fluid/inference/lite/test_leaky_relu.cc b/paddle/fluid/inference/lite/test_leaky_relu.cc index 5826a39d6f617..34da46a14a642 100644 --- a/paddle/fluid/inference/lite/test_leaky_relu.cc +++ b/paddle/fluid/inference/lite/test_leaky_relu.cc @@ -13,8 +13,8 @@ // limitations under the License. #include -#include #include +#include #include #include "lite/api/paddle_api.h" #include "lite/api/paddle_place.h" @@ -27,9 +27,8 @@ namespace lite { using paddle::lite_api::Place; void TestModel(const std::vector& valid_places, - const Place& preferred_place, - bool use_npu = false) { - lite_api::CxxConfig cfg; + const Place& preferred_place, bool use_npu = false) { + lite_api::CxxConfig cfg; // cfg.set_model_dir("/shixiaowei02/Paddle_lite/xingzhaolong/leaky_relu_model"); cfg.set_model_dir("/shixiaowei02/models/tmp"); cfg.set_preferred_place(preferred_place); @@ -40,8 +39,9 @@ void TestModel(const std::vector& valid_places, input_tensor->Resize(std::vector({1, 1, 3, 3})); auto* data = input_tensor->mutable_data(); - auto input_shape = input_tensor->shape(); - int item_size = std::accumulate(input_shape.begin(), input_shape.end(), 1, std::multiplies()); + auto input_shape = input_tensor->shape(); + int item_size = std::accumulate(input_shape.begin(), input_shape.end(), 1, + std::multiplies()); for (int i = 0; i < item_size; i++) { data[i] = -1.; } diff --git a/paddle/fluid/inference/lite/test_predictor.cc b/paddle/fluid/inference/lite/test_predictor.cc index a92a8920b392d..7cb1ee7111886 100644 --- a/paddle/fluid/inference/lite/test_predictor.cc +++ b/paddle/fluid/inference/lite/test_predictor.cc @@ -12,15 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include #include +#include #include "lite/api/paddle_use_kernels.h" #include "lite/api/paddle_use_ops.h" #include "lite/api/paddle_use_passes.h" -#include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/inference/api/paddle_inference_api.h" +#include "paddle/fluid/platform/enforce.h" int main() { LOG(INFO) << "Hello World!"; diff --git a/paddle/fluid/operators/lite/lite_engine_op.cc b/paddle/fluid/operators/lite/lite_engine_op.cc index 07088fdbe64b8..34f65644cdfb0 100644 --- a/paddle/fluid/operators/lite/lite_engine_op.cc +++ b/paddle/fluid/operators/lite/lite_engine_op.cc @@ -44,4 +44,3 @@ class LiteInferVarType : public framework::VarTypeInference { namespace ops = paddle::operators; REGISTER_OPERATOR(lite_engine, ops::LiteEngineOp, ops::LiteEngineOpMaker, ops::LiteEngineOpMaker); -