diff --git a/paddle/fluid/jit/CMakeLists.txt b/paddle/fluid/jit/CMakeLists.txt index 9c96d8e986a22..75483ac6544f4 100644 --- a/paddle/fluid/jit/CMakeLists.txt +++ b/paddle/fluid/jit/CMakeLists.txt @@ -26,7 +26,8 @@ cc_library( cc_library( jit_layer SRCS layer.cc - DEPS jit_compilation_unit) + DEPS jit_serializer jit_function_utils jit_serializer_utils + jit_compilation_unit jit_function_schema) if(WITH_TESTING AND NOT WIN32 @@ -45,12 +46,7 @@ if(WITH_TESTING feed_op fetch_op scale_op - jit_serializer - jit_layer - jit_function_utils - jit_function_schema - jit_compilation_unit - jit_serializer_utils) + jit_layer) cc_test( layer_test SRCS layer_test.cc diff --git a/paddle/fluid/jit/compilation_unit.cc b/paddle/fluid/jit/compilation_unit.cc index 261839b479e5b..d62c497d8b338 100644 --- a/paddle/fluid/jit/compilation_unit.cc +++ b/paddle/fluid/jit/compilation_unit.cc @@ -22,16 +22,28 @@ namespace jit { std::shared_ptr CompilationUnit::Function( const std::string &name) const { PADDLE_ENFORCE_EQ( - function_dict_.count(name), + function_map_.count(name), 1, platform::errors::InvalidArgument( - "Funciton name %s is not exist in function_dict_.", name)); - return function_dict_.at(name); + "Function name %s does not exist in function_map_.", name)); + return function_map_.at(name); } void CompilationUnit::SetFunction( const std::string &name, const std::shared_ptr &function) { - function_dict_[name] = function; + function_map_[name] = function; +} + +std::vector CompilationUnit::FunctionNames() const { + std::vector names; + for (auto it = function_map_.begin(); it != function_map_.end(); it++) { + names.emplace_back(it->first); + } + return names; +} + +const Name2FunctionMap &CompilationUnit::FunctionMap() const { + return function_map_; } } // namespace jit diff --git a/paddle/fluid/jit/compilation_unit.h b/paddle/fluid/jit/compilation_unit.h index 2944aa928f32f..45a771b649401 100644 --- a/paddle/fluid/jit/compilation_unit.h +++ b/paddle/fluid/jit/compilation_unit.h @@ 
-21,6 +21,8 @@ namespace paddle { namespace jit { +using Name2FunctionMap = + std::unordered_map>; class CompilationUnit { public: @@ -32,8 +34,12 @@ class CompilationUnit { void SetFunction(const std::string &name, const std::shared_ptr &function); + std::vector FunctionNames() const; + + const Name2FunctionMap &FunctionMap() const; + private: - std::unordered_map> function_dict_; + Name2FunctionMap function_map_; }; } // namespace jit diff --git a/paddle/fluid/jit/executor_function.h b/paddle/fluid/jit/executor_function.h index 224798b7dbb2b..36cb438e34cc2 100644 --- a/paddle/fluid/jit/executor_function.h +++ b/paddle/fluid/jit/executor_function.h @@ -56,6 +56,8 @@ class ExecutorFunction : public BaseFunction { return res; } + const std::shared_ptr &Info() const { return info_; } + private: std::shared_ptr info_; framework::Scope scope_; diff --git a/paddle/fluid/jit/layer.cc b/paddle/fluid/jit/layer.cc index a11101d520493..6662abd17d2cf 100644 --- a/paddle/fluid/jit/layer.cc +++ b/paddle/fluid/jit/layer.cc @@ -42,5 +42,13 @@ void Layer::SetFunction(const std::string& name, unit_.SetFunction(name, function); } +std::vector Layer::FunctionNames() const { + return unit_.FunctionNames(); +} + +const Name2FunctionMap& Layer::FunctionMap() const { + return unit_.FunctionMap(); +} + } // namespace jit } // namespace paddle diff --git a/paddle/fluid/jit/layer.h b/paddle/fluid/jit/layer.h index 1407259d14444..5c9f61b0d47b3 100644 --- a/paddle/fluid/jit/layer.h +++ b/paddle/fluid/jit/layer.h @@ -50,6 +50,10 @@ class Layer { void SetFunction(const std::string& name, const std::shared_ptr& function); + std::vector FunctionNames() const; + + const Name2FunctionMap& FunctionMap() const; + private: // internal::Object obj_; Name2VariableMap params_dict_; diff --git a/paddle/fluid/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt index 2b7e12499976e..b2ecf36c5d227 100755 --- a/paddle/fluid/pybind/CMakeLists.txt +++ b/paddle/fluid/pybind/CMakeLists.txt @@ -38,7 +38,8 @@ 
set(PYBIND_DEPS global_utils phi_utils tcp_store - new_profiler) + new_profiler + jit_layer) if(WITH_PSCORE) set(PYBIND_DEPS ${PYBIND_DEPS} ps_service) @@ -121,7 +122,8 @@ set(PYBIND_SRCS io.cc generator_py.cc communication.cc - cuda_streams_py.cc) + cuda_streams_py.cc + jit.cc) if(WITH_CUSTOM_DEVICE) set(PYBIND_DEPS ${PYBIND_DEPS} phi_capi) diff --git a/paddle/fluid/pybind/jit.cc b/paddle/fluid/pybind/jit.cc new file mode 100644 index 0000000000000..07b79742f002e --- /dev/null +++ b/paddle/fluid/pybind/jit.cc @@ -0,0 +1,83 @@ +/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/pybind/jit.h" + +#include "paddle/fluid/framework/variable.h" +#include "paddle/fluid/imperative/layer.h" +#include "paddle/fluid/platform/place.h" + +#include "paddle/fluid/jit/executor_function.h" +#include "paddle/fluid/jit/function_schema.h" +#include "paddle/fluid/jit/layer.h" +#include "paddle/fluid/jit/serializer.h" + +namespace py = pybind11; + +namespace paddle { +namespace pybind { + +using Variable = paddle::framework::Variable; + +void BindJit(pybind11::module *m) { + py::class_(*m, "Layer", R"DOC(Layer Class.)DOC") + .def("function_dict", &jit::Layer::FunctionMap); + + py::class_>( + *m, "ExecutorFunction", R"DOC(ExecutorFunction Class.)DOC") + .def("__call__", + [](jit::ExecutorFunction &self, + const std::vector> + &tensor_inputs) { + std::vector var_inputs; + for (auto &tensor : tensor_inputs) { + var_inputs.emplace_back(tensor->Var()); + } + auto var_outputs = self(var_inputs); + + std::vector> tensor_outputs; + auto output_names = self.Info()->OutputArgNames(); + for (size_t i = 0; i < var_outputs.size(); ++i) { + auto var = var_outputs[i]; + std::string name = output_names[i]; + imperative::VariableWrapper var_wrapper(name, var); + auto shared_wrapper = + std::make_shared(var_wrapper); + auto shared_varbase = + std::make_shared(shared_wrapper); + tensor_outputs.emplace_back(shared_varbase); + } + return tensor_outputs; + }) + .def("info", &jit::ExecutorFunction::Info); + + py::class_>( + *m, "FunctionInfo", R"DOC(FunctionInfo Class.)DOC") + .def("name", &jit::FunctionInfo::FunctionName) + .def("input_names", &jit::FunctionInfo::InputArgNames) + .def("output_names", &jit::FunctionInfo::OutputArgNames); + + m->def("Load", + [](const std::string &path, const platform::CPUPlace &cpu_place) { + return paddle::jit::Load(path, cpu_place); + }); + + m->def("Load", + [](const std::string &path, const platform::CUDAPlace &cuda_place) { + return paddle::jit::Load(path, cuda_place); + }); +} + +} // namespace pybind +} // namespace 
paddle diff --git a/paddle/fluid/pybind/jit.h b/paddle/fluid/pybind/jit.h new file mode 100644 index 0000000000000..897e22e8b8594 --- /dev/null +++ b/paddle/fluid/pybind/jit.h @@ -0,0 +1,27 @@ +/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ +#pragma once + +#include + +#include "pybind11/pybind11.h" +#include "pybind11/stl.h" + +namespace paddle { +namespace pybind { + +void BindJit(pybind11::module* m); + +} // namespace pybind +} // namespace paddle diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 7b7e9d1a6c9ed..a7b0460d28573 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -90,6 +90,7 @@ limitations under the License. */ #include "paddle/fluid/pybind/eager.h" #include "paddle/fluid/pybind/imperative.h" #include "paddle/fluid/pybind/io.h" +#include "paddle/fluid/pybind/jit.h" #include "paddle/phi/core/compat/convert_utils.h" #include "paddle/phi/core/lod_utils.h" #include "paddle/utils/none.h" @@ -563,6 +564,7 @@ PYBIND11_MODULE(core_noavx, m) { BindEager(&m); BindEagerStringTensor(&m); BindCudaStream(&m); + BindJit(&m); // Not used, just make sure cpu_info.cc is linked. 
paddle::platform::CpuTotalPhysicalMemory(); diff --git a/python/paddle/fluid/tests/unittests/test_jit_layer.py b/python/paddle/fluid/tests/unittests/test_jit_layer.py new file mode 100644 index 0000000000000..24c0131fd7012 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_jit_layer.py @@ -0,0 +1,82 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import paddle +import unittest +import tempfile +import numpy as np +from paddle.static import InputSpec +from paddle.fluid.framework import _enable_legacy_dygraph +from paddle.jit.layer import Layer +from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator + +_enable_legacy_dygraph() +paddle.seed(1) + + +class Net(paddle.nn.Layer): + + def __init__(self): + super(Net, self).__init__() + self.fc1 = paddle.nn.Linear(4, 4) + self.fc2 = paddle.nn.Linear(4, 4) + self._bias = 0.4 + + @paddle.jit.to_static(input_spec=[InputSpec([None, 4], dtype='float32')]) + def forward(self, x): + out = self.fc1(x) + out = self.fc2(out) + out = paddle.nn.functional.relu(out) + out = paddle.mean(out) + return out + + @paddle.jit.to_static(input_spec=[InputSpec([None, 4], dtype='float32')]) + def infer(self, input): + out = self.fc2(input) + out = out + self._bias + out = paddle.mean(out) + return out + + +class TestMultiLoad(unittest.TestCase): + + def test_multi_load(self): + self.temp_dir = tempfile.TemporaryDirectory() + + x 
= paddle.full([2, 4], 2) + model = Net() + program_translator = ProgramTranslator() + program_translator.enable(False) + forward_out1 = model.forward(x) + infer_out1 = model.infer(x) + program_translator.enable(True) + + model_path = os.path.join(self.temp_dir.name, 'multi_program') + paddle.jit.save(model, model_path, combine_params=True) + place = paddle.CPUPlace() + if paddle.is_compiled_with_cuda(): + place = paddle.CUDAPlace(0) + jit_layer = Layer() + jit_layer.load(model_path, place) + forward_out2 = jit_layer.forward(x) + infer_out2 = jit_layer.infer(x) + self.assertEqual(np.allclose(forward_out1, forward_out2[0]), True) + self.assertEqual(np.allclose(infer_out1, infer_out2[0]), True) + + self.temp_dir.cleanup() + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/jit/layer.py b/python/paddle/jit/layer.py new file mode 100644 index 0000000000000..8ee3652dca843 --- /dev/null +++ b/python/paddle/jit/layer.py @@ -0,0 +1,51 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2021 NVIDIA Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from paddle.fluid.core import Load + + +class Layer(object): + + def __init__(self): + self.cpp_layer = None + # {name: Function} + self.functions = {} + + def load(self, load_path, place): + self.cpp_layer = Load(load_path, place) + function_dict = self.cpp_layer.function_dict() + + for name, function in function_dict.items(): + self.functions[name] = Function(function) + setattr(self, name, self.functions[name]) + + +class Function(): + + def __init__(self, function): + self.function = function + self.info = FunctionInfo(function.info()) + + def __call__(self, *args): + return self.function(args) + + +class FunctionInfo(): + + def __init__(self, info): + self.info = info + + def name(self): + return self.info.name()