Polish JitLayer in Python
0x45f committed Jul 5, 2022
1 parent 5a55f92 commit 8face41
Showing 11 changed files with 181 additions and 63 deletions.
4 changes: 4 additions & 0 deletions paddle/fluid/jit/compilation_unit.cc
@@ -42,5 +42,9 @@ std::vector<std::string> CompilationUnit::FunctionNames() const {
return names;
}

const FunctionMap &CompilationUnit::FunctionDict() const {
return function_dict_;
}

} // namespace jit
} // namespace paddle
6 changes: 5 additions & 1 deletion paddle/fluid/jit/compilation_unit.h
@@ -21,6 +21,8 @@

namespace paddle {
namespace jit {
using FunctionMap =
std::unordered_map<std::string, std::shared_ptr<BaseFunction>>;

class CompilationUnit {
public:
@@ -34,8 +36,10 @@ class CompilationUnit {

std::vector<std::string> FunctionNames() const;

const FunctionMap &FunctionDict() const;

private:
std::unordered_map<std::string, std::shared_ptr<BaseFunction>> function_dict_;
FunctionMap function_dict_;
};

} // namespace jit
2 changes: 2 additions & 0 deletions paddle/fluid/jit/layer.cc
@@ -46,5 +46,7 @@ std::vector<std::string> Layer::FunctionNames() const {
return unit_.FunctionNames();
}

const FunctionMap& Layer::FunctionDict() const { return unit_.FunctionDict(); }

} // namespace jit
} // namespace paddle
2 changes: 2 additions & 0 deletions paddle/fluid/jit/layer.h
@@ -52,6 +52,8 @@ class Layer {

std::vector<std::string> FunctionNames() const;

const FunctionMap& FunctionDict() const;

private:
// internal::Object obj_;
Name2VariableMap params_dict_;
3 changes: 2 additions & 1 deletion paddle/fluid/pybind/CMakeLists.txt
@@ -122,7 +122,8 @@ set(PYBIND_SRCS
io.cc
generator_py.cc
communication.cc
cuda_streams_py.cc)
cuda_streams_py.cc
jit.cc)

if(WITH_CUSTOM_DEVICE)
set(PYBIND_DEPS ${PYBIND_DEPS} phi_capi)
83 changes: 83 additions & 0 deletions paddle/fluid/pybind/jit.cc
@@ -0,0 +1,83 @@
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/pybind/jit.h"

#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/platform/place.h"

#include "paddle/fluid/jit/executor_function.h"
#include "paddle/fluid/jit/function_schema.h"
#include "paddle/fluid/jit/layer.h"
#include "paddle/fluid/jit/serializer.h"

namespace py = pybind11;

namespace paddle {
namespace pybind {

using Variable = paddle::framework::Variable;

void BindJit(pybind11::module *m) {
py::class_<jit::Layer>(*m, "Layer", R"DOC(Layer Class.)DOC")
.def("function_dict", &jit::Layer::FunctionDict);

py::class_<jit::ExecutorFunction, std::shared_ptr<jit::ExecutorFunction>>(
*m, "ExecutorFunction", R"DOC(ExecutorFunction Class.)DOC")
.def("__call__",
[](jit::ExecutorFunction &self,
const std::vector<std::shared_ptr<imperative::VarBase>>
&tensor_inputs) {
std::vector<Variable> var_inputs;
for (auto &tensor : tensor_inputs) {
var_inputs.emplace_back(tensor->Var());
}
auto var_outputs = self(var_inputs);

std::vector<std::shared_ptr<imperative::VarBase>> tensor_outputs;
auto output_names = self.Info()->OutputArgNames();
for (size_t i = 0; i < var_outputs.size(); ++i) {
auto var = var_outputs[i];
std::string name = output_names[i];
imperative::VariableWrapper var_wrapper(name, var);
auto shared_wrapper =
std::make_shared<imperative::VariableWrapper>(var_wrapper);
auto shared_varbase =
std::make_shared<imperative::VarBase>(shared_wrapper);
tensor_outputs.emplace_back(shared_varbase);
}
return tensor_outputs;
})
.def("info", &jit::ExecutorFunction::Info);

py::class_<jit::FunctionInfo, std::shared_ptr<jit::FunctionInfo>>(
*m, "FunctionInfo", R"DOC(FunctionInfo Class.)DOC")
.def("name", &jit::FunctionInfo::FunctionName)
.def("input_names", &jit::FunctionInfo::InputArgNames)
.def("output_names", &jit::FunctionInfo::OutputArgNames);

m->def("Load",
[](const std::string &path, const platform::CPUPlace &cpu_place) {
return paddle::jit::Load(path, cpu_place);
});

m->def("Load",
[](const std::string &path, const platform::CUDAPlace &cuda_place) {
return paddle::jit::Load(path, cuda_place);
});
}

} // namespace pybind
} // namespace paddle
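
For orientation, a minimal sketch of what these new bindings expose on the Python side, assuming core_noavx is built with this file; the model path is hypothetical and the layer is one saved via paddle.jit.save:

import paddle
from paddle.fluid.core import Load

# Load dispatches to the CPUPlace/CUDAPlace overload of jit::Load bound
# above and returns a core Layer object (the path is a placeholder).
cpp_layer = Load('./saved_model/net', paddle.CPUPlace())

# function_dict() mirrors CompilationUnit::FunctionDict(): it maps each
# function name to an ExecutorFunction, which carries a FunctionInfo.
for name, fn in cpp_layer.function_dict().items():
    info = fn.info()
    print(name, info.input_names(), info.output_names())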
27 changes: 27 additions & 0 deletions paddle/fluid/pybind/jit.h
@@ -0,0 +1,27 @@
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include <Python.h>

#include "pybind11/pybind11.h"
#include "pybind11/stl.h"

namespace paddle {
namespace pybind {

void BindJit(pybind11::module* m);

} // namespace pybind
} // namespace paddle
44 changes: 2 additions & 42 deletions paddle/fluid/pybind/pybind.cc
@@ -90,6 +90,7 @@ limitations under the License. */
#include "paddle/fluid/pybind/eager.h"
#include "paddle/fluid/pybind/imperative.h"
#include "paddle/fluid/pybind/io.h"
#include "paddle/fluid/pybind/jit.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/lod_utils.h"
#include "paddle/utils/none.h"
@@ -566,6 +567,7 @@ PYBIND11_MODULE(core_noavx, m) {
BindEager(&m);
BindEagerStringTensor(&m);
BindCudaStream(&m);
BindJit(&m);

// Not used, just make sure cpu_info.cc is linked.
paddle::platform::CpuTotalPhysicalMemory();
@@ -1746,48 +1748,6 @@ PYBIND11_MODULE(core_noavx, m) {
return new_rows;
});

py::class_<jit::ExecutorFunction, std::shared_ptr<jit::ExecutorFunction>>(
m, "ExectorFunction", R"DOC(ExectorFunction Class.)DOC")
.def("__call__",
[](jit::ExecutorFunction &self,
const std::vector<std::shared_ptr<imperative::VarBase>>
&tensor_inputs) {
std::vector<Variable> var_inputs;
for (auto &tensor : tensor_inputs) {
var_inputs.emplace_back(tensor->Var());
}
auto var_outputs = self(var_inputs);

std::vector<std::shared_ptr<imperative::VarBase>> tensor_outputs;
auto output_names = self.Info()->OutputArgNames();
for (size_t i = 0; i < var_outputs.size(); ++i) {
auto var = var_outputs[i];
std::string name = output_names[i];
imperative::VariableWrapper var_wrapper(name, var);
auto shared_wrapper =
std::make_shared<imperative::VariableWrapper>(var_wrapper);
auto shared_varbase =
std::make_shared<imperative::VarBase>(shared_wrapper);
tensor_outputs.emplace_back(shared_varbase);
}
return tensor_outputs;
});

py::class_<jit::Layer>(m, "Layer", R"DOC(Layer Class.)DOC")
.def("function", &jit::Layer::Function)
.def("forward", &jit::Layer::forward)
.def("function_names", &jit::Layer::FunctionNames);

m.def("Load",
[](const std::string &path, const platform::CPUPlace &cpu_place) {
return paddle::jit::Load(path, cpu_place);
});

m.def("Load",
[](const std::string &path, const platform::CUDAPlace &cuda_place) {
return paddle::jit::Load(path, cuda_place);
});

py::class_<Variable>(m, "Variable", R"DOC(Variable Class.
All parameter, weight, gradient are variables in Paddle.
17 changes: 0 additions & 17 deletions python/paddle/fluid/dygraph/jit.py
@@ -41,30 +41,13 @@
from paddle.fluid.framework import _current_expected_place, _dygraph_guard, _dygraph_tracer
from paddle.fluid.framework import dygraph_only, _non_static_mode
from paddle.fluid.wrapped_decorator import wrap_decorator
from paddle.fluid.core import Load

__all__ = [
'TracedLayer', 'declarative', 'dygraph_to_static_func', 'set_code_level',
'set_verbosity', 'save', 'load', 'not_to_static'
]


class JitLayer():

def __init__(self, load_path, place):
self.cpp_jit_layer = Load(load_path, place)
# bind method
for func_name in self.cpp_jit_layer.function_names():
setattr(self, func_name, self.bind_funciton(func_name))

def bind_funciton(self, name):

def inner_funciton(*args):
return self.cpp_jit_layer.function(name)(args)

return inner_funciton


def create_program_from_desc(program_desc):
program = Program()
program.desc = program_desc
5 changes: 3 additions & 2 deletions python/paddle/fluid/tests/unittests/test_jit_layer.py
@@ -19,7 +19,7 @@
import numpy as np
from paddle.static import InputSpec
from paddle.fluid.framework import _enable_legacy_dygraph
from paddle.fluid.dygraph.jit import JitLayer
from paddle.jit.layer import Layer
from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator

_enable_legacy_dygraph()
@@ -68,7 +68,8 @@ def test_multi_load(self):
place = paddle.CPUPlace()
if paddle.is_compiled_with_cuda():
place = paddle.CUDAPlace(0)
jit_layer = JitLayer(model_path, place)
jit_layer = Layer()
jit_layer.load(model_path, place)
forward_out2 = jit_layer.forward(x)
infer_out2 = jit_layer.infer(x)
self.assertEqual(np.allclose(forward_out1, forward_out2[0]), True)
51 changes: 51 additions & 0 deletions python/paddle/jit/layer.py
@@ -0,0 +1,51 @@
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2021 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle.fluid.core import Load


class Layer():

def __init__(self):
self.cpp_layer = None
# {name: Function}
self.function_map = {}

def load(self, load_path, place):
self.cpp_layer = Load(load_path, place)
function_dict = self.cpp_layer.function_dict()

for name, function in function_dict.items():
self.function_map[name] = Function(function)
setattr(self, name, self.function_map[name].__call__)


class Function():

def __init__(self, function):
self.function = function
self.info = FunctionInfo(function.info())

def __call__(self, *args):
return self.function(args)


class FunctionInfo():

def __init__(self, info):
self.info = info

def name(self):
return self.info.name()
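
A short usage sketch of the wrapper above, modeled on the updated test_jit_layer.py; the saved-model path is hypothetical, and legacy dygraph mode is assumed, as in that test:

import paddle
from paddle.fluid.framework import _enable_legacy_dygraph
from paddle.jit.layer import Layer

_enable_legacy_dygraph()  # the underlying C++ bindings consume VarBase tensors

layer = Layer()
layer.load('./multi_program_model', paddle.CPUPlace())  # hypothetical path

# Every function recorded in the saved program is bound onto the instance,
# so forward (and any other saved function, e.g. infer) is callable directly.
x = paddle.full([2, 4], 2)
out = layer.forward(x)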

1 comment on commit 8face41


@paddle-bot-old (bot) commented on 8face41, Jul 5, 2022


🕵️ CI failures summary

🔍 PR: #44010 Commit ID: 8face41 contains failed CI.

🔹 Failed: PR-CI-APPROVAL

