Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Support broadcast tensor in phi system #44590

Merged
merged 2 commits on Jul 28, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions paddle/phi/api/lib/api_gen_utils.cc
Original file line number Diff line number Diff line change
Expand Up @@ -31,14 +31,14 @@ paddle::optional<phi::DenseTensor> TensorToDenseTensor(
return nullptr;
}

std::unique_ptr<std::vector<phi::DenseTensor>> TensorToDenseTensor(
std::unique_ptr<std::vector<phi::DenseTensor*>> TensorToDenseTensor(
const std::vector<Tensor>& tensors) {
auto pt_tensors = std::make_unique<std::vector<phi::DenseTensor>>();
auto pt_tensors = std::make_unique<std::vector<phi::DenseTensor*>>();
pt_tensors->reserve(tensors.size());

for (const auto& t : tensors) {
pt_tensors->push_back(
*std::dynamic_pointer_cast<phi::DenseTensor>(t.impl()));
std::dynamic_pointer_cast<phi::DenseTensor>(t.impl()).get());
}

return pt_tensors;
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/api/lib/api_gen_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ std::shared_ptr<phi::DenseTensor> TensorToDenseTensor(const Tensor& tensor);
paddle::optional<phi::DenseTensor> TensorToDenseTensor(
const paddle::optional<Tensor>& tensor);

std::unique_ptr<std::vector<phi::DenseTensor>> TensorToDenseTensor(
std::unique_ptr<std::vector<phi::DenseTensor*>> TensorToDenseTensor(
const std::vector<Tensor>& tensors);

std::shared_ptr<phi::SelectedRows> TensorToSelectedRows(const Tensor& tensor);
Expand Down
14 changes: 10 additions & 4 deletions paddle/phi/api/yaml/generator/api_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -582,18 +582,18 @@ def get_kernel_args(self, kernel_tensor_type=None, code_indent=''):
trans_flag = "{false, true}"
if input_name in self.optional_vars:
input_tensor_code = input_tensor_code + f"""
{code_indent} auto {PREFIX_TENSOR_NAME}{input_name} = PrepareData({input_name}, kernel.InputAt({i}), {trans_flag});"""
{code_indent} auto {PREFIX_TENSOR_NAME}{input_name} = PrepareData({input_name}, kernel.InputAt({kernel_param.index(input_name)}), {trans_flag});"""

else:
if self.inputs['input_info'][
input_name] == "const Tensor&":
input_tensor_code = input_tensor_code + f"""
{code_indent} auto {PREFIX_TENSOR_NAME}{input_name} = PrepareData({input_name}, kernel.InputAt({i}), {trans_flag});"""
{code_indent} auto {PREFIX_TENSOR_NAME}{input_name} = PrepareData({input_name}, kernel.InputAt({kernel_param.index(input_name)}), {trans_flag});"""

elif self.inputs['input_info'][
input_name] == "const std::vector<Tensor>&":
input_tensor_code = input_tensor_code + f"""
{code_indent} auto {PREFIX_TENSOR_NAME}{input_name}_vec = PrepareData({input_name}, kernel.InputAt({i}), {trans_flag});
{code_indent} auto {PREFIX_TENSOR_NAME}{input_name}_vec = PrepareData({input_name}, kernel.InputAt({kernel_param.index(input_name)}), {trans_flag});
{code_indent} std::vector<const phi::DenseTensor*> {PREFIX_TENSOR_NAME}{input_name}({PREFIX_TENSOR_NAME}{input_name}_vec->size());
{code_indent} for (size_t i = 0; i < {PREFIX_TENSOR_NAME}{input_name}.size(); ++i) {{
{code_indent} {PREFIX_TENSOR_NAME}{input_name}[i] = &{PREFIX_TENSOR_NAME}{input_name}_vec->at(i);
Expand All @@ -612,7 +612,13 @@ def get_kernel_args(self, kernel_tensor_type=None, code_indent=''):
{code_indent} paddle::optional<phi::TensorBase> {PREFIX_TENSOR_NAME}{input_name} = {input_name} ? paddle::optional<phi::TensorBase>(*{input_name}->impl()) : paddle::none;"""

else:
input_tensor_code = input_tensor_code + f"""
if self.inputs['input_info'][
input_name] == "const std::vector<Tensor>&":
input_tensor_code = input_tensor_code + f"""
{code_indent} auto {PREFIX_TENSOR_NAME}{input_name}_uq_ptr = TensorToDenseTensor({input_name});
{code_indent} const auto& {PREFIX_TENSOR_NAME}{input_name} = *{PREFIX_TENSOR_NAME}{input_name}_uq_ptr;"""
else:
input_tensor_code = input_tensor_code + f"""
{code_indent} auto {PREFIX_TENSOR_NAME}{input_name} = {input_name}.impl();"""

kernel_args = ["*dev_ctx"]
Expand Down
9 changes: 9 additions & 0 deletions paddle/phi/api/yaml/legacy_api.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2360,6 +2360,15 @@
output : Tensor
invoke : full_like(x, 0, dtype, place)

- api : broadcast_tensors
args : (Tensor[] x)
output : Tensor[]{x.size()}
infer_meta :
func : BroadcastTensorsInferMeta
kernel :
func : broadcast_tensors
backward : broadcast_tensors_grad

# eig
- api: eig
args: (Tensor x)
Expand Down
12 changes: 12 additions & 0 deletions paddle/phi/api/yaml/legacy_backward.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -271,6 +271,18 @@
func : brelu_grad
inplace : (out_grad -> x_grad)

- backward_api : broadcast_tensors_grad
forward : broadcast_tensors (Tensor[] x) -> Tensor[](out)
args : (Tensor[] x, Tensor[] out_grad)
output : Tensor[](x_grad)
infer_meta :
func : UnchangedMultiInferMeta
param : [x]
kernel :
func : broadcast_tensors_grad
param : [out_grad]
no_need_buffer : x

- backward_api : cast_grad
forward : cast (Tensor x, DataType out_dtype) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
Expand Down
33 changes: 28 additions & 5 deletions python/paddle/fluid/tests/unittests/test_broadcast_tensors_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,26 +99,49 @@ def setUp(self):
]
self.set_place()
self.set_dtypes()
self.python_api = paddle.broadcast_tensors

def run_test(self, test_func, args):
def run_dual_test(self, test_func, args):
for dtype in self.dtypes:
for gen_func in self.test_gen_func_list:
self.inputs, self.outputs = gen_func(dtype)
test_func(**args)
if len(self.outputs["Out"]) < 3:
self.python_out_sig = [
f"out{i}" for i in range(len(self.outputs["Out"]))
]
test_func(**args)

def run_triple_in_test(self, test_func, args):
for dtype in self.dtypes:
self.inputs, self.outputs = self.test_gen_func_list[2](dtype)
self.python_out_sig = [
f"out{i}" for i in range(len(self.outputs["Out"]))
]
test_func(**args)

def test_check_output(self):
self.run_test(self.check_output_with_place, {
self.run_dual_test(self.check_output_with_place, {
"place": self.place,
"atol": 1e-1
"atol": 1e-1,
"check_eager": True
})

def test_check_grad_normal(self):
self.run_test(
self.run_dual_test(
self.check_grad_with_place, {
"place": self.place,
"inputs_to_check": ['x0', 'x1'],
"output_names": ['out0', 'out1'],
"max_relative_error": 0.05,
"check_eager": True
})
self.run_triple_in_test(
self.check_grad_with_place, {
"place": self.place,
"inputs_to_check": ['x0', 'x1', 'x2'],
"output_names": ['out0', 'out1', "out2"],
"max_relative_error": 0.05,
"check_eager": True
})


Expand Down
4 changes: 3 additions & 1 deletion python/paddle/tensor/manipulation.py
Original file line number Diff line number Diff line change
Expand Up @@ -1128,7 +1128,9 @@ def broadcast_tensors(input, name=None):
"""

num_inputs = len(input)
if paddle.in_dynamic_mode():
if paddle.framework.in_dygraph_mode():
return _C_ops.final_state_broadcast_tensors(input)
if paddle.framework._non_static_mode():
return _C_ops.broadcast_tensors(input, num_inputs)

check_type(input, 'input', (list, tuple), 'broadcast_tensors')
Expand Down