Add some yaml config #41053

Merged · 14 commits · Apr 3, 2022
4 changes: 2 additions & 2 deletions paddle/phi/kernels/cpu/hierarchical_sigmoid_grad.h
@@ -31,11 +31,11 @@ void HierarchicalSigmoidGradKernelImpl(
     const DenseTensor& x,
     const DenseTensor& w,
     const DenseTensor& label,
-    const DenseTensor& pre_out,
-    const DenseTensor& out_grad,
     paddle::optional<const DenseTensor&> path,
     paddle::optional<const DenseTensor&> code,
     paddle::optional<const DenseTensor&> bias,
+    const DenseTensor& pre_out,
+    const DenseTensor& out_grad,
     int num_classes,
     bool remote_prefetch,
     int trainer_id,
8 changes: 4 additions & 4 deletions paddle/phi/kernels/cpu/hierarchical_sigmoid_grad_kernel.cc
@@ -25,11 +25,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
     const DenseTensor& x,
     const DenseTensor& w,
     const DenseTensor& label,
-    const DenseTensor& pre_out,
-    const DenseTensor& out_grad,
     paddle::optional<const DenseTensor&> path,
     paddle::optional<const DenseTensor&> code,
     paddle::optional<const DenseTensor&> bias,
+    const DenseTensor& pre_out,
+    const DenseTensor& out_grad,
     int num_classes,
     bool remote_prefetch,
     int trainer_id,
@@ -44,11 +44,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
       x,
       w,
       label,
-      pre_out,
-      out_grad,
       path,
       code,
       bias,
+      pre_out,
+      out_grad,
       num_classes,
       remote_prefetch,
       trainer_id,
4 changes: 2 additions & 2 deletions paddle/phi/kernels/hierarchical_sigmoid_grad_kernel.h
@@ -23,11 +23,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
     const DenseTensor& x,
     const DenseTensor& w,
     const DenseTensor& label,
-    const DenseTensor& pre_out,
-    const DenseTensor& out_grad,
     paddle::optional<const DenseTensor&> path,
     paddle::optional<const DenseTensor&> code,
     paddle::optional<const DenseTensor&> bias,
+    const DenseTensor& pre_out,
+    const DenseTensor& out_grad,
     int num_classes,
     bool remote_prefetch,
     int trainer_id,
@@ -40,11 +40,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
     const DenseTensor& x,
     const DenseTensor& w,
     const DenseTensor& label,
-    const DenseTensor& pre_out,
-    const DenseTensor& out_grad,
     paddle::optional<const DenseTensor&> path,
     paddle::optional<const DenseTensor&> code,
     paddle::optional<const DenseTensor&> bias,
+    const DenseTensor& pre_out,
+    const DenseTensor& out_grad,
     int num_classes,
     bool remote_prefetch,
     int trainer_id,
@@ -70,11 +70,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
       x,
       w,
       label,
-      pre_out,
-      out_grad,
       path,
       code,
       bias,
+      pre_out,
+      out_grad,
       num_classes,
       remote_prefetch,
       trainer_id,
@@ -25,11 +25,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
     const DenseTensor& x,
     const DenseTensor& w,
     const DenseTensor& label,
-    const DenseTensor& pre_out,
-    const DenseTensor& out_grad,
     paddle::optional<const DenseTensor&> path,
     paddle::optional<const DenseTensor&> code,
     paddle::optional<const DenseTensor&> bias,
+    const DenseTensor& pre_out,
+    const DenseTensor& out_grad,
     int num_classes,
     bool remote_prefetch,
     int trainer_id,
12 changes: 6 additions & 6 deletions paddle/phi/ops/compat/hierarchical_sigmoid_sig.cc
@@ -38,11 +38,11 @@ KernelSignature HierarchicalSigmoidGradOpArgumentMapping(
         {"X",
          "W",
          "Label",
-         "PreOut",
-         GradVarName("Out"),
          "PathTable",
          "PathCode",
-         "Bias"},
+         "Bias",
+         "PreOut",
+         GradVarName("Out")},
         {"num_classes",
          "remote_prefetch",
          "trainer_id",
@@ -57,11 +57,11 @@ KernelSignature HierarchicalSigmoidGradOpArgumentMapping(
         {"X",
          "W",
          "Label",
-         "PreOut",
-         GradVarName("Out"),
          "PathTable",
          "PathCode",
-         "Bias"},
+         "Bias",
+         "PreOut",
+         GradVarName("Out")},
         {"num_classes",
          "remote_prefetch",
          "trainer_id",
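The mappings above keep the op's input names in the same positional order as the reordered kernel parameters: the required `PreOut` and `Out@GRAD` tensors now follow the optional `PathTable`/`PathCode`/`Bias` inputs on both sides. A rough way to picture the invariant, in plain Python (an illustration only, not Paddle code; `GradVarName("Out")` expands to the string `"Out@GRAD"`):

    # Positional correspondence between the op input names listed in
    # hierarchical_sigmoid_sig.cc and the kernel's C++ parameters declared
    # in hierarchical_sigmoid_grad_kernel.h, after this PR's reordering.
    op_inputs = ["X", "W", "Label",
                 "PathTable", "PathCode", "Bias",  # optional inputs
                 "PreOut", "Out@GRAD"]             # moved after the optionals
    kernel_params = ["x", "w", "label",
                     "path", "code", "bias",
                     "pre_out", "out_grad"]

    # The argument mapping is positional, so both lists must stay in lockstep.
    assert len(op_inputs) == len(kernel_params)
    for op_name, param in zip(op_inputs, kernel_params):
        print(f"{op_name:>9} -> {param}")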
@@ -13,12 +13,13 @@
 # limitations under the License.
 
 import paddle
-import paddle.nn.functional as F
-from paddle import fluid
-import unittest
-import numpy as np
 import paddle.fluid.dygraph as dg
 import paddle.fluid.initializer as I
+import numpy as np
+import unittest
+import paddle.nn.functional as F
+from paddle import fluid
+from paddle.fluid.framework import _test_eager_guard
 from unittest import TestCase
 
 
@@ -159,12 +160,22 @@ def test_identity_cpu(self):
         self.place = fluid.CPUPlace()
         self._test_identity()
 
+    def test_identity_cpu_check_eager(self):
+        with _test_eager_guard():
+            self.test_identity_cpu()
+
     @unittest.skipIf(not fluid.core.is_compiled_with_cuda(),
                      "core is not compiled with CUDA")
     def test_identity_gpu(self):
         self.place = fluid.CUDAPlace(0)
         self._test_identity()
 
+    @unittest.skipIf(not fluid.core.is_compiled_with_cuda(),
+                     "core is not compiled with CUDA")
+    def test_identity_gpu_check_eager(self):
+        with _test_eager_guard():
+            self.test_identity_gpu()
+
 
 class TestFunctionalConv2DError(TestCase):
     batch_size = 4
@@ -520,6 +531,10 @@ def test_dygraph_exception(self):
         with self.assertRaises(ValueError):
             self.dygraph_case()
 
+    def test_dygraph_exception_check_eager(self):
+        with _test_eager_guard():
+            self.test_dygraph_exception()
+
     def test_static_exception(self):
         with self.assertRaises(ValueError):
             self.static_graph_case()
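The new `*_check_eager` tests all follow one pattern: wrap an existing test method in `_test_eager_guard()` so the identical assertions run a second time under eager (final-state) execution. A minimal sketch of the pattern, assuming a Paddle build that still ships `paddle.fluid` (the test case and tensor values here are made up for illustration):

    import unittest

    import paddle
    from paddle.fluid.framework import _test_eager_guard


    class SketchTest(unittest.TestCase):
        def test_add(self):
            # Ordinary dygraph test body; runs in whatever mode is active.
            x = paddle.to_tensor([1.0, 2.0])
            y = paddle.to_tensor([3.0, 4.0])
            self.assertEqual((x + y).numpy().tolist(), [4.0, 6.0])

        def test_add_check_eager(self):
            # Re-run the exact same assertions under eager execution.
            with _test_eager_guard():
                self.test_add()


    if __name__ == '__main__':
        unittest.main()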
@@ -13,12 +13,13 @@
 # limitations under the License.
 
 import paddle
-import paddle.nn.functional as F
-from paddle import fluid
-import numpy as np
 import paddle.fluid.dygraph as dg
 import paddle.fluid.initializer as I
+import numpy as np
+import paddle.nn.functional as F
 import unittest
+from paddle import fluid
+from paddle.fluid.framework import _test_eager_guard
 from unittest import TestCase
 
 
@@ -165,12 +166,22 @@ def test_identity_cpu(self):
         self.place = fluid.CPUPlace()
         self._test_identity()
 
+    def test_identity_cpu_check_eager(self):
+        with _test_eager_guard():
+            self.test_identity_cpu()
+
     @unittest.skipIf(not fluid.core.is_compiled_with_cuda(),
                      "core is not compiled with CUDA")
     def test_identity_gpu(self):
         self.place = fluid.CUDAPlace(0)
         self._test_identity()
 
+    @unittest.skipIf(not fluid.core.is_compiled_with_cuda(),
+                     "core is not compiled with CUDA")
+    def test_identity_gpu_check_eager(self):
+        with _test_eager_guard():
+            self.test_identity_gpu()
+
 
 class TestFunctionalConv3DTransposeError(TestCase):
     batch_size = 4
@@ -540,6 +551,10 @@ def test_dygraph_exception(self):
         with self.assertRaises(ValueError):
             self.dygraph_case()
 
+    def test_dygraph_exception_check_eager(self):
+        with _test_eager_guard():
+            self.test_dygraph_exception()
+
     def test_static_exception(self):
         with self.assertRaises(ValueError):
             self.static_graph_case()
5 changes: 3 additions & 2 deletions python/paddle/fluid/tests/unittests/test_index_select_op.py
@@ -25,6 +25,7 @@
 
 class TestIndexSelectOp(OpTest):
     def setUp(self):
+        self.python_api = paddle.index_select
         self.op_type = "index_select"
         self.init_dtype_type()
         index_np = np.random.randint(
@@ -54,10 +55,10 @@ def init_dtype_type(self):
         self.index_size = 100
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad_normal(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestIndexSelectOpCase2(TestIndexSelectOp):
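For `OpTest`-based cases the PR wires each test to its Python-level API instead: `self.python_api` names the function the eager mode should dispatch to, and `check_eager=True` makes `check_output`/`check_grad` verify that path as well. A hedged sketch of the shape of such a test (the input shapes and values here are illustrative, not from the PR):

    import numpy as np
    import paddle
    from op_test import OpTest  # Paddle's in-tree test harness


    class SketchIndexSelectTest(OpTest):
        def setUp(self):
            self.python_api = paddle.index_select   # eager-mode entry point
            self.op_type = "index_select"           # legacy op name
            x = np.random.random((10, 4)).astype("float64")
            index = np.array([0, 3, 7], dtype="int64")
            self.inputs = {'X': x, 'Index': index}
            self.attrs = {'dim': 0}
            self.outputs = {'Out': x[index]}

        def test_check_output(self):
            # check_eager=True also runs the op through self.python_api.
            self.check_output(check_eager=True)

        def test_check_grad(self):
            self.check_grad(['X'], 'Out', check_eager=True)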
11 changes: 8 additions & 3 deletions python/paddle/fluid/tests/unittests/test_norm_all.py
@@ -86,8 +86,13 @@ def frobenius_norm(x, axis=None, keepdims=False):
     return r
 
 
+def final_state_frobenius_norm(x, dim, keep_dim, reduce_all):
+    return paddle.linalg.norm(x, p='fro', axis=dim, keepdim=keep_dim)
+
+
 class TestFrobeniusNormOp(OpTest):
     def setUp(self):
+        self.python_api = final_state_frobenius_norm
         self.op_type = "frobenius_norm"
         self.init_test_case()
         x = (np.random.random(self.shape) + 1.0).astype(self.dtype)
@@ -102,10 +107,10 @@ def setUp(self):
         self.outputs = {'Out': norm}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
     def init_test_case(self):
         self.shape = [2, 3, 4, 5]
@@ -122,7 +127,7 @@ def init_test_case(self):
         self.dtype = "float32"
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestPnormOp(OpTest):
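When an op's attribute list does not match the public API's signature, the test defines a thin adapter and points `python_api` at that instead, as `final_state_frobenius_norm` does above: it accepts the op's `(x, dim, keep_dim, reduce_all)` attributes and forwards them to `paddle.linalg.norm`. For instance (illustrative values, not from the PR):

    import numpy as np
    import paddle


    def final_state_frobenius_norm(x, dim, keep_dim, reduce_all):
        # reduce_all mirrors the op's attrs but is unused here;
        # p='fro' selects the Frobenius norm.
        return paddle.linalg.norm(x, p='fro', axis=dim, keepdim=keep_dim)


    x = paddle.to_tensor(np.ones((2, 3, 4), dtype="float32"))
    out = final_state_frobenius_norm(x, dim=[1, 2], keep_dim=False, reduce_all=False)
    print(out)  # each 3x4 slice of ones gives sqrt(12) ≈ 3.4641, shape [2]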
19 changes: 13 additions & 6 deletions python/paddle/fluid/tests/unittests/test_pool1d_api.py
@@ -12,16 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import numpy as np
-import paddle
 import unittest
-import paddle.fluid as fluid
 import paddle.fluid.core as core
-import paddle.nn.functional as F
+import numpy as np
 from op_test import OpTest
-import paddle.fluid.core as core
-import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
+import paddle
+import paddle.nn.functional as F
+import paddle.fluid as fluid
+from paddle.fluid.framework import _test_eager_guard
 
 
 def adaptive_start_index(index, input_size, output_size):
@@ -244,6 +243,10 @@ def test_pool1d(self):
             self.check_avg_dygraph_padding_same(place)
             self.check_max_dygraph_return_index_results(place)
 
+    def test_dygraph_final_state_api(self):
+        with _test_eager_guard():
+            self.test_pool1d()
+
 
 class TestPool2DError_API(unittest.TestCase):
     def test_error_api(self):
@@ -370,6 +373,10 @@ def run_stride_out_of_range():
 
         self.assertRaises(ValueError, run_stride_out_of_range)
 
+    def test_dygraph_final_state_api(self):
+        with _test_eager_guard():
+            self.test_error_api()
+
 
 if __name__ == '__main__':
     unittest.main()
17 changes: 13 additions & 4 deletions python/paddle/fluid/tests/unittests/test_pool2d_api.py
@@ -12,14 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from test_pool2d_op import adaptive_start_index, adaptive_end_index, pool2D_forward_naive, avg_pool2D_forward_naive, max_pool2D_forward_naive
 import unittest
-from op_test import OpTest
-import paddle
 import numpy as np
-import paddle.fluid as fluid
 import paddle.fluid.core as core
+from op_test import OpTest
+from paddle.fluid.framework import _test_eager_guard
 from paddle.nn.functional import avg_pool2d, max_pool2d
+import paddle.fluid as fluid
+import paddle
+from test_pool2d_op import adaptive_start_index, adaptive_end_index, pool2D_forward_naive, avg_pool2D_forward_naive, max_pool2D_forward_naive
 
 
 class TestPool2D_API(unittest.TestCase):
@@ -324,6 +325,10 @@ def test_pool2d(self):
             self.check_max_dygraph_ceilmode_results(place)
             self.check_max_dygraph_nhwc_results(place)
 
+    def test_dygraph_final_state_api(self):
+        with _test_eager_guard():
+            self.test_pool2d()
+
 
 class TestPool2DError_API(unittest.TestCase):
     def test_error_api(self):
@@ -524,6 +529,10 @@ def run_stride_out_of_range():
 
         self.assertRaises(ValueError, run_stride_out_of_range)
 
+    def test_dygraph_final_state_api(self):
+        with _test_eager_guard():
+            self.test_error_api()
+
 
 if __name__ == '__main__':
     unittest.main()