[Dy2St] pir dy2st unittest verification - Part 4 #58936

Merged · 6 commits · Nov 13, 2023
Changes from all commits
30 changes: 30 additions & 0 deletions test/dygraph_to_static/dygraph_to_static_utils_new.py
@@ -12,12 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import importlib
 import inspect
 import logging
 import os
 import unittest
 from enum import Flag, auto
 from functools import wraps
+from pathlib import Path

 import numpy as np
@@ -269,6 +271,11 @@ def test_pir_only(fn):
     return fn


+def test_pir_api_only(fn):
+    fn = set_ir_mode(IrMode.PIR_API)(fn)
+    return fn
+
+
 def test_legacy_and_pir(fn):
     fn = set_ir_mode(IrMode.LEGACY_IR | IrMode.PIR_EXE)(fn)
     return fn
@@ -310,3 +317,26 @@ def show_all_test_cases(test_class):
         if attr.startswith("test"):
             fn = getattr(test_class, attr)
             logger.info(f"{attr}: {fn}")
+
+
+# Other utilities
+def import_module_from_path(module_name, module_path):
+    """A better way to import a module from another directory than using sys.path.append."""
+    spec = importlib.util.spec_from_file_location(module_name, module_path)
+    module = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(module)
+    return module
+
+
+def import_legacy_test_utils():
+    test_root = Path(__file__).parent.parent
+    legacy_test_utils_path = test_root / "legacy_test/utils.py"
+    legacy_test_utils = import_module_from_path(
+        "legacy_test_utils", legacy_test_utils_path
+    )
+    return legacy_test_utils
+
+
+legacy_test_utils = import_legacy_test_utils()
+dygraph_guard = legacy_test_utils.dygraph_guard
+static_guard = legacy_test_utils.static_guard
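
For context, here is a minimal usage sketch of these additions (hypothetical, not part of this PR; the helper path and the TestAddOne case are illustrative placeholders):

# Hypothetical usage sketch for the new utilities.
import unittest

import numpy as np
from dygraph_to_static_utils_new import (
    Dy2StTestBase,
    dygraph_guard,
    import_module_from_path,
    test_pir_api_only,
)

import paddle

# Load a helper module by file path instead of mutating sys.path
# (placeholder path, for illustration only):
# helpers = import_module_from_path("helpers", "/path/to/helpers.py")


class TestAddOne(Dy2StTestBase):
    @test_pir_api_only  # run this case only under IrMode.PIR_API
    def test_add_one(self):
        with dygraph_guard():
            fn = paddle.jit.to_static(lambda x: x + 1)
            x = paddle.to_tensor(np.zeros([2], dtype="float32"))
            np.testing.assert_allclose(
                fn(x).numpy(), np.ones([2], dtype="float32")
            )


if __name__ == "__main__":
    unittest.main()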
116 changes: 57 additions & 59 deletions test/dygraph_to_static/test_bmn.py
@@ -18,14 +18,18 @@
 import unittest

 import numpy as np
-from dygraph_to_static_utils_new import Dy2StTestBase, test_pir_only
+from dygraph_to_static_utils_new import (
+    Dy2StTestBase,
+    static_guard,
+    test_pir_only,
+)
 from predictor_utils import PredictorTools

 import paddle
 from paddle import base
 from paddle.base import ParamAttr
 from paddle.base.dygraph import to_variable
-from paddle.jit import to_static
+from paddle.base.framework import unique_name
 from paddle.jit.translated_layer import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX

 SEED = 2000
@@ -34,8 +38,8 @@
 # Note: Set True to eliminate randomness.
 #     1. For one operation, cuDNN has several algorithms,
 #        some algorithm results are non-deterministic, like convolution algorithms.
-if base.is_compiled_with_cuda():
-    base.set_flags({'FLAGS_cudnn_deterministic': True})
+if paddle.is_compiled_with_cuda():
+    paddle.set_flags({'FLAGS_cudnn_deterministic': True})


 def get_interp1d_mask(
@@ -265,7 +269,6 @@ def __init__(self, cfg):
             bias_attr=ParamAttr(name="PEM_2d4_b"),
         )

-    @to_static
     def forward(self, x):
         # Base Module
         x = paddle.nn.functional.relu(self.b_conv1(x))
@@ -641,9 +644,9 @@ class TestTrain(Dy2StTestBase):
     def setUp(self):
         self.args = Args()
         self.place = (
-            base.CPUPlace()
-            if not base.is_compiled_with_cuda()
-            else base.CUDAPlace(0)
+            paddle.CPUPlace()
+            if not paddle.is_compiled_with_cuda()
+            else paddle.CUDAPlace(0)
         )

         self.temp_dir = tempfile.TemporaryDirectory()
@@ -656,18 +659,18 @@ def setUp(self):
     def tearDown(self):
         self.temp_dir.cleanup()

-    def train_bmn(self, args, place, to_static):
+    def train_bmn(self, args, to_static):
         paddle.jit.enable_to_static(to_static)
-        loss_data = []
-
-        with base.dygraph.guard(place):
+        with unique_name.guard():
+            loss_data = []
+
             paddle.seed(SEED)
             paddle.framework.random._manual_program_seed(SEED)
             global local_random
             local_random = np.random.RandomState(SEED)

-            bmn = BMN(args)
-            bmn = paddle.jit.to_static(bmn)
+            bmn = paddle.jit.to_static(BMN(args))
             adam = optimizer(args, parameter_list=bmn.parameters())

             train_reader = fake_data_reader(args, 'train')
@@ -724,12 +727,10 @@ def train_bmn(self, args, place, to_static):
                     ):
                         print(
                             f'[TRAIN] Epoch {epoch}, iter {batch_id} '
-                            + '\tLoss = {}, \ttem_loss = {}, \tpem_reg_loss = {}, \tpem_cls_loss = {}'.format(
-                                '%f' % float(avg_loss),
-                                '%f' % float(tem_loss),
-                                '%f' % float(pem_reg_loss),
-                                '%f' % float(pem_cls_loss),
-                            )
+                            + f'\tLoss = {float(avg_loss):f}, '
+                            + f'\ttem_loss = {float(tem_loss):f}, '
+                            + f'\tpem_reg_loss = {float(pem_reg_loss):f}, '
+                            + f'\tpem_cls_loss = {float(pem_cls_loss):f}'
                         )

                     # validation
@@ -752,8 +753,8 @@ def train_bmn(self, args, place, to_static):

     @test_pir_only
     def test_train_pir(self):
-        static_res = self.train_bmn(self.args, self.place, to_static=True)
-        dygraph_res = self.train_bmn(self.args, self.place, to_static=False)
+        static_res = self.train_bmn(self.args, to_static=True)
+        dygraph_res = self.train_bmn(self.args, to_static=False)
         np.testing.assert_allclose(
             dygraph_res,
             static_res,
@@ -766,8 +767,8 @@ def test_train_pir(self):
         )

     def test_train(self):
-        static_res = self.train_bmn(self.args, self.place, to_static=True)
-        dygraph_res = self.train_bmn(self.args, self.place, to_static=False)
+        static_res = self.train_bmn(self.args, to_static=True)
+        dygraph_res = self.train_bmn(self.args, to_static=False)
         np.testing.assert_allclose(
             dygraph_res,
             static_res,
@@ -833,51 +834,48 @@ def verify_predict(self):

     def predict_dygraph(self, data):
         paddle.jit.enable_to_static(False)
-        with base.dygraph.guard(self.place):
-            bmn = BMN(self.args)
-            # load dygraph trained parameters
-            model_dict = paddle.load(self.dy_param_path + ".pdparams")
-            bmn.set_dict(model_dict)
-            bmn.eval()
+        bmn = paddle.jit.to_static(BMN(self.args))
+        # load dygraph trained parameters
+        model_dict = paddle.load(self.dy_param_path + ".pdparams")
+        bmn.set_dict(model_dict)
+        bmn.eval()

-            x = to_variable(data)
-            pred_res = bmn(x)
-            pred_res = [var.numpy() for var in pred_res]
+        x = to_variable(data)
+        pred_res = bmn(x)
+        pred_res = [var.numpy() for var in pred_res]

-            return pred_res
+        return pred_res

     def predict_static(self, data):
-        paddle.enable_static()
-        exe = base.Executor(self.place)
-        # load inference model
-        [
-            inference_program,
-            feed_target_names,
-            fetch_targets,
-        ] = paddle.static.io.load_inference_model(
-            self.model_save_dir,
-            executor=exe,
-            model_filename=self.model_filename,
-            params_filename=self.params_filename,
-        )
-        pred_res = exe.run(
-            inference_program,
-            feed={feed_target_names[0]: data},
-            fetch_list=fetch_targets,
-        )
-
+        with static_guard():
+            exe = paddle.static.Executor(self.place)
+            # load inference model
+            [
+                inference_program,
+                feed_target_names,
+                fetch_targets,
+            ] = paddle.static.io.load_inference_model(
+                self.model_save_dir,
+                executor=exe,
+                model_filename=self.model_filename,
+                params_filename=self.params_filename,
+            )
+            pred_res = exe.run(
+                inference_program,
+                feed={feed_target_names[0]: data},
+                fetch_list=fetch_targets,
+            )
         return pred_res

     def predict_dygraph_jit(self, data):
-        with base.dygraph.guard(self.place):
-            bmn = paddle.jit.load(self.model_save_prefix)
-            bmn.eval()
+        bmn = paddle.jit.load(self.model_save_prefix)
+        bmn.eval()

-            x = to_variable(data)
-            pred_res = bmn(x)
-            pred_res = [var.numpy() for var in pred_res]
+        x = to_variable(data)
+        pred_res = bmn(x)
+        pred_res = [var.numpy() for var in pred_res]

-            return pred_res
+        return pred_res

     def predict_analysis_inference(self, data):
         output = PredictorTools(
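
For reviewers unfamiliar with the helper used above: static_guard (re-exported from legacy_test/utils.py by dygraph_to_static_utils_new) scopes static-graph mode to a block, replacing the bare paddle.enable_static() toggle that predict_static previously relied on. A minimal sketch, assuming a context-manager implementation (the upstream body may differ):

from contextlib import contextmanager

import paddle


@contextmanager
def static_guard():
    # Enter static-graph mode for the duration of the block, then
    # restore the previous mode so later test cases are unaffected.
    was_dynamic = paddle.in_dynamic_mode()
    paddle.enable_static()
    try:
        yield
    finally:
        if was_dynamic:
            paddle.disable_static()

dygraph_guard is the mirror image, forcing dynamic mode inside the block.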