
Commit e9a599a
update ut
jiweibo committed Nov 10, 2021
1 parent 211940e commit e9a599a
Showing 3 changed files with 61 additions and 93 deletions.
@@ -260,7 +260,7 @@ def run_test(self, quant=False, *args, **kwargs):

self.assertTrue(status)

def inference_config_str(self, config) -> bool:
def inference_config_str(self, config) -> str:
dic = {}
enable_mkldnn = config.mkldnn_enabled()
dic['use_mkldnn'] = enable_mkldnn
@@ -372,7 +372,7 @@ def run_test(self, quant=False, *args, **kwargs):
status = self.check_op_version() and status
self.assertTrue(status)

def inference_config_str(self, config) -> bool:
def inference_config_str(self, config) -> str:
dic = {}
enable_mkldnn = config.mkldnn_enabled()
dic['use_mkldnn'] = enable_mkldnn
@@ -491,7 +491,7 @@ def assert_op_size(self, trt_engine_num, paddle_op_num):
'paddle_op_num is {}, but got {}!'.format(
paddle_op_size, paddle_op_num))

def inference_config_str(self, config: paddle_infer.Config):
def inference_config_str(self, config: paddle_infer.Config) -> str:
dic = {}
enable_trt = config.tensorrt_engine_enabled()
trt_precison = config.tensorrt_precision_mode()
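
The three hunks above correct only the return annotations: each inference_config_str helper collects the relevant predictor switches into a dict and returns a textual form of it, so -> str is the accurate type rather than bool (or no annotation at all). A minimal sketch of the MKLDNN variant, written as a free function and assuming the method simply stringifies the dict (the rest of the body is outside this diff):

import paddle.inference as paddle_infer

def inference_config_str(config: paddle_infer.Config) -> str:
    # Gather the switches the test cares about into a dict ...
    dic = {}
    dic['use_mkldnn'] = config.mkldnn_enabled()
    # ... and return its string form so a configuration can be summarized as text.
    return str(dic)
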
@@ -13,7 +13,7 @@
# limitations under the License.

from auto_scan_test import PassAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig
from program_config import TensorConfig, ProgramConfig, OpConfig
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
@@ -116,94 +116,71 @@ def generate_weight2(attrs):
'weight_size': kwargs['weight_size']
}]

ops_config = [{
"op_type": attrs[0]['op_type'],
"op_inputs": {
"Ids": ["input_data1"],
"W": ["embedding_weight1"]
},
"op_outputs": {
"Out": ["embedding_output1"]
},
"op_attrs": {
emb_op1 = OpConfig(
type=attrs[0]['op_type'],
inputs={"Ids": ["input_data1"],
"W": ["embedding_weight1"]},
outputs={"Out": ["embedding_output1"]},
attrs={
'is_sparse': attrs[0]['is_sparse'],
'is_distributed': attrs[0]['is_distributed'],
'padding_idx': attrs[0]['padding_idx'],
}
}, {
"op_type": attrs[0]['op_type'],
"op_inputs": {
"Ids": ["input_data2"],
"W": ["embedding_weight2"]
},
"op_outputs": {
"Out": ["embedding_output2"]
},
"op_attrs": {
'padding_idx': attrs[0]['padding_idx']
})
emb_op2 = OpConfig(
type=attrs[0]['op_type'],
inputs={"Ids": ["input_data2"],
"W": ["embedding_weight2"]},
outputs={"Out": ["embedding_output2"]},
attrs={
'is_sparse': attrs[0]['is_sparse'],
'is_distributed': attrs[0]['is_distributed'],
'padding_idx': attrs[0]['padding_idx'],
},
}, {
"op_type": attrs[0]['op_type'],
"op_inputs": {
"Ids": ["input_data3"],
"W": ["embedding_weight3"]
},
"op_outputs": {
"Out": ["embedding_output3"]
},
"op_attrs": {
'padding_idx': attrs[0]['padding_idx']
})
emb_op3 = OpConfig(
type=attrs[0]['op_type'],
inputs={"Ids": ["input_data3"],
"W": ["embedding_weight3"]},
outputs={"Out": ["embedding_output3"]},
attrs={
'is_sparse': attrs[0]['is_sparse'],
'is_distributed': attrs[0]['is_distributed'],
'padding_idx': attrs[0]['padding_idx'],
},
}, {
"op_type": "elementwise_add",
"op_inputs": {
"X": ["embedding_output2"],
"Y": ["embedding_output3"]
},
"op_outputs": {
"Out": ["elementwise_add_output1"]
},
"op_attrs": {
"axis": attrs[1]['axis'],
}
}, {
"op_type": "elementwise_add",
"op_inputs": {
"X": ["elementwise_add_output1"],
"Y": ["embedding_output1"]
'padding_idx': attrs[0]['padding_idx']
})
add_op1 = OpConfig(
type='elementwise_add',
inputs={
"X": [emb_op2.outputs["Out"][0]],
"Y": [emb_op3.outputs["Out"][0]],
},
"op_outputs": {
"Out": ["elementwise_add_output2"]
outputs={"Out": ["elementwise_add_output1"]},
attrs={"axis": attrs[1]['axis']})
add_op2 = OpConfig(
type='elementwise_add',
inputs={
"X": [add_op1.outputs["Out"][0]],
"Y": [emb_op1.outputs["Out"][0]],
},
"op_attrs": {
"axis": attrs[1]['axis'],
}
}, {
"op_type": "layer_norm",
"op_inputs": {
"X": ["elementwise_add_output2"],
outputs={"Out": ["elementwise_add_output2"]},
attrs={"axis": attrs[1]['axis']})
layer_norm_op = OpConfig(
type='layer_norm',
inputs={
"X": [add_op2.outputs["Out"][0]],
"Bias": ["layer_norm_bias"],
"Scale": ["layer_norm_scale"]
},
"op_outputs": {
outputs={
"Y": ["layer_norm_output1"],
"Mean": ["layer_norm_output2"],
"Variance": ["layer_norm_output3"]
},
"op_attrs": {
attrs={
'begin_norm_axis': attrs[2]['begin_norm_axis'],
'epsilon': attrs[2]['epsilon'],
}
}]

ops = self.generate_op_config(ops_config)
'epsilon': attrs[2]['epsilon']
})

program_config = ProgramConfig(
ops=ops,
ops=[emb_op1, emb_op2, emb_op3, add_op1, add_op2, layer_norm_op],
weights={
"embedding_weight1":
TensorConfig(data_gen=partial(generate_weight1, attrs[3])),
@@ -242,7 +219,7 @@ def sample_predictor_configs(self, program_config):
precision_mode=paddle_infer.PrecisionType.Float32,
use_static=False,
use_calib_mode=False)
yield config, (10, 3), (1e-5, 1e-5)
yield config, (10, 5), (1e-5, 1e-5)
# trt dynamic_shape
config = self.create_trt_inference_config()
config.enable_tensorrt_engine(
@@ -280,7 +257,7 @@ def sample_predictor_configs(self, program_config):
"input_data2": [2, 128],
"input_data3": [2, 128]
})
yield config, (10, 3), (1e-5, 1e-5)
yield config, (10, 5), (1e-5, 1e-5)

def add_skip_pass_case(self):
def teller1(program_config, predictor_config):
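
Besides swapping the raw dict list for OpConfig objects, the rewrite of this fuse-pass test wires the graph together by reading tensor names back from the upstream op (emb_op2.outputs["Out"][0] and so on) instead of repeating string literals, so renaming an output in one place cannot silently break the chain. A minimal sketch of that chaining pattern with two illustrative ops (the op types, attributes, and tensor names below are examples, not taken from this test):

from program_config import OpConfig

# The upstream op declares its output tensor name once.
relu_op = OpConfig(
    type="relu",
    inputs={"X": ["input_data"]},
    outputs={"Out": ["relu_out"]},
    attrs={})

# The downstream op reuses the declared name instead of re-typing the literal.
scale_op = OpConfig(
    type="scale",
    inputs={"X": [relu_op.outputs["Out"][0]]},  # resolves to "relu_out"
    outputs={"Out": ["scale_out"]},
    attrs={"scale": 2.0,
           "bias": 0.0,
           "bias_after_scale": True})
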
@@ -13,7 +13,7 @@
# limitations under the License.

from auto_scan_test import MkldnnAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig
from program_config import TensorConfig, ProgramConfig, OpConfig
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
@@ -52,24 +52,15 @@ def generate_alpha(*args, **kwargs):
return np.zeros((1)).astype(np.float32)
return np.random.random(kwargs['in_shape']).astype(np.float32)

ops_config = [{
"op_type": "prelu",
"op_inputs": {
"X": ["input_data"],
"Alpha": ["alpha_weight"]
},
"op_outputs": {
"Out": ["output_data"]
},
"op_attrs": {
"mode": kwargs['mode']
}
}]

ops = self.generate_op_config(ops_config)
prelu_op = OpConfig(
type="prelu",
inputs={"X": ["input_data"],
"Alpha": ["alpha_weight"]},
outputs={"Out": ["output_data"]},
attrs={"mode": kwargs['mode']})

program_config = ProgramConfig(
ops=ops,
ops=[prelu_op],
weights={
"alpha_weight":
TensorConfig(data_gen=partial(generate_alpha, *args, **kwargs))
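
The same OpConfig style is applied to this MKLDNN PReLU test. For context, a self-contained sketch of how the pieces compose into a full test program, assuming ProgramConfig also accepts the inputs and outputs fields these auto-scan tests use elsewhere (the shapes and the 'all' mode below are illustrative, not the values swept by the test):

import numpy as np
from functools import partial
from program_config import TensorConfig, ProgramConfig, OpConfig

def generate_input():
    return np.random.random([1, 3, 32, 32]).astype(np.float32)

def generate_alpha():
    # 'all' mode shares a single alpha value across the whole input.
    return np.random.random([1]).astype(np.float32)

prelu_op = OpConfig(
    type="prelu",
    inputs={"X": ["input_data"],
            "Alpha": ["alpha_weight"]},
    outputs={"Out": ["output_data"]},
    attrs={"mode": "all"})

program_config = ProgramConfig(
    ops=[prelu_op],
    weights={"alpha_weight": TensorConfig(data_gen=partial(generate_alpha))},
    inputs={"input_data": TensorConfig(data_gen=partial(generate_input))},
    outputs=["output_data"])
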

1 comment on commit e9a599a

@paddle-bot-old

Congratulations! Your pull request passed all required CI. You can ask reviewer(s) to approve and merge. 🎉
