@@ -423,9 +423,6 @@ def get_padding_offset(bsz, max_seq_len, seq_lens_this_time):
423423 self .numpy_tensor = numpy .random .randint (- len_shape , len_shape , size = self .shape )
424424 return self .numpy_tensor
425425 elif api_config .api_name in ["paddle.clip" , "paddle.Tensor.clip" ] and self .check_arg (api_config , 0 , "x" ):
426- # init input tensor x randomly (index == 0 indicates we are init TensorConfig(x).numpy_tensor)
427- self .numpy_tensor = self .get_random_numpy_tensor (shape = self .shape , data_type = self .dtype )
428-
429426 # if both min and max need a Tensor instead of None, init min and max at the same TensorConfig numpy tensor init process
430427 min_config = self .get_arg (api_config , 1 , "min" )
431428 max_config = self .get_arg (api_config , 2 , "max" )
@@ -440,6 +437,8 @@ def get_padding_offset(bsz, max_seq_len, seq_lens_this_time):
440437
441438 self .set_tensor_arg_value (api_config , 1 , "min" , min_numpy_tensor )
442439 self .set_tensor_arg_value (api_config , 2 , "max" , max_numpy_tensor )
440+ min_config = min_numpy_tensor
441+ max_config = max_numpy_tensor
443442 elif min_config is not None and max_config is not None :
444443 # min and max args are specified but at least one of them is scalar (not a TensorConfig)
445444 # according to API DOC, min and max is float|int|Tensor
@@ -448,13 +447,16 @@ def get_padding_offset(bsz, max_seq_len, seq_lens_this_time):
448447 min_dtype = min_config .dtype
449448 min_numpy_tensor = self .get_random_numpy_tensor (shape = min_shape , data_type = min_dtype , max = max_config )
450449 self .set_tensor_arg_value (api_config , 1 , "min" , min_numpy_tensor )
450+ min_config = min_numpy_tensor
451451 elif (isinstance (max_config , TensorConfig ) and (isinstance (min_config , int ) or isinstance (min_config , float ))):
452452 max_shape = max_config .shape
453453 max_dtype = max_config .dtype
454454 max_numpy_tensor = self .get_random_numpy_tensor (shape = max_shape , data_type = max_dtype , min = min_config )
455455 self .set_tensor_arg_value (api_config , 2 , "max" , max_numpy_tensor )
456+ max_config = max_numpy_tensor
456457 # when both min and max are scalars, there is no need to init a numpy tensor
457-
458+ # init input tensor x randomly (index == 0 indicates we are initializing TensorConfig(x).numpy_tensor)
459+ self .numpy_tensor = self .get_random_numpy_tensor (shape = self .shape , data_type = self .dtype , min = min_config - 1000 , max = max_config + 1000 )
458460 return self .numpy_tensor
459461 elif api_config .api_name == "paddle.vision.ops.distribute_fpn_proposals" :
460462 if (index is not None and index == 0 ) or (key is not None and key == "fpn_rois" ):
@@ -1954,20 +1956,16 @@ def get_random_numpy_tensor(self, shape=None, data_type=None, min=None, max=None
19541956 generate a random numpy tensor with data in [min, max) given shape and data_type
19551957 """
19561958 # extract default init logic
1957- if USE_CACHED_NUMPY :
1958- dtype = "float32" if data_type == "bfloat16" else data_type
1959- numpy_tensor = self .get_cached_numpy (dtype , shape )
1959+ if "int" in data_type :
1960+ min = min if min is not None else - 65535
1961+ max = max if max is not None else 65535
1962+ numpy_tensor = (numpy .random .randint (min , max , size = shape )).astype (data_type )
19601963 else :
1961- if "int" in data_type :
1962- min = min if min is not None else - 65535
1963- max = max if max is not None else 65535
1964- numpy_tensor = (numpy .random .randint (min , max , size = shape )).astype (data_type )
1965- else :
1966- # TO DO: check boundary and cached numpy
1967- dtype = "float32" if data_type == "bfloat16" else data_type
1968- min = min if min is not None else numpy .finfo (dtype ).min / 2
1969- max = max if max is not None else numpy .finfo (dtype ).max / 2
1970- numpy_tensor = (numpy .random .uniform (min , max , size = shape )).astype (dtype )
1964+ # TODO: check boundary and cached numpy
1965+ dtype = "float32" if data_type == "bfloat16" else data_type
1966+ min = min if min is not None else numpy .finfo (dtype ).min / 2
1967+ max = max if max is not None else numpy .finfo (dtype ).max / 2
1968+ numpy_tensor = (numpy .random .uniform (min , max , size = shape )).astype (dtype )
19711969 return numpy_tensor
19721970
19731971class APIConfig :
0 commit comments