@@ -225,134 +225,3 @@ def make_qnn_relu(expr, fused_activation_fn, scale, zero_point, dtype):
225225 )
226226 if fused_activation_fn == "RELU" :
227227 return tvm .relay .op .clip (expr , a_min = max (qmin , quantize (0.0 )), a_max = qmax )
228-
229-
def generate_random_input_data(seed, shape, dtype):
    """
    Generate a randomized numpy input array for a given shape and dtype.

    Parameters
    ----------
    seed : int
        Seed for the random generator so the data is reproducible.
    shape : tuple of int
        Shape of the array to generate.
    dtype : numpy dtype
        np.float32 yields uniform floats in [-1, 1); any integer dtype
        yields uniform integers over the dtype's full representable range.

    Returns
    -------
    np.ndarray
        Random array of the requested shape and dtype.
    """
    random_state = np.random.RandomState(seed)
    if dtype == np.float32:
        # Bug fix: the original passed an undefined name `size` here,
        # which raised NameError — the intended argument is `shape`.
        return random_state.uniform(-1, 1, shape).astype(dtype)
    # `randint`'s `high` bound is exclusive, hence max + 1 to cover the
    # whole representable range of the integer dtype.
    low = np.iinfo(dtype).min
    high = np.iinfo(dtype).max + 1
    return random_state.randint(low, high, shape, dtype)
241-
242-
def generate_ref_data_tflite(model):
    """
    Run a serialized TFLite model through the TFLite interpreter to
    produce reference outputs for randomized (seeded) inputs.

    Returns a tuple of (input_data, expected_output_data, output_tolerance)
    where the two data members map tensor names to numpy arrays.
    """
    import tensorflow as tf
    from distutils.version import LooseVersion

    # TF releases before 2.5.0 do not expose the reference-kernel
    # resolver, so allow a tolerance of 1 there; newer releases run the
    # BUILTIN_REF kernels and are expected to match exactly.
    if tf.__version__ < LooseVersion("2.5.0"):
        output_tolerance = 1
        interpreter = tf.lite.Interpreter(model_content=model)
    else:
        from tensorflow.lite.python.interpreter import OpResolverType

        output_tolerance = 0
        interpreter = tf.lite.Interpreter(
            model_content=model,
            experimental_op_resolver_type=OpResolverType.BUILTIN_REF,
            experimental_preserve_all_tensors=False,
        )

    interpreter.allocate_tensors()

    # Feed every input tensor with predictable randomized data (seed 0).
    input_data = {}
    for detail in interpreter.get_input_details():
        values = generate_random_input_data(0, detail["shape"], detail["dtype"])
        interpreter.set_tensor(detail["index"], values)
        input_data[detail["name"]] = values

    interpreter.invoke()

    # Collect the reference outputs computed by the interpreter.
    expected_output_data = {
        detail["name"]: interpreter.get_tensor(detail["index"])
        for detail in interpreter.get_output_details()
    }

    return input_data, expected_output_data, output_tolerance
290-
291-
def create_conv2d_tflite_model(ifm_shape, kernel_shape, strides, dilation, padding, activation):
    """Build and quantize a TFLite graph containing a single Conv2d layer.

    Returns the serialized int8-quantized TFLite flatbuffer.
    """
    import tensorflow as tf

    class Model(tf.Module):
        @tf.function
        def tf_function(self, x):
            # Build the layer with the tf.nn API; the kernel is random
            # with fixed input/output channel count of 3.
            weights = tf.constant(
                np.random.uniform(size=[kernel_shape[0], kernel_shape[1], 3, 3]),
                dtype=tf.float32,
            )
            result = tf.nn.conv2d(
                x,
                filters=weights,
                strides=[1, strides[0], strides[1], 1],
                padding=padding,
                dilations=dilation,
            )
            if activation:
                result = tf.nn.relu(result)
            return result

    model = Model()
    concrete_func = model.tf_function.get_concrete_function(
        tf.TensorSpec(ifm_shape, dtype=tf.float32)
    )

    # Representative data drives the post-training int8 quantization.
    def representative_dataset():
        for _ in range(100):
            yield [np.random.rand(*tuple(ifm_shape)).astype(np.float32)]

    converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = representative_dataset
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
    converter.inference_input_type = tf.int8
    converter.inference_output_type = tf.int8
    return converter.convert()
333-
334-
def create_conv2d_tflite_relay_models(
    ifm_shape, kernel_shape, strides, dilation, padding, activation, dtype
):
    """
    Create a single-Conv2d TFLite model and convert it to Relay.

    Builds the quantized TFLite flatbuffer via create_conv2d_tflite_model,
    then imports it through the Relay TFLite frontend.
    Returns (serialized TFLite model, Relay module, params).
    """
    pytest.importorskip("tflite")
    import tflite.Model

    serialized_tflite_model = create_conv2d_tflite_model(
        ifm_shape, kernel_shape, strides, dilation, padding, activation
    )

    # Parse the flatbuffer into the tflite schema object the Relay
    # frontend expects.
    parsed_model = tflite.Model.Model.GetRootAsModel(serialized_tflite_model, 0)

    relay_module, params = relay.frontend.from_tflite(
        parsed_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )

    return serialized_tflite_model, relay_module, params
0 commit comments