benchmark.py
#!/usr/bin/python
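"""Benchmark a set of deep-learning models on an NVIDIA Jetson developer kit.

The script prepares the device (power mode, clocks, fan speed), reads the list
of models from a benchmark CSV, downloads and runs each model, and prints a
table of per-model latency (GPU/DLA, in milliseconds) and FPS. Generated engine
and txt files are removed after a successful run.

Example invocation (a sketch only; the exact command-line flags are defined by
benchmark_argparser in utils and are assumed here to mirror the attribute names
used below):

    python3 benchmark.py --all --csv_file_path <path/to/benchmark.csv> --model_dir <path/to/models>
"""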
import gc
import sys
import warnings

import pandas as pd

from utils import utilities, read_write_data, benchmark_argparser, run_benchmark_models

warnings.simplefilter("ignore")


def main():
    # Parse command-line parameters
    arg_parser = benchmark_argparser()
    args = arg_parser.make_args()
    csv_file_path = args.csv_file_path
    model_path = args.model_dir
    precision = args.precision

    # System check: verify TensorRT, then set power mode, clocks and fan
    system_check = utilities(jetson_devkit=args.jetson_devkit, gpu_freq=args.gpu_freq, dla_freq=args.dla_freq)
    system_check.close_all_apps()
    if system_check.check_trt():
        sys.exit()
    system_check.set_power_mode(args.power_mode, args.jetson_devkit)
    system_check.clear_ram_space()
    if args.jetson_clocks:
        system_check.set_jetson_clocks()
    else:
        system_check.run_set_clocks_withDVFS()
        system_check.set_jetson_fan(255)

    # Read CSV and write data
    benchmark_data = read_write_data(csv_file_path=csv_file_path, model_path=model_path)

    if args.all:
        # Benchmark every model listed in the CSV
        latency_each_model = []
        print("Running all benchmarks... This will take at least 2 hours...")
        for read_index in range(len(benchmark_data)):
            gc.collect()
            model = run_benchmark_models(csv_file_path=csv_file_path, model_path=model_path, precision=precision, benchmark_data=benchmark_data)
            download_err = model.execute(read_index=read_index)
            if not download_err:
                # Read results
                latency_fps, error_log = model.report()
                latency_each_model.append(latency_fps)
                # Remove engine and txt files
                if not error_log:
                    model.remove()
            del gc.garbage[:]
            system_check.clear_ram_space()
        benchmark_table = pd.DataFrame(latency_each_model, columns=['GPU (ms)', 'DLA0 (ms)', 'DLA1 (ms)', 'FPS', 'Model Name'])
        # Note: GPU and DLA latencies are measured in milliseconds; FPS = frames per second
        print(benchmark_table[['Model Name', 'FPS']])
        if args.plot:
            benchmark_data.plot_perf(latency_each_model)
    else:
        # Benchmark a single model: each supported model name maps to its
        # row (read_index) in the benchmark CSV.
        model_index = {
            'inception_v4': 0,
            'vgg19': 1,
            'super_resolution': 2,
            'unet': 3,
            'pose_estimation': 4,
            'tiny-yolov3': 5,
            'resnet': 6,
            'ssd-mobilenet-v1': 7,
            'ssd-resnet34': 8,
        }
        read_index = model_index.get(args.model_name)
        if read_index is not None:
            model = run_benchmark_models(csv_file_path=csv_file_path, model_path=model_path, precision=precision, benchmark_data=benchmark_data)
            download_err = model.execute(read_index=read_index)
            if not download_err:
                _, error_log = model.report()
                # Remove engine and txt files
                if not error_log:
                    model.remove()
    # Final cleanup: free RAM and turn the fan back down
    system_check.clear_ram_space()
    system_check.set_jetson_fan(0)


if __name__ == "__main__":
    main()