This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit: Fixed lint errors
connorgoggins committed Feb 12, 2020
1 parent f25b775 commit b1f206d
Showing 3 changed files with 217 additions and 217 deletions.
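The hunks shown below only realign continuation lines in calls to opperf's run_performance_test helper. For orientation, here is a minimal sketch of that call pattern, assuming the import path documented in the opperf README and using deliberately small shapes for illustration:

    import mxnet as mx
    from benchmark.opperf.utils.benchmark_utils import run_performance_test

    # Time forward and backward of mx.nd.dot on small matrices.
    res = run_performance_test([mx.nd.dot], run_backward=True,
                               dtype='float32', ctx=mx.cpu(),
                               inputs=[{"lhs": (64, 64),
                                        "rhs": (64, 64)}],
                               warmup=10, runs=25)
    print(res)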
48 changes: 24 additions & 24 deletions benchmark/opperf/nd_operations/gemm_operators.py
@@ -63,28 +63,28 @@ def run_gemm_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='nativ
            [getattr(MX_OP_MODULE, "dot")], run_backward=True,
            dtype=dtype, ctx=ctx,
            inputs=[{"lhs": (2**16, 2**16),
                     "rhs": (2**16, 2**16)},
                    {"lhs": (4, 2**30),
                     "rhs": (4, 2**30),
                     "transpose_b": True},
                    {"lhs": (2**28, 16),
                     "rhs": (16, 2**28),
                     "transpose_a": True,
                     "transpose_b": True}],
            warmup=warmup, runs=runs, profiler=profiler)

        batch_dot_benchmark_res = run_performance_test(
            [getattr(MX_OP_MODULE, "batch_dot")], run_backward=True,
            dtype=dtype, ctx=ctx,
            inputs=[{"lhs": (1, 2**16, 2**16),
                     "rhs": (1, 2**16, 2**16)},
                    {"lhs": (1, 4, 2**30),
                     "rhs": (1, 4, 2**30),
                     "transpose_b": True},
                    {"lhs": (1, 2**28, 16),
                     "rhs": (1, 16, 2**28),
                     "transpose_a": True,
                     "transpose_b": True}],
            warmup=warmup, runs=runs, profiler=profiler)
        # Operator khatri_rao is not yet implemented for GPU
        khatri_rao_benchmark_res = []
@@ -101,28 +101,28 @@ def run_gemm_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='nativ
            [getattr(MX_OP_MODULE, "dot")], run_backward=True,
            dtype=dtype, ctx=ctx,
            inputs=[{"lhs": (1024, 1024),
                     "rhs": (1024, 1024)},
                    {"lhs": (1000, 10),
                     "rhs": (1000, 10),
                     "transpose_b": True},
                    {"lhs": (1000, 1),
                     "rhs": (100, 1000),
                     "transpose_a": True,
                     "transpose_b": True}],
            warmup=warmup, runs=runs, profiler=profiler)

        batch_dot_benchmark_res = run_performance_test(
            [getattr(MX_OP_MODULE, "batch_dot")], run_backward=True,
            dtype=dtype, ctx=ctx,
            inputs=[{"lhs": (32, 1024, 1024),
                     "rhs": (32, 1024, 1024)},
                    {"lhs": (32, 1000, 10),
                     "rhs": (32, 1000, 10),
                     "transpose_b": True},
                    {"lhs": (32, 1000, 1),
                     "rhs": (32, 100, 1000),
                     "transpose_a": True,
                     "transpose_b": True}],
            warmup=warmup, runs=runs, profiler=profiler)
        # Operator khatri_rao is not yet implemented for GPU
        khatri_rao_benchmark_res = []
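For context, the entry point edited above can be invoked directly. A minimal sketch, assuming the module path mirrors the file path in this diff and that the profiler default is 'native' (the hunk header above is truncated):

    import mxnet as mx
    from benchmark.opperf.nd_operations.gemm_operators import run_gemm_operators_benchmarks

    # Runs the dot and batch_dot (and, on CPU, khatri_rao) benchmarks above.
    gemm_results = run_gemm_operators_benchmarks(ctx=mx.cpu(), dtype='float32',
                                                 profiler='native')
    print(gemm_results)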
112 changes: 56 additions & 56 deletions benchmark/opperf/nd_operations/nn_basic_operators.py
@@ -38,50 +38,50 @@ def run_nn_basic_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='n
                                            ctx=ctx,
                                            profiler=profiler,
                                            inputs=[{"data": (2**15, 3, 256, 256),
                                                     "num_hidden": 64,
                                                     "weight": (64, 3 * 256 * 256),
                                                     "bias": (64,),
                                                     "flatten": True},
                                                    {"data": (2**17, 3, 128, 128),
                                                     "num_hidden": 64,
                                                     "weight": (64, 3 * 128 * 128),
                                                     "bias": (64,),
                                                     "flatten": False}],
                                            warmup=warmup,
                                            runs=runs)

        # Dropout benchmarks
        dropout_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "Dropout")],
                                                     run_backward=True,
                                                     dtype=dtype,
                                                     ctx=ctx,
                                                     profiler=profiler,
                                                     inputs=[{"data": (2**15, 3, 256, 256),
                                                              "p": 0.5,
                                                              "mode": "always"},
                                                             {"data": (2**28, 16),
                                                              "p": 0.5,
                                                              "mode": "always"}],
                                                     warmup=warmup,
                                                     runs=runs)
        # BatchNorm benchmarks
        batchnorm_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "BatchNorm")],
                                                       run_backward=True,
                                                       dtype=dtype,
                                                       ctx=ctx,
                                                       profiler=profiler,
                                                       inputs=[{"data": (2**15, 3, 256, 256),
                                                                "gamma": (3,),
                                                                "beta": (3,),
                                                                "moving_mean": (3,),
                                                                "moving_var": (3,)},
                                                               {"data": (2**14, 3, 10000, 10),
                                                                "gamma": (3,),
                                                                "beta": (3,),
                                                                "moving_mean": (3,),
                                                                "moving_var": (3,)}],
                                                       warmup=warmup,
                                                       runs=runs)
    else:
        # FullyConnected operator benchmarks
        fc_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "FullyConnected")],
@@ -90,50 +90,50 @@ def run_nn_basic_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='n
                                            ctx=ctx,
                                            profiler=profiler,
                                            inputs=[{"data": (32, 3, 256, 256),
                                                     "num_hidden": 64,
                                                     "weight": (64, 3 * 256 * 256),
                                                     "bias": (64,),
                                                     "flatten": True},
                                                    {"data": (32, 3, 256, 256),
                                                     "num_hidden": 64,
                                                     "weight": (64, 256),
                                                     "bias": (64,),
                                                     "flatten": False}],
                                            warmup=warmup,
                                            runs=runs)

        # Dropout benchmarks
        dropout_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "Dropout")],
                                                     run_backward=True,
                                                     dtype=dtype,
                                                     ctx=ctx,
                                                     profiler=profiler,
                                                     inputs=[{"data": (32, 3, 256, 256),
                                                              "p": 0.5,
                                                              "mode": "always"},
                                                             {"data": (10000, 10),
                                                              "p": 0.5,
                                                              "mode": "always"}],
                                                     warmup=warmup,
                                                     runs=runs)
        # BatchNorm benchmarks
        batchnorm_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "BatchNorm")],
                                                       run_backward=True,
                                                       dtype=dtype,
                                                       ctx=ctx,
                                                       profiler=profiler,
                                                       inputs=[{"data": (32, 3, 256, 256),
                                                                "gamma": (3,),
                                                                "beta": (3,),
                                                                "moving_mean": (3,),
                                                                "moving_var": (3,)},
                                                               {"data": (32, 3, 10000, 10),
                                                                "gamma": (3,),
                                                                "beta": (3,),
                                                                "moving_mean": (3,),
                                                                "moving_var": (3,)}],
                                                       warmup=warmup,
                                                       runs=runs)
    # Prepare combined results
    mx_basic_nn_results = merge_map_list(fc_benchmark_res + dropout_benchmark_res + batchnorm_benchmark_res)
    return mx_basic_nn_results
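Likewise, a minimal sketch for driving the entry point edited above, under the same assumptions (module path inferred from the diff's file path; profiler default assumed to be 'native'):

    import mxnet as mx
    from benchmark.opperf.nd_operations.nn_basic_operators import run_nn_basic_operators_benchmarks

    # Runs the FullyConnected, Dropout and BatchNorm benchmarks above and
    # returns the result map merged by merge_map_list.
    nn_results = run_nn_basic_operators_benchmarks(ctx=mx.cpu(), dtype='float32',
                                                   profiler='native')
    print(nn_results)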
[diff for the third changed file did not load]
