This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Added warmup/runs to gemm ops, debugging process failure
connorgoggins committed Feb 12, 2020
Parent: c064f47 · Commit: 87c18fc
Showing 3 changed files with 24 additions and 19 deletions.
benchmark/opperf/nd_operations/indexing_routines.py (1 addition, 1 deletion)
@@ -63,5 +63,5 @@ def run_indexing_routines_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='na
     mx_indexing_ops = get_all_indexing_routines()
 
     # Run benchmarks
-    mx_indexing_op_results = run_op_benchmarks(mx_indexing_ops, dtype, ctx, profiler, warmup, runs)
+    mx_indexing_op_results = run_op_benchmarks(mx_indexing_ops, dtype, ctx, profiler, int64_tensor, warmup, runs)
     return mx_indexing_op_results
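Note on this call site: run_op_benchmarks is invoked positionally, and the updated call assumes its signature takes int64_tensor between profiler and warmup. Under that assumption, the old call (without int64_tensor) would have fed warmup and runs into the wrong parameter slots. A self-contained toy of that shift; the signature and defaults below are assumptions modelled on the call above, not the repository's code:

# Illustrative sketch only: the parameter order mirrors the updated call site,
# the defaults are made up for the example.
def run_op_benchmarks(ops, dtype, ctx, profiler, int64_tensor='off', warmup=25, runs=100):
    return {"int64_tensor": int64_tensor, "warmup": warmup, "runs": runs}

args = (["op"], "float32", "cpu", "native")

# Old-style call (no int64_tensor): warmup/runs land in the wrong slots.
print(run_op_benchmarks(*args, 25, 100))
# {'int64_tensor': 25, 'warmup': 100, 'runs': 100}

# Updated call: every value reaches the parameter it was meant for.
print(run_op_benchmarks(*args, 'off', 25, 100))
# {'int64_tensor': 'off', 'warmup': 25, 'runs': 100}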
benchmark/opperf/opperf.py (1 addition, 1 deletion)
@@ -75,7 +75,7 @@ def run_all_mxnet_operator_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='n
 
     # Run all GEMM operations benchmarks with default input values
     mxnet_operator_benchmark_results.append(run_gemm_operators_benchmarks(ctx=ctx,
-                                                                          dtype=dtype, profiler=profiler, int64_tensor=int64_tensor))
+                                                                          dtype=dtype, profiler=profiler, int64_tensor=int64_tensor, warmup=warmup, runs=runs))
 
     # Run all Random sampling operations benchmarks with default input values
     mxnet_operator_benchmark_results.append(run_mx_random_sampling_operators_benchmarks(ctx=ctx, dtype=dtype, profiler=profiler, int64_tensor=int64_tensor, warmup=warmup, runs=runs))
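This hunk is the change named in the commit title: the GEMM wrapper call now forwards the caller's warmup and runs values, so GEMM ops are measured with the same warmup/run counts as the other categories in this file (compare the random-sampling call in the context line above). For readers new to the warmup/runs split, a generic, self-contained sketch of what such a measurement loop does; the helper name and defaults are illustrative, not the opperf implementation:

import time

def time_operator(fn, warmup=25, runs=100):
    for _ in range(warmup):      # warmup calls are executed but never timed
        fn()
    start = time.perf_counter()
    for _ in range(runs):        # only these calls contribute to the average
        fn()
    return (time.perf_counter() - start) / runs

# Example workload standing in for a GEMM op.
avg = time_operator(lambda: sum(i * i for i in range(1000)))
print(f"average time per run: {avg:.6f} s")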
benchmark/opperf/utils/op_registry_utils.py (22 additions, 17 deletions)
@@ -122,32 +122,37 @@ def prepare_op_inputs(op, arg_params, int64_tensor):
     custom_data = ['Activation', 'LeakyReLU', 'Softmax', 'BilinearSampler', 'GridGenerator', 'sample_multinomial', 'linalg_maketrian']
 
     if int64_tensor == 'on':
-        DEFAULTS_INPUTS = DEFAULTS_INPUTS_LARGE_TENSOR
+        default_inputs = DEFAULTS_INPUTS_LARGE_TENSOR
+    else:
+        default_inputs = DEFAULTS_INPUTS
 
+    print(op)
+
     # Prepare op to default input mapping
     arg_values = {}
     for arg_name, arg_type in zip(arg_params["params"]["arg_names"],
                                   arg_params["params"]["arg_types"]):
+        print(arg_name)
         if "NDArray" in arg_type and op == "ravel_multi_index":
-            arg_values[arg_name] = DEFAULTS_INPUTS["ravel_data"]
-        elif op in custom_data and arg_name + "_" + op.lower() in DEFAULTS_INPUTS:
-            arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_" + op.lower()]
-        elif "NDArray" in arg_type and arg_name + "_nd" in DEFAULTS_INPUTS:
-            arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_nd"]
-        elif "NDArray" in arg_type and op in ops_4d and arg_name + "_4d" in DEFAULTS_INPUTS:
-            arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_4d"]
-        elif "NDArray" in arg_type and op in ops_3d and arg_name + "_3d" in DEFAULTS_INPUTS:
-            arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_3d"]
+            arg_values[arg_name] = default_inputs["ravel_data"]
+        elif op in custom_data and arg_name + "_" + op.lower() in default_inputs:
+            arg_values[arg_name] = default_inputs[arg_name + "_" + op.lower()]
+        elif "NDArray" in arg_type and arg_name + "_nd" in default_inputs:
+            arg_values[arg_name] = default_inputs[arg_name + "_nd"]
+        elif "NDArray" in arg_type and op in ops_4d and arg_name + "_4d" in default_inputs:
+            arg_values[arg_name] = default_inputs[arg_name + "_4d"]
+        elif "NDArray" in arg_type and op in ops_3d and arg_name + "_3d" in default_inputs:
+            arg_values[arg_name] = default_inputs[arg_name + "_3d"]
         elif "NDArray" in arg_type and op == 'softmax_cross_entropy':
-            arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_smce"]
-        elif arg_name in DEFAULTS_INPUTS:
-            arg_values[arg_name] = DEFAULTS_INPUTS[arg_name]
-        elif "float" in arg_type and arg_name + "_float" in DEFAULTS_INPUTS:
-            arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_float"]
-        elif "Shape" in arg_type and arg_name + "_shape" in DEFAULTS_INPUTS:
+            arg_values[arg_name] = default_inputs[arg_name + "_smce"]
+        elif arg_name in default_inputs:
+            arg_values[arg_name] = default_inputs[arg_name]
+        elif "float" in arg_type and arg_name + "_float" in default_inputs:
+            arg_values[arg_name] = default_inputs[arg_name + "_float"]
+        elif "Shape" in arg_type and arg_name + "_shape" in default_inputs:
             # This is for cases where in some ops 'axis' is Int in some ops a shape tuple.
             # Ex: axis in sum is shape, axis in sort is int.
-            arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_shape"]
+            arg_values[arg_name] = default_inputs[arg_name + "_shape"]
 
     # Number of different inputs we want to use to test
     # the operator
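Two things happen in this hunk: print(op) and print(arg_name) are temporary debug output (the "debugging process failure" part of the commit message), and the lookup table is now selected into a local default_inputs instead of rebinding DEFAULTS_INPUTS inside the function. Assuming there is no global declaration earlier in prepare_op_inputs (not shown in this hunk), the old form makes DEFAULTS_INPUTS local to the whole function, so reading it raises UnboundLocalError whenever int64_tensor is not 'on'. A self-contained toy of that pitfall; the names and shapes below are illustrative only, not the opperf defaults:

# Illustrative toy tables, not the repository's DEFAULTS_INPUTS.
DEFAULTS = {"data": (1024, 1024)}
DEFAULTS_LARGE = {"data": (2**32 + 1,)}

def broken(int64_tensor):
    if int64_tensor == 'on':
        DEFAULTS = DEFAULTS_LARGE   # assignment makes DEFAULTS local to broken()
    return DEFAULTS["data"]         # UnboundLocalError when int64_tensor != 'on'

def fixed(int64_tensor):
    # Select into a new local name, leaving the module-level tables untouched.
    defaults = DEFAULTS_LARGE if int64_tensor == 'on' else DEFAULTS
    return defaults["data"]

print(fixed('off'))   # (1024, 1024)
print(fixed('on'))    # (4294967297,)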
