From 7c1708fa210a09b44666f3584a3275eaf5c0068e Mon Sep 17 00:00:00 2001
From: Lin Yuan
Date: Thu, 12 Sep 2019 17:07:51 +0000
Subject: [PATCH 1/3] set fixed seed for profiler

---
 benchmark/opperf/utils/benchmark_utils.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/benchmark/opperf/utils/benchmark_utils.py b/benchmark/opperf/utils/benchmark_utils.py
index a6ee38bf9f65..5db6b0b5ea83 100644
--- a/benchmark/opperf/utils/benchmark_utils.py
+++ b/benchmark/opperf/utils/benchmark_utils.py
@@ -28,6 +28,7 @@
 
 
 def _prepare_op_inputs(inputs, run_backward, dtype, ctx):
+    mx.random.seed(41)
     kwargs_list = []
     args_list = []
 

From d79e04bdd0d8819050977abfd9f8687de489ddcf Mon Sep 17 00:00:00 2001
From: Lin Yuan
Date: Thu, 12 Sep 2019 17:20:35 +0000
Subject: [PATCH 2/3] update README with profiler option

---
 benchmark/opperf/README.md | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/benchmark/opperf/README.md b/benchmark/opperf/README.md
index 6e628dfe40a8..e3b32ebee93a 100644
--- a/benchmark/opperf/README.md
+++ b/benchmark/opperf/README.md
@@ -129,7 +129,7 @@ Output for the above benchmark run, on a CPU machine, would look something like
 
 ```
 
-## Usecase 3.1 - Run benchmarks for group of operators with same input
+## Usecase 4 - Run benchmarks for group of operators with same input
 For example, you want to run benchmarks for `nd.add`, `nd.sub` operator in MXNet, with the same set of inputs. You just run the following python script.
 
 ```
@@ -173,6 +173,22 @@ This utility queries MXNet operator registry to fetch all operators registered w
 However, fully automated tests are enabled only for simpler operators such as - broadcast operators, element_wise operators etc...
 For the purpose of readability and giving more control to the users, complex operators such as convolution (2D, 3D), Pooling, Recurrent are not fully automated but expressed as default rules.
 See `utils/op_registry_utils.py` for more details.
+## Use nativa python timer
+Optionally, you can use the python `time` package as the profiler engine to measure the runtime of each operator.
+To use the python timer for all operators, pass the argument --profiler 'python':
+```
+python incubator-mxnet/benchmark/opperf/opperf.py --profiler='python'
+```
+
+To use the python timer for a specific operator, pass the argument `profiler` to the `run_performance_test` method:
+```
+add_res = run_performance_test([nd.add, nd.subtract], run_backward=True, dtype='float32', ctx=mx.cpu(),
+                               inputs=[{"lhs": (1024, 1024),
+                                        "rhs": (1024, 1024)}],
+                               warmup=10, runs=25, profiler='python')
+```
+By default, the MXNet profiler is used as the profiler engine.
+
 
 # TODO
 All contributions are welcome. Below is the list of desired features:
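A note on the seed change in [PATCH 1/3]: fixing the MXNet random seed makes the randomly generated benchmark inputs identical from run to run, so operator timings are not skewed by varying input data. The snippet below is a minimal sketch of that effect, assuming a standard MXNet install; the tensor shape, the helper name `make_input`, and the equality check are illustrative and not taken from the patch.

```
import mxnet as mx

def make_input(shape=(1024, 1024)):
    # Re-seeding before each draw makes the generated tensor deterministic,
    # mirroring the fixed seed the patch sets inside _prepare_op_inputs.
    mx.random.seed(41)
    return mx.nd.random.uniform(shape=shape)

a = make_input()
b = make_input()
# Both draws are identical, so repeated benchmark runs see the same inputs.
assert (a == b).min().asscalar() == 1.0
```
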
From 49b384f13f22fea2bfe0c4ecff7da7879ca839bb Mon Sep 17 00:00:00 2001
From: Lin Yuan
Date: Tue, 17 Sep 2019 16:46:20 +0000
Subject: [PATCH 3/3] fix wording

---
 benchmark/opperf/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/benchmark/opperf/README.md b/benchmark/opperf/README.md
index e3b32ebee93a..ef7747cf4483 100644
--- a/benchmark/opperf/README.md
+++ b/benchmark/opperf/README.md
@@ -173,7 +173,7 @@ This utility queries MXNet operator registry to fetch all operators registered w
 However, fully automated tests are enabled only for simpler operators such as - broadcast operators, element_wise operators etc...
 For the purpose of readability and giving more control to the users, complex operators such as convolution (2D, 3D), Pooling, Recurrent are not fully automated but expressed as default rules.
 See `utils/op_registry_utils.py` for more details.
-## Use nativa python timer
+## Use python timer
 Optionally, you can use the python `time` package as the profiler engine to measure the runtime of each operator.
 To use the python timer for all operators, pass the argument --profiler 'python':
 ```
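
For readers of the README section added above: the python timer option replaces the MXNet profiler engine with plain wall-clock timing. Conceptually, a python-timer measurement of a single operator looks roughly like the sketch below; this is an illustrative approximation, not the opperf implementation, and the helper name `time_operator_ms` and its defaults are made up here. The `nd.waitall()` calls matter because MXNet executes operators asynchronously, so timing without synchronization would mostly measure how quickly work is queued.

```
import time

import mxnet as mx
from mxnet import nd

def time_operator_ms(op, args, warmup=10, runs=25):
    # Warm-up iterations keep one-time costs (allocation, cache warm-up) out of the timing.
    for _ in range(warmup):
        op(*args)
    nd.waitall()                      # drain the async engine before starting the clock
    start = time.perf_counter()
    for _ in range(runs):
        op(*args)
    nd.waitall()                      # wait until all scheduled work has actually finished
    return (time.perf_counter() - start) * 1000.0 / runs

lhs = nd.random.uniform(shape=(1024, 1024), ctx=mx.cpu())
rhs = nd.random.uniform(shape=(1024, 1024), ctx=mx.cpu())
print("nd.add     : %.3f ms" % time_operator_ms(nd.add, (lhs, rhs)))
print("nd.subtract: %.3f ms" % time_operator_ms(nd.subtract, (lhs, rhs)))
```

The MXNet profiler, which the README keeps as the default engine, records per-operator events inside the engine rather than end-to-end wall-clock time, so the two engines may report somewhat different numbers for the same operator.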