From 738701013ea47a9a4aa274c186fc1cdfff05e3d1 Mon Sep 17 00:00:00 2001
From: Vivek Panyam
Date: Fri, 7 Aug 2020 16:46:17 -0700
Subject: [PATCH] Add a RuntimeOption to set inter and intra op threadpool sizes

---
 .../backends/tensorflow/tf_backend.cc     | 20 ++++++++++++++++++-
 .../backends/torchscript/torch_backend.cc | 12 +++++++++++
 source/neuropod/options.hh                | 13 ++++++++++++
 3 files changed, 44 insertions(+), 1 deletion(-)

diff --git a/source/neuropod/backends/tensorflow/tf_backend.cc b/source/neuropod/backends/tensorflow/tf_backend.cc
index c30ad67d..0a8844b3 100644
--- a/source/neuropod/backends/tensorflow/tf_backend.cc
+++ b/source/neuropod/backends/tensorflow/tf_backend.cc
@@ -93,7 +93,7 @@ void check_tf_status(const tensorflow::Status &status)
 }
 
 // Get TF session options given Neuropod RuntimeOptions
-tensorflow::SessionOptions get_tf_opts(const RuntimeOptions & /*unused*/)
+tensorflow::SessionOptions get_tf_opts(const RuntimeOptions &runtime_opts)
 {
     tensorflow::SessionOptions opts;
 
@@ -103,6 +103,24 @@ tensorflow::SessionOptions get_tf_opts(const RuntimeOptions & /*unused*/)
     opts.config.set_allow_soft_placement(true);
     opts.config.set_log_device_placement(false);
 
+    // Set intra- and inter-op parallelism
+    // See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/protobuf/config.proto
+    if (runtime_opts.intra_op_parallelism_threads != 0)
+    {
+        opts.config.set_intra_op_parallelism_threads(runtime_opts.intra_op_parallelism_threads);
+    }
+
+    if (runtime_opts.inter_op_parallelism_threads == 1)
+    {
+        // A negative value tells TF to run all ops on the caller thread
+        opts.config.set_inter_op_parallelism_threads(-1);
+    }
+    else if (runtime_opts.inter_op_parallelism_threads > 1)
+    {
+        // The count in runtime_opts includes the caller thread, so subtract one
+        opts.config.set_inter_op_parallelism_threads(runtime_opts.inter_op_parallelism_threads - 1);
+    }
+
     // Note: we can't use GPUOptions::visible_device_list as it is a per process setting
     //
     // From: https://github.com/tensorflow/tensorflow/issues/18861#issuecomment-385610497
diff --git a/source/neuropod/backends/torchscript/torch_backend.cc b/source/neuropod/backends/torchscript/torch_backend.cc
index 9c845893..7d8fa762 100644
--- a/source/neuropod/backends/torchscript/torch_backend.cc
+++ b/source/neuropod/backends/torchscript/torch_backend.cc
@@ -225,6 +225,18 @@ std::mutex loaded_op_mutex;
 TorchNeuropodBackend::TorchNeuropodBackend(const std::string &neuropod_path, const RuntimeOptions &options)
     : NeuropodBackendWithDefaultAllocator<TorchNeuropodTensor>(neuropod_path, options)
 {
+    // Set intra- and inter-op parallelism
+    // See https://pytorch.org/docs/stable/notes/cpu_threading_torchscript_inference.html#runtime-api
+    if (options.inter_op_parallelism_threads != 0)
+    {
+        at::set_num_interop_threads(options.inter_op_parallelism_threads);
+    }
+
+    if (options.intra_op_parallelism_threads != 0)
+    {
+        at::set_num_threads(options.intra_op_parallelism_threads);
+    }
+
     if (options.load_model_at_construction)
     {
         load_model();
diff --git a/source/neuropod/options.hh b/source/neuropod/options.hh
index 319bda07..7d908b46 100644
--- a/source/neuropod/options.hh
+++ b/source/neuropod/options.hh
@@ -75,6 +75,19 @@ struct RuntimeOptions
 
     // Whether or not to disable shape and type checking when running inference
     bool disable_shape_and_type_checking = false;
+
+    // Set the intra- and inter-op parallelism for the underlying framework
+    // Within a given process, only the first use of these options takes effect
+    // See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/protobuf/config.proto
+    // and https://pytorch.org/docs/stable/notes/cpu_threading_torchscript_inference.html#runtime-api
+    // for more details
+    // For true per-model control of these values, use out-of-process execution (see above)
+    // A value of 0 means use the framework's default
+    uint32_t intra_op_parallelism_threads = 0;
+
+    // A value of 0 means use the framework's default
+    // Note: this count includes the caller thread
+    uint32_t inter_op_parallelism_threads = 0;
 };
 
 } // namespace neuropod
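
Usage note (not part of the patch itself): the sketch below shows how a caller
could exercise the new options. It assumes the neuropod::Neuropod(path, options)
constructor from neuropod/neuropod.hh; the model path is a placeholder.

    #include "neuropod/neuropod.hh"

    void load_with_threadpool_limits()
    {
        neuropod::RuntimeOptions opts;

        // Up to 4 threads per individual op kernel; passed through unchanged
        // to TF (intra_op_parallelism_threads) and Torch (at::set_num_threads)
        opts.intra_op_parallelism_threads = 4;

        // 4 threads total for running independent ops, including the caller
        // thread. For TF this becomes an inter-op pool of 3 (plus the caller);
        // a value of 1 maps to TF's -1, i.e. run everything on the caller thread
        opts.inter_op_parallelism_threads = 4;

        // "/path/to/model.neuropod" is a placeholder
        neuropod::Neuropod model("/path/to/model.neuropod", opts);

        // ... run inference as usual ...
    }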
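
Because only the first in-process use of these options takes effect, per-model
thread counts require out-of-process execution. A hypothetical sketch, assuming
the use_ope flag declared earlier in RuntimeOptions (outside this hunk):

    #include "neuropod/neuropod.hh"

    void load_two_models_with_different_limits()
    {
        // Each model runs in its own worker process, so each worker's first
        // (and only) use of the threading options takes effect independently
        neuropod::RuntimeOptions heavy;
        heavy.use_ope                      = true; // assumed flag, see above
        heavy.intra_op_parallelism_threads = 8;

        neuropod::RuntimeOptions light;
        light.use_ope                      = true;
        light.intra_op_parallelism_threads = 2;

        // Placeholder paths
        neuropod::Neuropod model_a("/path/to/a.neuropod", heavy);
        neuropod::Neuropod model_b("/path/to/b.neuropod", light);
    }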