Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions onnxruntime/core/framework/session_options.h
Original file line number Diff line number Diff line change
Expand Up @@ -226,6 +226,9 @@ struct SessionOptions {
bool has_explicit_ep_context_gen_options = false;
epctx::ModelGenOptions ep_context_gen_options = {};
epctx::ModelGenOptions GetEpContextGenerationOptions() const;

// Optional tag describing how the session is being used. Currently either "compilation" or empty.
std::string used_from;
};

inline std::ostream& operator<<(std::ostream& os, const SessionOptions& session_options) {
Expand Down
3 changes: 2 additions & 1 deletion onnxruntime/core/platform/telemetry.cc
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ void Telemetry::LogSessionCreation(uint32_t session_id, int64_t ir_version, cons
const std::string& model_weight_hash,
const std::unordered_map<std::string, std::string>& model_metadata,
const std::string& loadedFrom, const std::vector<std::string>& execution_provider_ids,
bool use_fp16, bool captureState) const {
bool use_fp16, bool captureState, const std::string& used_from) const {
ORT_UNUSED_PARAMETER(session_id);
ORT_UNUSED_PARAMETER(ir_version);
ORT_UNUSED_PARAMETER(model_producer_name);
Expand All @@ -79,6 +79,7 @@ void Telemetry::LogSessionCreation(uint32_t session_id, int64_t ir_version, cons
ORT_UNUSED_PARAMETER(execution_provider_ids);
ORT_UNUSED_PARAMETER(use_fp16);
ORT_UNUSED_PARAMETER(captureState);
ORT_UNUSED_PARAMETER(used_from);
}

void Telemetry::LogRuntimeError(uint32_t session_id, const common::Status& status, const char* file,
Expand Down
2 changes: 1 addition & 1 deletion onnxruntime/core/platform/telemetry.h
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ class Telemetry {
const std::string& model_weight_hash,
const std::unordered_map<std::string, std::string>& model_metadata,
const std::string& loadedFrom, const std::vector<std::string>& execution_provider_ids,
bool use_fp16, bool captureState) const;
bool use_fp16, bool captureState, const std::string& used_from) const;

virtual void LogRuntimeError(uint32_t session_id, const common::Status& status, const char* file,
const char* function, uint32_t line) const;
Expand Down
8 changes: 5 additions & 3 deletions onnxruntime/core/platform/windows/telemetry.cc
Original file line number Diff line number Diff line change
Expand Up @@ -235,7 +235,7 @@ void WindowsTelemetry::LogSessionCreation(uint32_t session_id, int64_t ir_versio
const std::string& model_weight_hash,
const std::unordered_map<std::string, std::string>& model_metadata,
const std::string& loaded_from, const std::vector<std::string>& execution_provider_ids,
bool use_fp16, bool captureState) const {
bool use_fp16, bool captureState, const std::string& used_from) const {
if (global_register_count_ == 0 || enabled_ == false)
return;

Expand Down Expand Up @@ -304,7 +304,8 @@ void WindowsTelemetry::LogSessionCreation(uint32_t session_id, int64_t ir_versio
TraceLoggingString(model_weight_hash.c_str(), "modelWeightHash"),
TraceLoggingString(model_metadata_string.c_str(), "modelMetaData"),
TraceLoggingString(loaded_from.c_str(), "loadedFrom"),
TraceLoggingString(execution_provider_string.c_str(), "executionProviderIds"));
TraceLoggingString(execution_provider_string.c_str(), "executionProviderIds"),
TraceLoggingString(used_from.c_str(), "usedFrom"));
} else {
TraceLoggingWrite(telemetry_provider_handle,
"SessionCreation_CaptureState",
Expand All @@ -330,7 +331,8 @@ void WindowsTelemetry::LogSessionCreation(uint32_t session_id, int64_t ir_versio
TraceLoggingString(model_weight_hash.c_str(), "modelWeightHash"),
TraceLoggingString(model_metadata_string.c_str(), "modelMetaData"),
TraceLoggingString(loaded_from.c_str(), "loadedFrom"),
TraceLoggingString(execution_provider_string.c_str(), "executionProviderIds"));
TraceLoggingString(execution_provider_string.c_str(), "executionProviderIds"),
TraceLoggingString(used_from.c_str(), "usedFrom"));
}
}

Expand Down
2 changes: 1 addition & 1 deletion onnxruntime/core/platform/windows/telemetry.h
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ class WindowsTelemetry : public Telemetry {
const std::string& model_weight_hash,
const std::unordered_map<std::string, std::string>& model_metadata,
const std::string& loadedFrom, const std::vector<std::string>& execution_provider_ids,
bool use_fp16, bool captureState) const override;
bool use_fp16, bool captureState, const std::string& used_from) const override;

void LogRuntimeError(uint32_t session_id, const common::Status& status, const char* file,
const char* function, uint32_t line) const override;
Expand Down
3 changes: 2 additions & 1 deletion onnxruntime/core/session/inference_session.cc
Original file line number Diff line number Diff line change
Expand Up @@ -518,6 +518,7 @@
}

telemetry_ = {};
telemetry_.used_from_ = session_options_.used_from;

Check failure on line 521 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / build_x64_release_vitisai

'used_from': is not a member of 'onnxruntime::InferenceSession::Telemetry'

Check failure on line 521 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / build_x64_release

'used_from': is not a member of 'onnxruntime::InferenceSession::Telemetry'

Check failure on line 521 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / build_x86_release

'used_from': is not a member of 'onnxruntime::InferenceSession::Telemetry'

Check failure on line 521 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / build_x64_release_ep_generic_interface

'used_from': is not a member of 'onnxruntime::InferenceSession::Telemetry'

Check failure on line 521 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / build_x64_debug

'used_from': is not a member of 'onnxruntime::InferenceSession::Telemetry'

Check failure on line 521 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / build_x64_release_xnnpack

'used_from': is not a member of 'onnxruntime::InferenceSession::Telemetry'

Check failure on line 521 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / Windows GPU TensorRT CI Pipeline

'used_from': is not a member of 'onnxruntime::InferenceSession::Telemetry'

Check failure on line 521 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / Windows GPU CUDA CI Pipeline

'used_from': is not a member of 'onnxruntime::InferenceSession::Telemetry'

Check failure on line 521 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / Windows GPU DML CI Pipeline

'used_from': is not a member of 'onnxruntime::InferenceSession::Telemetry'

Check failure on line 521 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / webgpu_minimal_build_edge_build_x64_RelWithDebInfo

'used_from': is not a member of 'onnxruntime::InferenceSession::Telemetry'

Check failure on line 521 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / webgpu_build_x64_RelWithDebInfo (novcpkg, dynamic)

'used_from': is not a member of 'onnxruntime::InferenceSession::Telemetry'

Check failure on line 521 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / webgpu_build_x64_RelWithDebInfo (novcpkg, static)

'used_from': is not a member of 'onnxruntime::InferenceSession::Telemetry'

Check failure on line 521 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / webgpu_build_x64_RelWithDebInfo (vcpkg, static)

'used_from': is not a member of 'onnxruntime::InferenceSession::Telemetry'

Check failure on line 521 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / webgpu_build_x64_RelWithDebInfo (vcpkg, dynamic)

'used_from': is not a member of 'onnxruntime::InferenceSession::Telemetry'

#ifdef _WIN32
std::lock_guard<std::mutex> lock(active_sessions_mutex_);
Expand Down Expand Up @@ -2504,7 +2505,7 @@
env.GetTelemetryProvider().LogSessionCreation(
session_id_, model_->IrVersion(), model_->ProducerName(), model_->ProducerVersion(), model_->Domain(),
graph.DomainToVersionMap(), model_file_name, graph.Name(), model_weight_type, model_graph_hash, model_weight_hash,
model_->MetaData(), telemetry_.event_name_, execution_providers_.GetIds(), model_has_fp16_inputs, false);
model_->MetaData(), telemetry_.event_name_, execution_providers_.GetIds(), model_has_fp16_inputs, false, telemetry_.used_from_);

LOGS(*session_logger_, INFO) << "Session successfully initialized.";
}
Expand Down Expand Up @@ -3890,7 +3891,7 @@
std::string model_weight_type = session->GetWeightDataType();
std::string model_graph_hash = session->GetGraphHash();
std::string model_weight_hash = session->GetWeightHash();
env.GetTelemetryProvider().LogSessionCreation(

Check failure on line 3894 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / build_x64_release_vitisai

'onnxruntime::Telemetry::LogSessionCreation': function does not take 16 arguments

Check failure on line 3894 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / build_x64_release

'onnxruntime::Telemetry::LogSessionCreation': function does not take 16 arguments

Check failure on line 3894 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / build_x86_release

'onnxruntime::Telemetry::LogSessionCreation': function does not take 16 arguments

Check failure on line 3894 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / build_x64_release_ep_generic_interface

'onnxruntime::Telemetry::LogSessionCreation': function does not take 16 arguments

Check failure on line 3894 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / build_x64_debug

'onnxruntime::Telemetry::LogSessionCreation': function does not take 16 arguments

Check failure on line 3894 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / build_x64_release_xnnpack

'onnxruntime::Telemetry::LogSessionCreation': function does not take 16 arguments

Check failure on line 3894 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / Windows GPU TensorRT CI Pipeline

'onnxruntime::Telemetry::LogSessionCreation': function does not take 16 arguments

Check failure on line 3894 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / Windows GPU CUDA CI Pipeline

'onnxruntime::Telemetry::LogSessionCreation': function does not take 16 arguments

Check failure on line 3894 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / Windows GPU DML CI Pipeline

'onnxruntime::Telemetry::LogSessionCreation': function does not take 16 arguments

Check failure on line 3894 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / webgpu_minimal_build_edge_build_x64_RelWithDebInfo

'onnxruntime::Telemetry::LogSessionCreation': function does not take 16 arguments

Check failure on line 3894 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / webgpu_build_x64_RelWithDebInfo (novcpkg, dynamic)

'onnxruntime::Telemetry::LogSessionCreation': function does not take 16 arguments

Check failure on line 3894 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / webgpu_build_x64_RelWithDebInfo (novcpkg, static)

'onnxruntime::Telemetry::LogSessionCreation': function does not take 16 arguments

Check failure on line 3894 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / webgpu_build_x64_RelWithDebInfo (vcpkg, static)

'onnxruntime::Telemetry::LogSessionCreation': function does not take 16 arguments

Check failure on line 3894 in onnxruntime/core/session/inference_session.cc

View workflow job for this annotation

GitHub Actions / webgpu_build_x64_RelWithDebInfo (vcpkg, dynamic)

'onnxruntime::Telemetry::LogSessionCreation': function does not take 16 arguments
session->session_id_, model->IrVersion(), model->ProducerName(), model->ProducerVersion(), model->Domain(),
graph.DomainToVersionMap(), model_file_name, graph.Name(), model_weight_type, model_graph_hash, model_weight_hash,
model->MetaData(), session->telemetry_.event_name_, session->execution_providers_.GetIds(), model_has_fp16_inputs, true, session->telemetry_.used_from_);
Expand Down
1 change: 1 addition & 0 deletions onnxruntime/core/session/inference_session.h
Original file line number Diff line number Diff line change
Expand Up @@ -944,6 +944,7 @@ class InferenceSession {
uint32_t total_runs_since_last_ = 0; // the total number of Run() calls since the last report
long long total_run_duration_since_last_ = 0; // the total duration (us) of Run() calls since the last report
std::string event_name_; // where the model is loaded from: ["model_loading_uri", "model_loading_proto", "model_loading_istream"]
Comment on lines 944 to 946
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
uint32_t total_runs_since_last_ = 0; // the total number of Run() calls since the last report
long long total_run_duration_since_last_ = 0; // the total duration (us) of Run() calls since the last report
std::string event_name_; // where the model is loaded from: ["model_loading_uri", "model_loading_proto", "model_loading_istream"]
uint32_t total_runs_since_last_ = 0; // the total number of Run() calls since the last report
long long total_run_duration_since_last_ = 0; // the total duration (us) of Run() calls since the last report
std::string event_name_; // where the model is loaded from: ["model_loading_uri", "model_loading_proto", "model_loading_istream"]

std::string used_from_;
std::unordered_map<int64_t, long long> duration_per_batch_size_; // the duration (us) of Run() calls per batch size since the last report

TimePoint time_sent_last_; // the TimePoint of the last report
Expand Down
1 change: 1 addition & 0 deletions onnxruntime/core/session/model_compilation_options.cc
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
namespace onnxruntime {
ModelCompilationOptions::ModelCompilationOptions(const onnxruntime::Environment& env, const OrtSessionOptions& session_options)
: env_(env), session_options_(session_options) {
session_options_.value.used_from = "compilation";
session_options_.value.has_explicit_ep_context_gen_options = true;
session_options_.value.ep_context_gen_options = session_options.value.GetEpContextGenerationOptions();
session_options_.value.ep_context_gen_options.enable = true;
Expand Down
Loading