diff --git a/sdk/ml/azure-ai-ml/README.md b/sdk/ml/azure-ai-ml/README.md index de3218705219..0ad6cf795472 100644 --- a/sdk/ml/azure-ai-ml/README.md +++ b/sdk/ml/azure-ai-ml/README.md @@ -94,9 +94,11 @@ See full SDK logging documentation with examples [here][sdk_logging_docs]. ### Telemetry -The Azure ML Python SDK includes a telemetry feature that collects usage and failure data about the SDK and sends it to Microsoft when you use the SDK. +The Azure ML Python SDK includes a telemetry feature that collects usage and failure data about the SDK and sends it to Microsoft when you use the SDK in a Jupyter Notebook only. +Telemetry will **not** be collected for any use of the Python SDK outside of a Jupyter Notebook. + Telemetry data helps the SDK team understand how the SDK is used so it can be improved and the information about failures helps the team resolve problems and fix bugs. -The SDK telemetry feature is enabled by default. To opt out of the telemetry feature, set the AZUREML_SDKV2_TELEMETRY_OPTOUT environment variable to 1 or true. +The SDK telemetry feature is enabled by default for Jupyter Notebook usage. To opt out of the telemetry feature, set the AZUREML_SDKV2_TELEMETRY_OPTOUT environment variable to '1' or 'true'. 
## Next steps diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_ml_client.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_ml_client.py index 38aaf3170b04..66f095131df8 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_ml_client.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_ml_client.py @@ -31,6 +31,7 @@ from azure.ai.ml._restclient.v2022_10_01_preview import AzureMachineLearningWorkspaces as ServiceClient102022Preview from azure.ai.ml._restclient.v2022_10_01 import AzureMachineLearningWorkspaces as ServiceClient102022 from azure.ai.ml._scope_dependent_operations import OperationConfig, OperationsContainer, OperationScope +from azure.ai.ml._telemetry.logging_handler import get_appinsights_log_handler from azure.ai.ml._user_agent import USER_AGENT from azure.ai.ml._utils._experimental import experimental from azure.ai.ml._utils._http_utils import HttpPipeline @@ -215,12 +216,12 @@ def __init__( if registry_name: properties.update({"registry_name": registry_name}) - # user_agent = None - # if "user_agent" in kwargs: - # user_agent = kwargs.get("user_agent") - # app_insights_handler = get_appinsights_log_handler(user_agent, **{"properties": properties}) - # app_insights_handler_kwargs = {"app_insights_handler": app_insights_handler} - app_insights_handler_kwargs = {} + user_agent = None + if "user_agent" in kwargs: + user_agent = kwargs.get("user_agent") + + app_insights_handler = get_appinsights_log_handler(user_agent, **{"properties": properties}) + app_insights_handler_kwargs = {"app_insights_handler": app_insights_handler} base_url = _get_base_url_from_metadata(cloud_name=cloud_name, is_local_mfe=True) self._base_url = base_url diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_telemetry/__init__.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_telemetry/__init__.py index bacf7237719a..7fb3b412ec6c 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_telemetry/__init__.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_telemetry/__init__.py @@ -4,14 +4,14 @@ __path__ = __import__("pkgutil").extend_path(__path__, __name__) -# 
from .activity import ActivityType, log_activity, monitor_with_activity, monitor_with_telemetry_mixin -# from .logging_handler import AML_INTERNAL_LOGGER_NAMESPACE, get_appinsights_log_handler +from .activity import ActivityType, log_activity, monitor_with_activity, monitor_with_telemetry_mixin +from .logging_handler import AML_INTERNAL_LOGGER_NAMESPACE, get_appinsights_log_handler __all__ = [ - # "monitor_with_activity", - # "monitor_with_telemetry_mixin", - # "log_activity", - # "ActivityType", - # "get_appinsights_log_handler", - # "AML_INTERNAL_LOGGER_NAMESPACE", + "monitor_with_activity", + "monitor_with_telemetry_mixin", + "log_activity", + "ActivityType", + "get_appinsights_log_handler", + "AML_INTERNAL_LOGGER_NAMESPACE", ] diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_telemetry/activity.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_telemetry/activity.py index da15801c8300..3a039ce02539 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_telemetry/activity.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_telemetry/activity.py @@ -22,10 +22,10 @@ from marshmallow import ValidationError -from azure.ai.ml.exceptions import ErrorCategory, MlException from azure.core.exceptions import HttpResponseError -from .._utils.utils import _is_user_error_from_exception_type, _is_user_error_from_status_code, _str_to_bool +from azure.ai.ml._utils.utils import _is_user_error_from_exception_type, _is_user_error_from_status_code, _str_to_bool +from azure.ai.ml.exceptions import ErrorCategory, MlException # Get environment variable IS_IN_CI_PIPELINE to decide whether it's in CI test IS_IN_CI_PIPELINE = _str_to_bool(os.environ.get("IS_IN_CI_PIPELINE", "False")) diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_telemetry/logging_handler.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_telemetry/logging_handler.py index 80f0caa0865a..52af4aef0088 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_telemetry/logging_handler.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_telemetry/logging_handler.py @@ -4,23 +4,16 @@ # pylint: 
disable=protected-access -"""Contains functionality for sending telemetry to Application Insights.""" +"""Contains functionality for sending telemetry to Application Insights via OpenCensus Azure Monitor Exporter.""" -import datetime -import json -import urllib.request as http_client_t +import logging +import platform from os import getenv -from urllib.error import HTTPError - -# from applicationinsights import TelemetryClient -# from applicationinsights.channel import ( -# AsynchronousQueue, -# AsynchronousSender, -# SynchronousQueue, -# SynchronousSender, -# TelemetryChannel, -# TelemetryContext, -# ) + +from opencensus.ext.azure.log_exporter import AzureLogHandler + +from azure.ai.ml._user_agent import USER_AGENT + AML_INTERNAL_LOGGER_NAMESPACE = "azure.ai.ml._telemetry" @@ -51,308 +44,98 @@ ] -def is_telemetry_collection_disabled(): - telemetry_disabled = getenv(AZUREML_SDKV2_TELEMETRY_OPTOUT_ENV_VAR) - return telemetry_disabled and (telemetry_disabled.lower() == "true" or telemetry_disabled == "1") - - -# def get_appinsights_log_handler( -# user_agent, -# *args, # pylint: disable=unused-argument -# instrumentation_key=None, -# component_name=None, -# **kwargs -# ): -# """Enable the Application Insights logging handler for specified logger and -# instrumentation key. - -# Enable diagnostics collection with the :func:`azureml.telemetry.set_diagnostics_collection` function. - -# :param instrumentation_key: The Application Insights instrumentation key. -# :type instrumentation_key: str -# :param component_name: The component name. -# :type component_name: str -# :param args: Optional arguments for formatting messages. -# :type args: list -# :param kwargs: Optional keyword arguments for adding additional information to messages. -# :type kwargs: dict -# :return: The logging handler. 
-# :rtype: logging.Handler -# """ -# try: -# if instrumentation_key is None: -# instrumentation_key = INSTRUMENTATION_KEY - -# if is_telemetry_collection_disabled(): -# return logging.NullHandler() - -# if not user_agent or not user_agent.lower() == USER_AGENT.lower(): -# return logging.NullHandler() - -# if "properties" in kwargs and "subscription_id" in kwargs.get("properties"): -# if kwargs.get("properties")["subscription_id"] in test_subscriptions: -# return logging.NullHandler() - -# child_namespace = component_name or __name__ -# current_logger = logging.getLogger(AML_INTERNAL_LOGGER_NAMESPACE).getChild(child_namespace) -# current_logger.propagate = False -# current_logger.setLevel(logging.CRITICAL) - -# context = TelemetryContext() -# custom_properties = {"PythonVersion": platform.python_version()} -# custom_properties.update({"user_agent": user_agent}) -# if "properties" in kwargs: -# context._properties.update(kwargs.pop("properties")) -# context._properties.update(custom_properties) -# handler = AppInsightsLoggingHandler(instrumentation_key, current_logger, telemetry_context=context) - -# return handler -# except Exception: # pylint: disable=broad-except -# # ignore exceptions, telemetry should not block -# return logging.NullHandler() - - -# class AppInsightsLoggingHandler(logging.Handler): -# """Integration point between Python's logging framework and the Application -# Insights service. - -# :param instrumentation_key: The instrumentation key to use while sending telemetry to the Application -# Insights service. -# :type instrumentation_key: str -# :param logger: -# :type logger: logger -# :param sender: The Application Insight sender object. -# :type sender: SynchronousSender -# :param args: Optional arguments for formatting messages. -# :type args: list -# :param kwargs: Optional keyword arguments for adding additional information to messages. 
-# :type kwargs: dict -# """ - -# def __init__(self, instrumentation_key, logger, *args, sender=None, **kwargs): -# """Initialize a new instance of the class. - -# :param instrumentation_key: The instrumentation key to use while sending telemetry to the Application -# Insights service. -# :type instrumentation_key: str -# :param sender: The Application Insight sender object. -# :type sender: SynchronousSender -# :param args: Optional arguments for formatting messages. -# :type args: list -# :param kwargs: Optional keyword arguments for adding additional information to messages. -# :type kwargs: dict -# """ -# if not instrumentation_key: -# raise Exception("Instrumentation key was required but not provided") - -# telemetry_context = None -# if "telemetry_context" in kwargs: -# telemetry_context = kwargs.pop("telemetry_context") -# else: -# telemetry_context = TelemetryContext() - -# if "properties" in kwargs: -# telemetry_context._properties.update(kwargs.pop("properties")) - -# self.logger = logger -# self._sender = sender or _RetrySynchronousSender - -# # Configuring an asynchronous client as default telemetry client (fire and forget mode) -# self._default_client = TelemetryClient( -# instrumentation_key, self._create_asynchronous_channel(telemetry_context) -# ) - -# # Configuring a synchronous client and should be only used in critical scenarios -# self._synchronous_client = TelemetryClient( -# instrumentation_key, self._create_synchronous_channel(telemetry_context) -# ) - -# super(AppInsightsLoggingHandler, self).__init__(*args, **kwargs) - -# def flush(self): -# """Flush the queued up telemetry to the service.""" -# self._default_client.flush() -# return super(AppInsightsLoggingHandler, self).flush() - -# def emit(self, record): -# """Emit a log record. - -# :param record: The log record to format and send. 
-# :type record: logging.LogRecord -# """ -# if is_telemetry_collection_disabled(): -# return -# try: -# if ( -# reformat_traceback -# and record.levelno >= logging.WARNING -# and hasattr(record, "message") -# and record.message.find(TRACEBACK_LOOKUP_STR) != -1 -# ): -# record.message = format_exc() -# record.msg = record.message - -# properties = {"level": record.levelname} -# properties.update(self._default_client.context.properties) - -# formatted_message = self.format(record) - -# if hasattr(record, "properties"): -# properties.update(record.properties) -# # if we have exec_info, send it as an exception -# if record.exc_info and not all(item is None for item in record.exc_info): -# # for compliance we not allowed to collect trace with file path -# self._synchronous_client.track_trace(format_exc(), severity=record.levelname, properties=properties) -# return -# # otherwise, send the trace -# self._default_client.track_trace(formatted_message, severity=record.levelname, properties=properties) -# except Exception: # pylint: disable=broad-except -# # ignore exceptions, telemetry should not block because of trimming -# return - -# def _create_synchronous_channel(self, context): -# """Create a synchronous app insight channel. - -# :param context: The Application Insights context. -# :return: TelemetryChannel -# """ -# channel = TelemetryChannel(context=context, queue=SynchronousQueue(self._sender(self.logger))) -# # the behavior is same to call flush every time -# channel.queue.max_queue_length = 1 -# return channel - -# def _create_asynchronous_channel(self, context): -# """Create an async app insight channel. - -# :param context: The Applications Insights context. 
-# :return: TelemetryChannel -# """ -# sender = _RetryAsynchronousSender(self.logger) -# queue = AsynchronousQueue(sender) -# channel = TelemetryChannel(context, queue) - -# # flush telemetry if we have 10 or more telemetry items in our queue -# channel.queue.max_queue_length = 10 -# # send telemetry to the service in batches of 10 -# channel.sender.send_buffer_size = 10 -# # the background worker thread will be active for 1 seconds before it shuts down. if -# # during this time items are picked up from the queue, the timer is reset. -# channel.sender.send_time = 1 -# # the background worker thread will poll the queue every 0.1 seconds for new items -# # 100ms is the most aggressive setting we can set -# channel.sender.send_interval = 0.1 - -# return channel - - -# class _RetrySynchronousSender(SynchronousSender): -# """Synchronous sender with limited retry. - -# SenderBase does infinite retry; this class avoids that. -# """ - -# def __init__(self, logger, timeout=10, retry=1): -# super(_RetrySynchronousSender, self).__init__() -# self._logger = logger -# self.send_timeout = timeout -# self.retry = retry -# self.consecutive_failures = 0 - -# def send(self, data_to_send): -# """Override the default resend mechanism in SenderBase. - -# Stop resend based on retry during failure. -# """ -# status = _http_send(self._logger, data_to_send, self.service_endpoint_uri, self.send_timeout) -# if status is SUCCESS: -# self.consecutive_failures = 0 -# return -# self.consecutive_failures = self.consecutive_failures + 1 - -# if self.consecutive_failures <= self.retry: -# for data in data_to_send: -# self._queue.put(data) - - -# class _RetryAsynchronousSender(AsynchronousSender): -# """Asynchronous sender with limited retry. - -# SenderBase does infinite retry; this class avoids that. 
-# """ - -# def __init__(self, logger, timeout=10, retry=3): -# super(_RetryAsynchronousSender, self).__init__() -# self._logger = logger -# self.send_timeout = timeout -# self.retry = retry -# self.consecutive_failures = 0 - -# def send(self, data_to_send): -# """Override the default resend mechanism in SenderBase. - -# Stop resend based on retry during failure. -# """ -# status = _http_send(self._logger, data_to_send, self.service_endpoint_uri, self.send_timeout) -# if status is SUCCESS: -# self.consecutive_failures = 0 -# return -# self.consecutive_failures = self.consecutive_failures + 1 - -# if self.consecutive_failures <= self.retry: -# for data in data_to_send: -# self._queue.put(data) - - -def _json_serialize_unknown(obj): - """JSON serializer for objects not serializable by default json code. - - :param obj: the object to be serialized +class CustomDimensionsFilter(logging.Filter): + """Add application-wide properties to AzureLogHandler records""" + + def __init__(self, custom_dimensions=None): # pylint: disable=super-init-not-called + self.custom_dimensions = custom_dimensions or {} + + def filter(self, record): + """Adds the default custom_dimensions into the current log record""" + custom_dimensions = self.custom_dimensions.copy() + custom_dimensions.update(getattr(record, 'custom_dimensions', {})) + record.custom_dimensions = custom_dimensions + + return True + + +def in_jupyter_notebook() -> bool: """ - if isinstance(obj, datetime.datetime): - return obj.isoformat() - raise TypeError("Type %s not serializable" % type(obj)) + Checks if user is using a Jupyter Notebook. This is necessary because logging is not allowed in + non-Jupyter contexts. 
+ Adapted from https://stackoverflow.com/a/22424821 + """ + try: # cspell:ignore ipython + from IPython import get_ipython + if 'IPKernelApp' not in get_ipython().config: + return False + except ImportError: + return False + except AttributeError: + return False + return True -def _http_send(logger, data_to_send, service_endpoint_uri, send_timeout=10): - """Replicate Application Insights SenderBase send method. - :param logger: logger - :param data_to_send: items to send - :param service_endpoint_uri: endpoint - :param send_timeout: timeout - :return: SUCCESS/FAILURE +def is_telemetry_collection_disabled(): + telemetry_disabled = getenv(AZUREML_SDKV2_TELEMETRY_OPTOUT_ENV_VAR) + if telemetry_disabled and (telemetry_disabled.lower() == "true" or telemetry_disabled == "1"): + return True + if not in_jupyter_notebook(): + return True + return False + + +def get_appinsights_log_handler( + user_agent, + *args, # pylint: disable=unused-argument + instrumentation_key=None, + component_name=None, + **kwargs + ): + """Enable the OpenCensus logging handler for specified logger and instrumentation key to send info to AppInsights. + + :param user_agent: The user agent string identifying the calling SDK client. + :type user_agent: str + :param instrumentation_key: The Application Insights instrumentation key. + :type instrumentation_key: str + :param component_name: The component name. + :type component_name: str + :param args: Optional arguments for formatting messages. + :type args: list + :param kwargs: Optional keyword arguments for adding additional information to messages. + :type kwargs: dict + :return: The logging handler. 
+ :rtype: opencensus.ext.azure.log_exporter.AzureLogHandler """ - request_payload = json.dumps([a.write() for a in data_to_send], default=_json_serialize_unknown) - - content = bytearray(request_payload, "utf-8") - begin = datetime.datetime.now() - request = http_client_t.Request( - service_endpoint_uri, - content, - { - "Accept": "application/json", - "Content-Type": "application/json; charset=utf-8", - }, - ) try: - response = http_client_t.urlopen(request, timeout=send_timeout) # nosec - logger.info("Sending %d bytes", len(content)) - status_code = response.getcode() - if 200 <= status_code < 300: - return SUCCESS - except HTTPError as e: - logger.error("Upload failed. HTTPError: %s", e) - if e.getcode() == 400: - return SUCCESS - except OSError as e: # socket timeout - # stop retry during socket timeout - logger.error("Upload failed. OSError: %s", e) - except Exception as e: # pylint: disable=broad-except - logger.error("Unexpected exception: %s", e) - finally: - logger.info( - "Finish uploading in %f seconds.", - (datetime.datetime.now() - begin).total_seconds(), - ) - - return FAILURE + if instrumentation_key is None: + instrumentation_key = INSTRUMENTATION_KEY + + if is_telemetry_collection_disabled(): + return logging.NullHandler() + + if not user_agent or not user_agent.lower() == USER_AGENT.lower(): + return logging.NullHandler() + + if "properties" in kwargs and "subscription_id" in kwargs.get("properties"): + if kwargs.get("properties")["subscription_id"] in test_subscriptions: + return logging.NullHandler() + + child_namespace = component_name or __name__ + current_logger = logging.getLogger(AML_INTERNAL_LOGGER_NAMESPACE).getChild(child_namespace) + current_logger.propagate = False + current_logger.setLevel(logging.CRITICAL) + + custom_properties = {"PythonVersion": platform.python_version()} + custom_properties.update({"user_agent": user_agent}) + if "properties" in kwargs: + custom_properties.update(kwargs.pop("properties")) + handler = 
AzureLogHandler(connection_string=f'InstrumentationKey={instrumentation_key}') + current_logger.addHandler(handler) + handler.addFilter(CustomDimensionsFilter(custom_properties)) + + return handler + except Exception: # pylint: disable=broad-except + # ignore exceptions, telemetry should not block + return logging.NullHandler() diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_logger_utils.py b/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_logger_utils.py index a5ef80620105..41a6f7f089fd 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_logger_utils.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/_utils/_logger_utils.py @@ -5,7 +5,7 @@ import logging import sys -# from azure.ai.ml._telemetry import AML_INTERNAL_LOGGER_NAMESPACE +from azure.ai.ml._telemetry.logging_handler import AML_INTERNAL_LOGGER_NAMESPACE def initialize_logger_info(module_logger: logging.Logger, terminator="\n") -> None: @@ -20,11 +20,11 @@ def initialize_logger_info(module_logger: logging.Logger, terminator="\n") -> No class OpsLogger: def __init__(self, name: str): - # self.logger: logging.Logger = logging.getLogger(AML_INTERNAL_LOGGER_NAMESPACE + name) - # self.logger.propagate = False + self.package_logger: logging.Logger = logging.getLogger(AML_INTERNAL_LOGGER_NAMESPACE + name) + self.package_logger.propagate = False self.module_logger = logging.getLogger(name) self.custom_dimensions = {} - # def update_info(self, data: dict) -> None: - # if "app_insights_handler" in data: - # self.logger.addHandler(data.pop("app_insights_handler")) + def update_info(self, data: dict) -> None: + if "app_insights_handler" in data: + self.package_logger.addHandler(data.pop("app_insights_handler")) diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_batch_deployment_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_batch_deployment_operations.py index eff64fe10d8f..a62c62341e58 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_batch_deployment_operations.py +++ 
b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_batch_deployment_operations.py @@ -14,6 +14,7 @@ OperationScope, _ScopeDependentOperations, ) +from azure.ai.ml._telemetry import ActivityType, monitor_with_activity from azure.ai.ml._utils._azureml_polling import AzureMLPolling from azure.ai.ml._utils._arm_id_utils import AMLVersionedArmId from azure.ai.ml._utils._endpoint_utils import upload_dependencies, validate_scoring_script @@ -30,7 +31,7 @@ from ._operation_orchestrator import OperationOrchestrator ops_logger = OpsLogger(__name__) -module_logger = ops_logger.module_logger +logger, module_logger = ops_logger.package_logger, ops_logger.module_logger class BatchDeploymentOperations(_ScopeDependentOperations): @@ -51,7 +52,7 @@ def __init__( **kwargs: Dict, ): super(BatchDeploymentOperations, self).__init__(operation_scope, operation_config) - # ops_logger.update_info(kwargs) + ops_logger.update_info(kwargs) self._batch_deployment = service_client_05_2022.batch_deployments self._batch_job_deployment = kwargs.pop("service_client_09_2020_dataplanepreview").batch_job_deployment self._batch_endpoint_operations = service_client_05_2022.batch_endpoints @@ -62,7 +63,7 @@ def __init__( self._requests_pipeline: HttpPipeline = kwargs.pop("requests_pipeline") @distributed_trace - # @monitor_with_activity(logger, "BatchDeployment.BeginCreateOrUpdate", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "BatchDeployment.BeginCreateOrUpdate", ActivityType.PUBLICAPI) def begin_create_or_update( self, deployment: BatchDeployment, @@ -125,7 +126,7 @@ def begin_create_or_update( raise ex @distributed_trace - # @monitor_with_activity(logger, "BatchDeployment.Get", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "BatchDeployment.Get", ActivityType.PUBLICAPI) def get(self, name: str, endpoint_name: str) -> BatchDeployment: """Get a deployment resource. 
@@ -150,7 +151,7 @@ def get(self, name: str, endpoint_name: str) -> BatchDeployment: return deployment @distributed_trace - # @monitor_with_activity(logger, "BatchDeployment.BeginDelete", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "BatchDeployment.BeginDelete", ActivityType.PUBLICAPI) def begin_delete(self, name: str, endpoint_name: str) -> LROPoller[None]: """Delete a batch deployment. @@ -183,7 +184,7 @@ def begin_delete(self, name: str, endpoint_name: str) -> LROPoller[None]: return delete_poller @distributed_trace - # @monitor_with_activity(logger, "BatchDeployment.List", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "BatchDeployment.List", ActivityType.PUBLICAPI) def list(self, endpoint_name: str) -> ItemPaged[BatchDeployment]: """List a deployment resource. @@ -201,7 +202,7 @@ def list(self, endpoint_name: str) -> ItemPaged[BatchDeployment]: ) @distributed_trace - # @monitor_with_activity(logger, "BatchDeployment.ListJobs", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "BatchDeployment.ListJobs", ActivityType.PUBLICAPI) def list_jobs(self, endpoint_name: str, *, name: str = None) -> ItemPaged[BatchJob]: """List jobs under the provided batch endpoint deployment. This is only valid for batch endpoint. 
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_batch_endpoint_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_batch_endpoint_operations.py index 5f7173eb9286..d88ab05c02d0 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_batch_endpoint_operations.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_batch_endpoint_operations.py @@ -21,6 +21,7 @@ OperationScope, _ScopeDependentOperations, ) +from azure.ai.ml._telemetry import ActivityType, monitor_with_activity from azure.ai.ml._utils._arm_id_utils import get_datastore_arm_id, is_ARM_id_for_resource, remove_datastore_prefix from azure.ai.ml._utils._azureml_polling import AzureMLPolling from azure.ai.ml._utils._endpoint_utils import validate_response @@ -54,7 +55,7 @@ from azure.ai.ml.operations import DatastoreOperations ops_logger = OpsLogger(__name__) -module_logger = ops_logger.module_logger +logger, module_logger = ops_logger.package_logger, ops_logger.module_logger class BatchEndpointOperations(_ScopeDependentOperations): @@ -76,7 +77,7 @@ def __init__( ): super(BatchEndpointOperations, self).__init__(operation_scope, operation_config) - # ops_logger.update_info(kwargs) + ops_logger.update_info(kwargs) self._batch_operation = service_client_05_2022.batch_endpoints self._batch_deployment_operation = service_client_05_2022.batch_deployments self._batch_job_endpoint = kwargs.pop("service_client_09_2020_dataplanepreview").batch_job_endpoint @@ -91,7 +92,7 @@ def _datastore_operations(self) -> "DatastoreOperations": return self._all_operations.all_operations[AzureMLResourceType.DATASTORE] @distributed_trace - # @monitor_with_activity(logger, "BatchEndpoint.List", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "BatchEndpoint.List", ActivityType.PUBLICAPI) def list(self) -> ItemPaged[BatchEndpoint]: """List endpoints of the workspace. 
@@ -106,7 +107,7 @@ def list(self) -> ItemPaged[BatchEndpoint]: ) @distributed_trace - # @monitor_with_activity(logger, "BatchEndpoint.Get", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "BatchEndpoint.Get", ActivityType.PUBLICAPI) def get( self, name: str, @@ -129,7 +130,7 @@ def get( return endpoint_data @distributed_trace - # @monitor_with_activity(logger, "BatchEndpoint.BeginDelete", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "BatchEndpoint.BeginDelete", ActivityType.PUBLICAPI) def begin_delete(self, name: str) -> LROPoller[None]: """Delete a batch Endpoint. @@ -159,7 +160,7 @@ def begin_delete(self, name: str) -> LROPoller[None]: return delete_poller @distributed_trace - # @monitor_with_activity(logger, "BatchEndpoint.BeginCreateOrUpdate", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "BatchEndpoint.BeginCreateOrUpdate", ActivityType.PUBLICAPI) def begin_create_or_update(self, endpoint: BatchEndpoint) -> LROPoller[BatchEndpoint]: """Create or update a batch endpoint. @@ -187,7 +188,7 @@ def begin_create_or_update(self, endpoint: BatchEndpoint) -> LROPoller[BatchEndp raise ex @distributed_trace - # @monitor_with_activity(logger, "BatchEndpoint.Invoke", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "BatchEndpoint.Invoke", ActivityType.PUBLICAPI) def invoke( self, endpoint_name: str, @@ -293,7 +294,7 @@ def invoke( return BatchJobResource.deserialize(batch_job) @distributed_trace - # @monitor_with_activity(logger, "BatchEndpoint.ListJobs", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "BatchEndpoint.ListJobs", ActivityType.PUBLICAPI) def list_jobs(self, endpoint_name: str) -> ItemPaged[BatchJob]: """List jobs under the provided batch endpoint deployment. This is only valid for batch endpoint. 
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_code_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_code_operations.py index 11d8c4f92f73..3b4fb110f5f9 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_code_operations.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_code_operations.py @@ -20,6 +20,7 @@ ) from azure.ai.ml._restclient.v2022_10_01_preview import AzureMachineLearningWorkspaces as ServiceClient102022 from azure.ai.ml._scope_dependent_operations import OperationConfig, OperationScope, _ScopeDependentOperations +from azure.ai.ml._telemetry import ActivityType, monitor_with_activity from azure.ai.ml._utils._logger_utils import OpsLogger from azure.ai.ml._utils._registry_utils import get_asset_body_for_registry_storage, get_sas_uri_for_registry_asset from azure.ai.ml.entities._assets import Code @@ -34,7 +35,7 @@ from azure.core.exceptions import HttpResponseError ops_logger = OpsLogger(__name__) -module_logger = ops_logger.module_logger +logger, module_logger = ops_logger.package_logger, ops_logger.module_logger class CodeOperations(_ScopeDependentOperations): @@ -53,14 +54,14 @@ def __init__( **kwargs: Dict, ): super(CodeOperations, self).__init__(operation_scope, operation_config) - # ops_logger.update_info(kwargs) + ops_logger.update_info(kwargs) self._service_client = service_client self._version_operation = service_client.code_versions self._container_operation = service_client.code_containers self._datastore_operation = datastore_operations self._init_kwargs = kwargs - # @monitor_with_activity(logger, "Code.CreateOrUpdate", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Code.CreateOrUpdate", ActivityType.PUBLICAPI) def create_or_update(self, code: Code) -> Code: """Returns created or updated code asset. 
@@ -139,7 +140,7 @@ def create_or_update(self, code: Code) -> Code: ) raise ex - # @monitor_with_activity(logger, "Code.Get", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Code.Get", ActivityType.PUBLICAPI) def get(self, name: str, version: str) -> Code: """Returns information about the specified code asset. diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_component_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_component_operations.py index d527365f3072..255a8304aef8 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_component_operations.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_component_operations.py @@ -20,6 +20,11 @@ OperationScope, _ScopeDependentOperations, ) +from azure.ai.ml._telemetry import ( + ActivityType, + monitor_with_activity, + monitor_with_telemetry_mixin, +) from azure.ai.ml._utils._arm_id_utils import is_ARM_id_for_resource, is_registry_id_for_resource from azure.ai.ml._utils._asset_utils import ( _archive_or_restore, @@ -47,7 +52,7 @@ from ..entities._job.pipeline._attr_dict import has_attr_safe ops_logger = OpsLogger(__name__) -module_logger = ops_logger.module_logger +logger, module_logger = ops_logger.package_logger, ops_logger.module_logger class ComponentOperations(_ScopeDependentOperations): @@ -67,8 +72,7 @@ def __init__( **kwargs: Dict, ): super(ComponentOperations, self).__init__(operation_scope, operation_config) - # if "app_insights_handler" in kwargs: - # logger.addHandler(kwargs.pop("app_insights_handler")) + ops_logger.update_info(kwargs) self._version_operation = service_client.component_versions self._container_operation = service_client.component_containers self._all_operations = all_operations @@ -95,7 +99,7 @@ def _job_operations(self): return self._all_operations.get_operation(AzureMLResourceType.JOB, lambda x: isinstance(x, JobOperations)) - # @monitor_with_activity(logger, "Component.List", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Component.List", 
ActivityType.PUBLICAPI) def list( self, name: Union[str, None] = None, @@ -149,7 +153,7 @@ def list( ) ) - # @monitor_with_telemetry_mixin(logger, "Component.Get", ActivityType.PUBLICAPI) + @monitor_with_telemetry_mixin(logger, "Component.Get", ActivityType.PUBLICAPI) def get(self, name: str, version: Optional[str] = None, label: Optional[str] = None) -> Component: """Returns information about the specified component. @@ -204,7 +208,7 @@ def get(self, name: str, version: Optional[str] = None, label: Optional[str] = N return component @experimental - # @monitor_with_telemetry_mixin(logger, "Component.Validate", ActivityType.PUBLICAPI) + @monitor_with_telemetry_mixin(logger, "Component.Validate", ActivityType.PUBLICAPI) # pylint: disable=no-self-use def validate( self, @@ -223,7 +227,7 @@ def validate( """ return self._validate(component, raise_on_failure=raise_on_failure) - # @monitor_with_telemetry_mixin(logger, "Component.Validate", ActivityType.INTERNALCALL) + @monitor_with_telemetry_mixin(logger, "Component.Validate", ActivityType.INTERNALCALL) def _validate( # pylint: disable=no-self-use self, component: Union[Component, types.FunctionType], @@ -242,12 +246,12 @@ def _validate( # pylint: disable=no-self-use result.resolve_location_for_diagnostics(component._source_path) return result - # @monitor_with_telemetry_mixin( - # logger, - # "Component.CreateOrUpdate", - # ActivityType.PUBLICAPI, - # extra_keys=["is_anonymous"], - # ) + @monitor_with_telemetry_mixin( + logger, + "Component.CreateOrUpdate", + ActivityType.PUBLICAPI, + extra_keys=["is_anonymous"], + ) def create_or_update( self, component: Union[Component, types.FunctionType], version=None, *, skip_validation: bool = False, **kwargs ) -> Component: @@ -353,7 +357,7 @@ def create_or_update( self._resolve_arm_id_for_pipeline_component_jobs(component.jobs, self._orchestrators.resolve_azureml_id) return component - # @monitor_with_telemetry_mixin(logger, "Component.Archive", ActivityType.PUBLICAPI) + 
@monitor_with_telemetry_mixin(logger, "Component.Archive", ActivityType.PUBLICAPI) def archive(self, name: str, version: str = None, label: str = None) -> None: """Archive a component. @@ -374,7 +378,7 @@ def archive(self, name: str, version: str = None, label: str = None) -> None: label=label, ) - # @monitor_with_telemetry_mixin(logger, "Component.Restore", ActivityType.PUBLICAPI) + @monitor_with_telemetry_mixin(logger, "Component.Restore", ActivityType.PUBLICAPI) def restore(self, name: str, version: str = None, label: str = None) -> None: """Restore an archived component. diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_compute_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_compute_operations.py index 7d4b31f05904..9254d6b0991b 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_compute_operations.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_compute_operations.py @@ -8,6 +8,7 @@ from azure.ai.ml._restclient.v2022_01_01_preview import AzureMachineLearningWorkspaces as ServiceClient012022Preview from azure.ai.ml._scope_dependent_operations import OperationConfig, OperationScope, _ScopeDependentOperations +from azure.ai.ml._telemetry import ActivityType, monitor_with_activity from azure.ai.ml._utils._logger_utils import OpsLogger from azure.ai.ml.constants._common import COMPUTE_UPDATE_ERROR from azure.ai.ml.constants._compute import ComputeType @@ -16,7 +17,7 @@ from azure.core.tracing.decorator import distributed_trace ops_logger = OpsLogger(__name__) -module_logger = ops_logger.module_logger +logger, module_logger = ops_logger.package_logger, ops_logger.module_logger class ComputeOperations(_ScopeDependentOperations): @@ -35,7 +36,7 @@ def __init__( **kwargs: Dict, ): super(ComputeOperations, self).__init__(operation_scope, operation_config) - # ops_logger.update_info(kwargs) + ops_logger.update_info(kwargs) self._operation = service_client.compute self._workspace_operations = service_client.workspaces self._vmsize_operations = 
service_client.virtual_machine_sizes @@ -43,7 +44,7 @@ def __init__( self._init_kwargs = kwargs @distributed_trace - # @monitor_with_activity(logger, "Compute.List", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Compute.List", ActivityType.PUBLICAPI) def list(self, *, compute_type: str = None) -> Iterable[Compute]: """List computes of the workspace. @@ -64,7 +65,7 @@ def list(self, *, compute_type: str = None) -> Iterable[Compute]: ) @distributed_trace - # @monitor_with_activity(logger, "Compute.Get", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Compute.Get", ActivityType.PUBLICAPI) def get(self, name: str) -> Compute: """Get a compute resource. @@ -82,7 +83,7 @@ def get(self, name: str) -> Compute: return Compute._from_rest_object(rest_obj) @distributed_trace - # @monitor_with_activity(logger, "Compute.ListNodes", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Compute.ListNodes", ActivityType.PUBLICAPI) def list_nodes(self, name: str) -> Iterable[AmlComputeNodeInfo]: """Get a compute resource nodes. @@ -99,7 +100,7 @@ def list_nodes(self, name: str) -> Iterable[AmlComputeNodeInfo]: ) @distributed_trace - # @monitor_with_activity(logger, "Compute.BeginCreateOrUpdate", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Compute.BeginCreateOrUpdate", ActivityType.PUBLICAPI) def begin_create_or_update(self, compute: Compute) -> LROPoller[Compute]: """Create a compute. @@ -128,7 +129,7 @@ def begin_create_or_update(self, compute: Compute) -> LROPoller[Compute]: return poller @distributed_trace - # @monitor_with_activity(logger, "Compute.Attach", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Compute.Attach", ActivityType.PUBLICAPI) def begin_attach(self, compute: Compute, **kwargs: Any) -> LROPoller[Compute]: """Attaches a compute to the workspace. 
@@ -140,7 +141,7 @@ def begin_attach(self, compute: Compute, **kwargs: Any) -> LROPoller[Compute]: return self.begin_create_or_update(compute=compute, **kwargs) @distributed_trace - # @monitor_with_activity(logger, "Compute.BeginUpdate", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Compute.BeginUpdate", ActivityType.PUBLICAPI) def begin_update(self, compute: Compute) -> LROPoller[Compute]: """Update a compute. Currently only valid for AmlCompute. @@ -166,7 +167,7 @@ def begin_update(self, compute: Compute) -> LROPoller[Compute]: return poller @distributed_trace - # @monitor_with_activity(logger, "Compute.BeginDelete", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Compute.BeginDelete", ActivityType.PUBLICAPI) def begin_delete(self, name: str, *, action: str = "Delete") -> LROPoller[None]: """Delete a compute. @@ -186,7 +187,7 @@ def begin_delete(self, name: str, *, action: str = "Delete") -> LROPoller[None]: ) @distributed_trace - # @monitor_with_activity(logger, "Compute.BeginStart", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Compute.BeginStart", ActivityType.PUBLICAPI) def begin_start(self, name: str) -> LROPoller[None]: """Start a compute. @@ -203,7 +204,7 @@ def begin_start(self, name: str) -> LROPoller[None]: ) @distributed_trace - # @monitor_with_activity(logger, "Compute.BeginStop", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Compute.BeginStop", ActivityType.PUBLICAPI) def begin_stop(self, name: str) -> LROPoller[None]: """Stop a compute. @@ -219,7 +220,7 @@ def begin_stop(self, name: str) -> LROPoller[None]: ) @distributed_trace - # @monitor_with_activity(logger, "Compute.BeginRestart", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Compute.BeginRestart", ActivityType.PUBLICAPI) def begin_restart(self, name: str) -> LROPoller[None]: """Restart a compute. 
@@ -235,7 +236,7 @@ def begin_restart(self, name: str) -> LROPoller[None]: ) @distributed_trace - # @monitor_with_activity(logger, "Compute.ListUsage", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Compute.ListUsage", ActivityType.PUBLICAPI) def list_usage(self, *, location: str = None) -> Iterable[Usage]: """Gets the current usage information as well as limits for AML resources for given subscription and location. @@ -254,7 +255,7 @@ def list_usage(self, *, location: str = None) -> Iterable[Usage]: ) @distributed_trace - # @monitor_with_activity(logger, "Compute.ListSizes", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Compute.ListSizes", ActivityType.PUBLICAPI) def list_sizes(self, *, location: str = None, compute_type: str = None) -> Iterable[VmSize]: """Returns supported VM Sizes in a location. diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_data_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_data_operations.py index ced22578d307..35733e55683d 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_data_operations.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_data_operations.py @@ -20,6 +20,7 @@ from azure.ai.ml._restclient.v2022_02_01_preview.models import ListViewType from azure.ai.ml._restclient.v2022_05_01 import AzureMachineLearningWorkspaces as ServiceClient052022 from azure.ai.ml._scope_dependent_operations import OperationConfig, OperationScope, _ScopeDependentOperations +from azure.ai.ml._telemetry import ActivityType, monitor_with_activity from azure.ai.ml._utils._asset_utils import ( _archive_or_restore, _create_or_update_autoincrement, @@ -50,7 +51,7 @@ from azure.core.paging import ItemPaged ops_logger = OpsLogger(__name__) -module_logger = ops_logger.module_logger +logger, module_logger = ops_logger.package_logger, ops_logger.module_logger class DataOperations(_ScopeDependentOperations): @@ -64,7 +65,7 @@ def __init__( ): super(DataOperations, self).__init__(operation_scope, operation_config) - # 
ops_logger.update_info(kwargs) + ops_logger.update_info(kwargs) self._operation = service_client.data_versions self._container_operation = service_client.data_containers self._datastore_operation = datastore_operations @@ -74,7 +75,7 @@ def __init__( # returns the asset associated with the label self._managed_label_resolver = {"latest": self._get_latest_version} - # @monitor_with_activity(logger, "Data.List", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Data.List", ActivityType.PUBLICAPI) def list( self, name: Optional[str] = None, @@ -106,7 +107,7 @@ def list( **self._scope_kwargs, ) - # @monitor_with_activity(logger, "Data.Get", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Data.Get", ActivityType.PUBLICAPI) def get(self, name: str, version: Optional[str] = None, label: Optional[str] = None) -> Data: """Get the specified data asset. @@ -155,7 +156,7 @@ def get(self, name: str, version: Optional[str] = None, label: Optional[str] = N except (ValidationException, SchemaValidationError) as ex: log_and_raise_error(ex) - # @monitor_with_activity(logger, "Data.CreateOrUpdate", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Data.CreateOrUpdate", ActivityType.PUBLICAPI) def create_or_update(self, data: Data) -> Data: """Returns created or updated data asset. @@ -216,7 +217,7 @@ def create_or_update(self, data: Data) -> Data: ) raise ex - # @monitor_with_activity(logger, "Data.Validate", ActivityType.INTERNALCALL) + @monitor_with_activity(logger, "Data.Validate", ActivityType.INTERNALCALL) def _validate(self, data: Data) -> Union[List[str], None]: if not data.path: msg = "Missing data path. Path is required for data." 
@@ -243,7 +244,7 @@ def _validate(self, data: Data) -> Union[List[str], None]: metadata_yaml_path = None except Exception: # pylint: disable=broad-except # skip validation for remote MLTable when the contents cannot be read - module_logger.info( + logger.info( "Unable to access MLTable metadata at path %s", asset_path, exc_info=1, @@ -277,14 +278,14 @@ def _try_get_mltable_metadata_jsonschema( try: return download_mltable_metadata_schema(mltable_schema_url, self._requests_pipeline) except Exception: # pylint: disable=broad-except - module_logger.info( + logger.info( 'Failed to download MLTable metadata jsonschema from "%s", skipping validation', mltable_schema_url, exc_info=1, ) return None - # @monitor_with_activity(logger, "Data.Archive", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Data.Archive", ActivityType.PUBLICAPI) def archive(self, name: str, version: str = None, label: str = None) -> None: """Archive a data asset. @@ -307,7 +308,7 @@ def archive(self, name: str, version: str = None, label: str = None) -> None: label=label, ) - # @monitor_with_activity(logger, "Data.Restore", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Data.Restore", ActivityType.PUBLICAPI) def restore(self, name: str, version: str = None, label: str = None) -> None: """Restore an archived data asset. 
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_datastore_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_datastore_operations.py index 84feb7df6298..cad7bb3faa61 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_datastore_operations.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_datastore_operations.py @@ -12,12 +12,13 @@ from azure.ai.ml._restclient.v2022_05_01 import AzureMachineLearningWorkspaces as ServiceClient2022_05_01 from azure.ai.ml._restclient.v2022_05_01.models import DatastoreData, DatastoreSecrets, NoneDatastoreCredentials from azure.ai.ml._scope_dependent_operations import OperationConfig, OperationScope, _ScopeDependentOperations +from azure.ai.ml._telemetry import ActivityType, monitor_with_activity from azure.ai.ml._utils._logger_utils import OpsLogger from azure.ai.ml.entities._datastore.datastore import Datastore from azure.ai.ml.exceptions import ValidationException ops_logger = OpsLogger(__name__) -module_logger = ops_logger.module_logger +logger, module_logger = ops_logger.package_logger, ops_logger.module_logger class DatastoreOperations(_ScopeDependentOperations): @@ -36,12 +37,12 @@ def __init__( **kwargs: Dict ): super(DatastoreOperations, self).__init__(operation_scope, operation_config) - # ops_logger.update_info(kwargs) + ops_logger.update_info(kwargs) self._operation = serviceclient_2022_05_01.datastores self._credential = serviceclient_2022_05_01._config.credential self._init_kwargs = kwargs - # @monitor_with_activity(logger, "Datastore.List", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Datastore.List", ActivityType.PUBLICAPI) def list(self, *, include_secrets: bool = False) -> Iterable[Datastore]: """Lists all datastores and associated information within a workspace. 
@@ -63,7 +64,7 @@ def _list_helper(datastore_resource, include_secrets: bool): **self._init_kwargs ) - # @monitor_with_activity(logger, "Datastore.ListSecrets", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Datastore.ListSecrets", ActivityType.PUBLICAPI) def _list_secrets(self, name: str) -> DatastoreSecrets: return self._operation.list_secrets( name=name, @@ -72,7 +73,7 @@ def _list_secrets(self, name: str) -> DatastoreSecrets: **self._init_kwargs ) - # @monitor_with_activity(logger, "Datastore.Delete", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Datastore.Delete", ActivityType.PUBLICAPI) def delete(self, name: str) -> None: """Deletes a datastore reference with the given name from the workspace. This method does not delete the actual datastore or @@ -89,7 +90,7 @@ def delete(self, name: str) -> None: **self._init_kwargs ) - # @monitor_with_activity(logger, "Datastore.Get", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Datastore.Get", ActivityType.PUBLICAPI) def get(self, name: str, *, include_secrets: bool = False) -> Datastore: """Returns information about the datastore referenced by the given name. @@ -121,7 +122,7 @@ def _fetch_and_populate_secret(self, datastore_resource: DatastoreData) -> None: secrets = self._list_secrets(datastore_resource.name) datastore_resource.properties.credentials.secrets = secrets - # @monitor_with_activity(logger, "Datastore.GetDefault", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Datastore.GetDefault", ActivityType.PUBLICAPI) def get_default(self, *, include_secrets: bool = False) -> Datastore: """Returns the workspace's default datastore. 
@@ -143,7 +144,7 @@ def get_default(self, *, include_secrets: bool = False) -> Datastore: except (ValidationException, SchemaValidationError) as ex: log_and_raise_error(ex) - # @monitor_with_activity(logger, "Datastore.CreateOrUpdate", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Datastore.CreateOrUpdate", ActivityType.PUBLICAPI) def create_or_update(self, datastore: Datastore) -> Datastore: """Attaches the passed in datastore to the workspace or updates the datastore if it already exists. diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_environment_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_environment_operations.py index d78c3694a1cd..72357b1f9b46 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_environment_operations.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_environment_operations.py @@ -21,6 +21,7 @@ OperationScope, _ScopeDependentOperations, ) +from azure.ai.ml._telemetry import ActivityType, monitor_with_activity from azure.ai.ml._utils._asset_utils import ( _archive_or_restore, _create_or_update_autoincrement, @@ -34,7 +35,7 @@ from azure.ai.ml.exceptions import ErrorCategory, ErrorTarget, ValidationErrorType, ValidationException ops_logger = OpsLogger(__name__) -module_logger = ops_logger.module_logger +logger, module_logger = ops_logger.package_logger, ops_logger.module_logger class EnvironmentOperations(_ScopeDependentOperations): @@ -54,7 +55,7 @@ def __init__( **kwargs: Any, ): super(EnvironmentOperations, self).__init__(operation_scope, operation_config) - # ops_logger.update_info(kwargs) + ops_logger.update_info(kwargs) self._kwargs = kwargs self._containers_operations = service_client.environment_containers self._version_operations = service_client.environment_versions @@ -66,7 +67,7 @@ def __init__( # returns the asset associated with the label self._managed_label_resolver = {"latest": self._get_latest_version} - # @monitor_with_activity(logger, "Environment.CreateOrUpdate", 
ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Environment.CreateOrUpdate", ActivityType.PUBLICAPI) def create_or_update(self, environment: Environment) -> Environment: """Returns created or updated environment asset. @@ -190,7 +191,7 @@ def _get(self, name: str, version: str = None) -> EnvironmentVersionData: ) ) - # @monitor_with_activity(logger, "Environment.Get", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Environment.Get", ActivityType.PUBLICAPI) def get(self, name: str, version: str = None, label: str = None) -> Environment: """Returns the specified environment asset. @@ -232,7 +233,7 @@ def get(self, name: str, version: str = None, label: str = None) -> Environment: return Environment._from_rest_object(env_version_resource) - # @monitor_with_activity(logger, "Environment.List", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Environment.List", ActivityType.PUBLICAPI) def list( self, name: str = None, @@ -285,7 +286,7 @@ def list( ) ) - # @monitor_with_activity(logger, "Environment.Delete", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Environment.Delete", ActivityType.PUBLICAPI) def archive(self, name: str, version: str = None, label: str = None) -> None: """Archive an environment or an environment version. @@ -307,7 +308,7 @@ def archive(self, name: str, version: str = None, label: str = None) -> None: label=label, ) - # @monitor_with_activity(logger, "Environment.Restore", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Environment.Restore", ActivityType.PUBLICAPI) def restore(self, name: str, version: str = None, label: str = None) -> None: """Restore an archived environment version. 
diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_job_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_job_operations.py index b237fb7dca08..cfde50360755 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_job_operations.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_job_operations.py @@ -36,6 +36,7 @@ OperationScope, _ScopeDependentOperations, ) +from azure.ai.ml._telemetry import ActivityType, monitor_with_activity, monitor_with_telemetry_mixin from azure.ai.ml._utils._http_utils import HttpPipeline from azure.ai.ml._utils._logger_utils import OpsLogger from azure.ai.ml._utils.utils import ( @@ -118,7 +119,7 @@ from azure.ai.ml.operations import DatastoreOperations ops_logger = OpsLogger(__name__) -module_logger = ops_logger.module_logger +logger, module_logger = ops_logger.package_logger, ops_logger.module_logger class JobOperations(_ScopeDependentOperations): @@ -139,7 +140,7 @@ def __init__( **kwargs: Any, ): super(JobOperations, self).__init__(operation_scope, operation_config) - # ops_logger.update_info(kwargs) + ops_logger.update_info(kwargs) self._operation_2022_10_preview = service_client_10_2022_preview.jobs self._service_client = service_client_10_2022_preview self._all_operations = all_operations @@ -219,7 +220,7 @@ def _api_url(self): return self._api_base_url @distributed_trace - # @monitor_with_activity(logger, "Job.List", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Job.List", ActivityType.PUBLICAPI) def list( self, *, @@ -262,7 +263,7 @@ def _handle_rest_errors(self, job_object): pass @distributed_trace - # @monitor_with_telemetry_mixin(logger, "Job.Get", ActivityType.PUBLICAPI) + @monitor_with_telemetry_mixin(logger, "Job.Get", ActivityType.PUBLICAPI) def get(self, name: str) -> Job: """Get a job resource. 
@@ -286,7 +287,7 @@ def get(self, name: str) -> Job: return job @distributed_trace - # @monitor_with_telemetry_mixin(logger, "Job.ShowServices", ActivityType.PUBLICAPI) + @monitor_with_telemetry_mixin(logger, "Job.ShowServices", ActivityType.PUBLICAPI) def show_services(self, name: str, node_index: int = 0) -> Dict[str, ServiceInstance]: """Get services associated with a job's node. @@ -307,7 +308,7 @@ def show_services(self, name: str, node_index: int = 0) -> Dict[str, ServiceInst } @distributed_trace - # @monitor_with_activity(logger, "Job.Cancel", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Job.Cancel", ActivityType.PUBLICAPI) def begin_cancel(self, name: str) -> LROPoller[None]: """Cancel job resource. @@ -363,7 +364,7 @@ def _try_get_compute_arm_id(self, compute: Union[Compute, str]): @distributed_trace @experimental - # @monitor_with_telemetry_mixin(logger, "Job.Validate", ActivityType.PUBLICAPI) + @monitor_with_telemetry_mixin(logger, "Job.Validate", ActivityType.PUBLICAPI) def validate(self, job: Job, *, raise_on_failure: bool = False, **kwargs) -> ValidationResult: """Validate a job. Anonymous assets may be created if there are inline defined entities, e.g. Component, Environment & Code. 
Only pipeline job @@ -378,7 +379,7 @@ def validate(self, job: Job, *, raise_on_failure: bool = False, **kwargs) -> Val """ return self._validate(job, raise_on_failure=raise_on_failure, **kwargs) - # @monitor_with_telemetry_mixin(logger, "Job.Validate", ActivityType.INTERNALCALL) + @monitor_with_telemetry_mixin(logger, "Job.Validate", ActivityType.INTERNALCALL) def _validate( self, job: Job, *, raise_on_failure: bool = False, **kwargs # pylint:disable=unused-argument ) -> ValidationResult: @@ -430,7 +431,7 @@ def _validate( return validation_result.try_raise(raise_error=raise_on_failure, error_target=ErrorTarget.PIPELINE) @distributed_trace - # @monitor_with_telemetry_mixin(logger, "Job.CreateOrUpdate", ActivityType.PUBLICAPI) + @monitor_with_telemetry_mixin(logger, "Job.CreateOrUpdate", ActivityType.PUBLICAPI) def create_or_update( self, job: Job, @@ -577,7 +578,7 @@ def _archive_or_restore(self, name: str, is_archived: bool): ) @distributed_trace - # @monitor_with_telemetry_mixin(logger, "Job.Archive", ActivityType.PUBLICAPI) + @monitor_with_telemetry_mixin(logger, "Job.Archive", ActivityType.PUBLICAPI) def archive(self, name: str) -> None: """Archive a job or restore an archived job. @@ -589,7 +590,7 @@ def archive(self, name: str) -> None: self._archive_or_restore(name=name, is_archived=True) @distributed_trace - # @monitor_with_telemetry_mixin(logger, "Job.Restore", ActivityType.PUBLICAPI) + @monitor_with_telemetry_mixin(logger, "Job.Restore", ActivityType.PUBLICAPI) def restore(self, name: str) -> None: """Archive a job or restore an archived job. @@ -601,7 +602,7 @@ def restore(self, name: str) -> None: self._archive_or_restore(name=name, is_archived=False) @distributed_trace - # @monitor_with_activity(logger, "Job.Stream", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Job.Stream", ActivityType.PUBLICAPI) def stream(self, name: str) -> None: """Stream logs of a job. 
@@ -618,7 +619,7 @@ def stream(self, name: str) -> None: ) @distributed_trace - # @monitor_with_activity(logger, "Job.Download", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Job.Download", ActivityType.PUBLICAPI) def download( self, name: str, @@ -673,7 +674,7 @@ def download( output_directory_name = "named-outputs" def log_missing_uri(what: str): - module_logger.debug( + logger.debug( 'Could not download %s for job "%s" (job status: %s)', what, job_details.name, job_details.status ) diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_model_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_model_operations.py index 6ab8d729d265..50967c0b168e 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_model_operations.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_model_operations.py @@ -26,6 +26,7 @@ from azure.ai.ml._restclient.v2022_02_01_preview.models import ListViewType, ModelVersionData from azure.ai.ml._restclient.v2022_05_01 import AzureMachineLearningWorkspaces as ServiceClient052022 from azure.ai.ml._scope_dependent_operations import OperationConfig, OperationScope, _ScopeDependentOperations +from azure.ai.ml._telemetry import ActivityType, monitor_with_activity from azure.ai.ml._utils._asset_utils import ( _archive_or_restore, _create_or_update_autoincrement, @@ -54,7 +55,7 @@ from azure.core.exceptions import ResourceNotFoundError ops_logger = OpsLogger(__name__) -module_logger = ops_logger.module_logger +logger, module_logger = ops_logger.package_logger, ops_logger.module_logger class ModelOperations(_ScopeDependentOperations): @@ -65,7 +66,6 @@ class ModelOperations(_ScopeDependentOperations): attaches it as an attribute. 
""" - # pylint: disable=unused-argument def __init__( self, operation_scope: OperationScope, @@ -75,7 +75,7 @@ def __init__( **kwargs: Dict, ): super(ModelOperations, self).__init__(operation_scope, operation_config) - # ops_logger.update_info(kwargs) + ops_logger.update_info(kwargs) self._model_versions_operation = service_client.model_versions self._model_container_operation = service_client.model_containers self._service_client = service_client @@ -85,7 +85,7 @@ def __init__( # returns the asset associated with the label self._managed_label_resolver = {"latest": self._get_latest_version} - # @monitor_with_activity(logger, "Model.CreateOrUpdate", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Model.CreateOrUpdate", ActivityType.PUBLICAPI) def create_or_update( self, model: Union[Model, WorkspaceModelReference] ) -> Model: # TODO: Are we going to implement job_name? @@ -250,7 +250,7 @@ def _get(self, name: str, version: str = None) -> ModelVersionData: # name:late ) ) - # @monitor_with_activity(logger, "Model.Get", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Model.Get", ActivityType.PUBLICAPI) def get(self, name: str, version: str = None, label: str = None) -> Model: """Returns information about the specified model asset. @@ -292,7 +292,7 @@ def get(self, name: str, version: str = None, label: str = None) -> Model: return Model._from_rest_object(model_version_resource) - # @monitor_with_activity(logger, "Model.Download", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Model.Download", ActivityType.PUBLICAPI) def download(self, name: str, version: str, download_path: Union[PathLike, str] = ".") -> None: """Download files related to a model. 
@@ -350,7 +350,7 @@ def download(self, name: str, version: str, download_path: Union[PathLike, str] module_logger.info("Downloading the model %s at %s\n", path_prefix, path_file) storage_client.download(starts_with=path_prefix, destination=path_file) - # @monitor_with_activity(logger, "Model.Archive", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Model.Archive", ActivityType.PUBLICAPI) def archive(self, name: str, version: str = None, label: str = None) -> None: """Archive a model asset. @@ -371,7 +371,7 @@ def archive(self, name: str, version: str = None, label: str = None) -> None: label=label, ) - # @monitor_with_activity(logger, "Model.Restore", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Model.Restore", ActivityType.PUBLICAPI) def restore(self, name: str, version: str = None, label: str = None) -> None: """Restore an archived model asset. @@ -392,7 +392,7 @@ def restore(self, name: str, version: str = None, label: str = None) -> None: label=label, ) - # @monitor_with_activity(logger, "Model.List", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Model.List", ActivityType.PUBLICAPI) def list( self, name: str = None, diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_online_deployment_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_online_deployment_operations.py index c2281ec92e0e..c31e2e3b32ac 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_online_deployment_operations.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_online_deployment_operations.py @@ -20,6 +20,7 @@ OperationScope, _ScopeDependentOperations, ) +from azure.ai.ml._telemetry import ActivityType, monitor_with_activity from azure.ai.ml._utils._azureml_polling import AzureMLPolling from azure.ai.ml._utils._arm_id_utils import AMLVersionedArmId from azure.ai.ml._utils._endpoint_utils import upload_dependencies,validate_scoring_script @@ -43,7 +44,7 @@ from ._operation_orchestrator import OperationOrchestrator ops_logger = 
OpsLogger(__name__) -module_logger = ops_logger.module_logger +logger, module_logger = ops_logger.package_logger, ops_logger.module_logger class OnlineDeploymentOperations(_ScopeDependentOperations): @@ -65,7 +66,7 @@ def __init__( **kwargs: Dict, ): super(OnlineDeploymentOperations, self).__init__(operation_scope, operation_config) - # ops_logger.update_info(kwargs) + ops_logger.update_info(kwargs) self._local_deployment_helper = local_deployment_helper self._online_deployment = service_client_02_2022_preview.online_deployments self._online_endpoint_operations = service_client_02_2022_preview.online_endpoints @@ -74,7 +75,7 @@ def __init__( self._init_kwargs = kwargs @distributed_trace - # @monitor_with_activity(logger, "OnlineDeployment.BeginCreateOrUpdate", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "OnlineDeployment.BeginCreateOrUpdate", ActivityType.PUBLICAPI) def begin_create_or_update( self, deployment: OnlineDeployment, @@ -182,7 +183,7 @@ def begin_create_or_update( raise ex @distributed_trace - # @monitor_with_activity(logger, "OnlineDeployment.Get", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "OnlineDeployment.Get", ActivityType.PUBLICAPI) def get(self, name: str, endpoint_name: str, *, local: Optional[bool] = False) -> OnlineDeployment: """Get a deployment resource. @@ -213,7 +214,7 @@ def get(self, name: str, endpoint_name: str, *, local: Optional[bool] = False) - return deployment @distributed_trace - # @monitor_with_activity(logger, "OnlineDeployment.Delete", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "OnlineDeployment.Delete", ActivityType.PUBLICAPI) def begin_delete(self, name: str, endpoint_name: str, *, local: Optional[bool] = False) -> LROPoller[None]: """Delete a deployment. 
@@ -238,7 +239,7 @@ def begin_delete(self, name: str, endpoint_name: str, *, local: Optional[bool] = ) @distributed_trace - # @monitor_with_activity(logger, "OnlineDeployment.GetLogs", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "OnlineDeployment.GetLogs", ActivityType.PUBLICAPI) def get_logs( self, name: str, @@ -281,7 +282,7 @@ def get_logs( ).content @distributed_trace - # @monitor_with_activity(logger, "OnlineDeployment.List", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "OnlineDeployment.List", ActivityType.PUBLICAPI) def list(self, endpoint_name: str, *, local: bool = False) -> ItemPaged[OnlineDeployment]: """List a deployment resource. diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_online_endpoint_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_online_endpoint_operations.py index e90093d4e55c..41229003c44f 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_online_endpoint_operations.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_online_endpoint_operations.py @@ -21,6 +21,7 @@ OperationScope, _ScopeDependentOperations, ) +from azure.ai.ml._telemetry import ActivityType, monitor_with_activity from azure.ai.ml._utils._azureml_polling import AzureMLPolling from azure.ai.ml._utils._endpoint_utils import validate_response from azure.ai.ml._utils._http_utils import HttpPipeline @@ -40,7 +41,7 @@ from ._operation_orchestrator import OperationOrchestrator ops_logger = OpsLogger(__name__) -module_logger = ops_logger.module_logger +logger, module_logger = ops_logger.package_logger, ops_logger.module_logger def _strip_zeroes_from_traffic(traffic: Dict[str, str]) -> Dict[str, str]: @@ -66,7 +67,7 @@ def __init__( **kwargs: Dict, ): super(OnlineEndpointOperations, self).__init__(operation_scope, operation_config) - # ops_logger.update_info(kwargs) + ops_logger.update_info(kwargs) self._online_operation = service_client_02_2022_preview.online_endpoints self._online_deployment_operation = 
service_client_02_2022_preview.online_deployments self._all_operations = all_operations @@ -77,7 +78,7 @@ def __init__( self._requests_pipeline: HttpPipeline = kwargs.pop("requests_pipeline") @distributed_trace - # @monitor_with_activity(logger, "OnlineEndpoint.List", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "OnlineEndpoint.List", ActivityType.PUBLICAPI) def list(self, *, local: bool = False) -> ItemPaged[OnlineEndpoint]: """List endpoints of the workspace. @@ -97,7 +98,7 @@ def list(self, *, local: bool = False) -> ItemPaged[OnlineEndpoint]: ) @distributed_trace - # @monitor_with_activity(logger, "OnlineEndpoint.ListKeys", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "OnlineEndpoint.ListKeys", ActivityType.PUBLICAPI) def get_keys(self, name: str) -> Union[EndpointAuthKeys, EndpointAuthToken]: """Get the auth credentials. @@ -110,7 +111,7 @@ def get_keys(self, name: str) -> Union[EndpointAuthKeys, EndpointAuthToken]: return self._get_online_credentials(name=name) @distributed_trace - # @monitor_with_activity(logger, "OnlineEndpoint.Get", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "OnlineEndpoint.Get", ActivityType.PUBLICAPI) def get( self, name: str, @@ -158,7 +159,7 @@ def get( return converted_endpoint @distributed_trace - # @monitor_with_activity(logger, "OnlineEndpoint.BeginDelete", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "OnlineEndpoint.BeginDelete", ActivityType.PUBLICAPI) def begin_delete(self, name: str = None, *, local: bool = False) -> LROPoller[None]: """Delete an Online Endpoint. 
@@ -194,7 +195,7 @@ def begin_delete(self, name: str = None, *, local: bool = False) -> LROPoller[No return delete_poller @distributed_trace - # @monitor_with_activity(logger, "OnlineEndpoint.BeginDeleteOrUpdate", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "OnlineEndpoint.BeginDeleteOrUpdate", ActivityType.PUBLICAPI) def begin_create_or_update(self, endpoint: OnlineEndpoint, *, local: bool = False) -> LROPoller[OnlineEndpoint]: """Create or update an endpoint. @@ -257,7 +258,7 @@ def begin_create_or_update(self, endpoint: OnlineEndpoint, *, local: bool = Fals raise ex @distributed_trace - # @monitor_with_activity(logger, "OnlineEndpoint.BeginGenerateKeys", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "OnlineEndpoint.BeginGenerateKeys", ActivityType.PUBLICAPI) def begin_regenerate_keys( self, name: str, @@ -292,7 +293,7 @@ def begin_regenerate_keys( ) @distributed_trace - # @monitor_with_activity(logger, "OnlineEndpoint.Invoke", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "OnlineEndpoint.Invoke", ActivityType.PUBLICAPI) def invoke( self, endpoint_name: str, diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_registry_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_registry_operations.py index fb6139a5ed61..4c8514cdd7ba 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_registry_operations.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_registry_operations.py @@ -10,6 +10,7 @@ AzureMachineLearningWorkspaces as ServiceClient102022 from azure.ai.ml._scope_dependent_operations import (OperationsContainer, OperationScope) +from azure.ai.ml._telemetry import ActivityType, monitor_with_activity from azure.ai.ml._utils._logger_utils import OpsLogger from azure.ai.ml.entities import Registry from azure.ai.ml.exceptions import (ErrorCategory, ErrorTarget, @@ -22,7 +23,8 @@ from ..constants._common import LROConfigurations, Scope ops_logger = OpsLogger(__name__) -module_logger = ops_logger.module_logger +logger, 
module_logger = ops_logger.package_logger, ops_logger.module_logger + class RegistryOperations: """RegistryOperations. @@ -40,7 +42,7 @@ def __init__( credentials: TokenCredential = None, **kwargs: Dict, ): - # ops_logger.update_info(kwargs) + ops_logger.update_info(kwargs) self._subscription_id = operation_scope.subscription_id self._resource_group_name = operation_scope.resource_group_name self._default_registry_name = operation_scope.registry_name @@ -50,8 +52,8 @@ def __init__( self.containerRegistry = "none" self._init_kwargs = kwargs + @monitor_with_activity(logger, "Registry.List", ActivityType.PUBLICAPI) @experimental - #@ monitor_with_activity(logger, "Registry.List", ActivityType.PUBLICAPI) def list(self, *, scope: str = Scope.RESOURCE_GROUP) -> Iterable[Registry]: """List all registries that the user has access to in the current resource group or subscription. @@ -68,8 +70,8 @@ def list(self, *, scope: str = Scope.RESOURCE_GROUP) -> Iterable[Registry]: return self._operation.list(cls=lambda objs: [Registry._from_rest_object(obj) for obj in objs], \ resource_group_name=self._resource_group_name) + @monitor_with_activity(logger, "Registry.Get", ActivityType.PUBLICAPI) @experimental - # @monitor_with_activity(logger, "Registry.Get", ActivityType.PUBLICAPI) def get(self, name: str = None) -> Registry: """Get a registry by name. @@ -109,8 +111,8 @@ def _get_polling(self, name): path_format_arguments=path_format_arguments, ) + @monitor_with_activity(logger, "Registry.BeginCreate", ActivityType.PUBLICAPI) @experimental - # @monitor_with_activity(logger, "Registry.BeginCreate", ActivityType.PUBLICAPI) def begin_create( self, registry: Registry, @@ -138,8 +140,8 @@ def begin_create( return poller + @monitor_with_activity(logger, "Registry.Delete", ActivityType.PUBLICAPI) @experimental - # @monitor_with_activity(logger, "Registry.Delete", ActivityType.PUBLICAPI) def delete(self, *, name: str, **kwargs: Dict) -> None: """Delete a registry. 
Returns nothing on a successful operation. diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_schedule_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_schedule_operations.py index b46a2e91d0f1..6cfc432a14aa 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_schedule_operations.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_schedule_operations.py @@ -11,6 +11,7 @@ OperationScope, _ScopeDependentOperations, ) +from azure.ai.ml._telemetry import ActivityType, monitor_with_activity, monitor_with_telemetry_mixin from azure.ai.ml._utils._logger_utils import OpsLogger from azure.ai.ml.entities import Job, JobSchedule from azure.core.credentials import TokenCredential @@ -25,7 +26,7 @@ from ._operation_orchestrator import OperationOrchestrator ops_logger = OpsLogger(__name__) -module_logger = ops_logger.module_logger +logger, module_logger = ops_logger.package_logger, ops_logger.module_logger class ScheduleOperations(_ScopeDependentOperations): @@ -47,7 +48,7 @@ def __init__( **kwargs: Any, ): super(ScheduleOperations, self).__init__(operation_scope, operation_config) - # ops_logger.update_info(kwargs) + ops_logger.update_info(kwargs) self.service_client = service_client_10_2022.schedules self._all_operations = all_operations self._stream_logs_until_completion = stream_logs_until_completion @@ -69,7 +70,7 @@ def _job_operations(self) -> JobOperations: return self._all_operations.get_operation(AzureMLResourceType.JOB, lambda x: isinstance(x, JobOperations)) @distributed_trace - # @monitor_with_activity(logger, "Schedule.List", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Schedule.List", ActivityType.PUBLICAPI) def list( self, *, @@ -114,7 +115,7 @@ def _get_polling(self, name): ) @distributed_trace - # @monitor_with_activity(logger, "Schedule.Delete", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Schedule.Delete", ActivityType.PUBLICAPI) def begin_delete( self, name, @@ -134,7 +135,7 @@ def begin_delete( return poller 
@distributed_trace - # @monitor_with_telemetry_mixin(logger, "Schedule.Get", ActivityType.PUBLICAPI) + @monitor_with_telemetry_mixin(logger, "Schedule.Get", ActivityType.PUBLICAPI) def get( self, name, @@ -155,7 +156,7 @@ def get( ) @distributed_trace - # @monitor_with_telemetry_mixin(logger, "Schedule.CreateOrUpdate", ActivityType.PUBLICAPI) + @monitor_with_telemetry_mixin(logger, "Schedule.CreateOrUpdate", ActivityType.PUBLICAPI) def begin_create_or_update( self, schedule, @@ -187,7 +188,7 @@ def begin_create_or_update( return poller @distributed_trace - # @monitor_with_activity(logger, "Schedule.Enable", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Schedule.Enable", ActivityType.PUBLICAPI) def begin_enable( self, name, @@ -204,7 +205,7 @@ def begin_enable( return self.begin_create_or_update(schedule) @distributed_trace - # @monitor_with_activity(logger, "Schedule.Disable", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Schedule.Disable", ActivityType.PUBLICAPI) def begin_disable( self, name, diff --git a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_workspace_operations.py b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_workspace_operations.py index b8f513f5983a..41c819af812a 100644 --- a/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_workspace_operations.py +++ b/sdk/ml/azure-ai-ml/azure/ai/ml/operations/_workspace_operations.py @@ -12,6 +12,7 @@ from azure.ai.ml._restclient.v2022_05_01 import AzureMachineLearningWorkspaces as ServiceClient052022 from azure.ai.ml._restclient.v2022_05_01.models import WorkspaceUpdateParameters from azure.ai.ml._scope_dependent_operations import OperationsContainer, OperationScope +from azure.ai.ml._telemetry import ActivityType, monitor_with_activity from azure.ai.ml._utils._logger_utils import OpsLogger from azure.ai.ml._utils._workspace_utils import ( delete_resource_by_arm_id, @@ -40,7 +41,7 @@ from azure.core.tracing.decorator import distributed_trace ops_logger = OpsLogger(__name__) -module_logger = 
ops_logger.module_logger +logger, module_logger = ops_logger.package_logger, ops_logger.module_logger class WorkspaceOperations: @@ -59,7 +60,7 @@ def __init__( credentials: TokenCredential = None, **kwargs: Dict, ): - # ops_logger.update_info(kwargs) + ops_logger.update_info(kwargs) self._subscription_id = operation_scope.subscription_id self._resource_group_name = operation_scope.resource_group_name self._default_workspace_name = operation_scope.workspace_name @@ -69,7 +70,7 @@ def __init__( self._init_kwargs = kwargs self.containerRegistry = "none" - # @monitor_with_activity(logger, "Workspace.List", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Workspace.List", ActivityType.PUBLICAPI) def list(self, *, scope: str = Scope.RESOURCE_GROUP) -> Iterable[Workspace]: """List all workspaces that the user has access to in the current resource group or subscription. @@ -89,7 +90,7 @@ def list(self, *, scope: str = Scope.RESOURCE_GROUP) -> Iterable[Workspace]: cls=lambda objs: [Workspace._from_rest_object(obj) for obj in objs], ) - # @monitor_with_activity(logger, "Workspace.Get", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Workspace.Get", ActivityType.PUBLICAPI) @distributed_trace def get(self, name: str = None, **kwargs: Dict) -> Workspace: """Get a workspace by name. @@ -105,7 +106,7 @@ def get(self, name: str = None, **kwargs: Dict) -> Workspace: obj = self._operation.get(resource_group, workspace_name) return Workspace._from_rest_object(obj) - # @monitor_with_activity(logger, "Workspace.Get_Keys", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Workspace.Get_Keys", ActivityType.PUBLICAPI) @distributed_trace def get_keys(self, name: str = None) -> WorkspaceKeys: """Get keys for the workspace. 
@@ -119,7 +120,7 @@ def get_keys(self, name: str = None) -> WorkspaceKeys: obj = self._operation.list_keys(self._resource_group_name, workspace_name) return WorkspaceKeys._from_rest_object(obj) - # @monitor_with_activity(logger, "Workspace.BeginSyncKeys", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Workspace.BeginSyncKeys", ActivityType.PUBLICAPI) @distributed_trace def begin_sync_keys(self, name: str = None) -> LROPoller: """Triggers the workspace to immediately synchronize keys. If keys for @@ -136,7 +137,7 @@ def begin_sync_keys(self, name: str = None) -> LROPoller: workspace_name = self._check_workspace_name(name) return self._operation.begin_resync_keys(self._resource_group_name, workspace_name) - # @monitor_with_activity(logger, "Workspace.BeginCreate", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Workspace.BeginCreate", ActivityType.PUBLICAPI) @distributed_trace def begin_create( self, @@ -212,7 +213,7 @@ def callback(): CustomArmTemplateDeploymentPollingMethod(poller, arm_submit, callback), ) - # @monitor_with_activity(logger, "Workspace.BeginUpdate", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Workspace.BeginUpdate", ActivityType.PUBLICAPI) @distributed_trace def begin_update( self, @@ -302,7 +303,7 @@ def callback(_, deserialized, args): poller = self._operation.begin_update(resource_group, workspace.name, update_param, polling=True, cls=callback) return poller - # @monitor_with_activity(logger, "Workspace.BeginDelete", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Workspace.BeginDelete", ActivityType.PUBLICAPI) @distributed_trace def begin_delete(self, name: str, *, delete_dependent_resources: bool, **kwargs: Dict) -> LROPoller: """Delete a workspace. 
@@ -352,7 +353,7 @@ def begin_delete(self, name: str, *, delete_dependent_resources: bool, **kwargs: return poller @distributed_trace - # @monitor_with_activity(logger, "Workspace.BeginDiagnose", ActivityType.PUBLICAPI) + @monitor_with_activity(logger, "Workspace.BeginDiagnose", ActivityType.PUBLICAPI) def begin_diagnose(self, name: str, **kwargs: Dict) -> LROPoller[DiagnoseResponseResultValue]: """Diagnose workspace setup problems. diff --git a/sdk/ml/azure-ai-ml/setup.py b/sdk/ml/azure-ai-ml/setup.py index 275e5ffb67a4..d7395c73aa5a 100644 --- a/sdk/ml/azure-ai-ml/setup.py +++ b/sdk/ml/azure-ai-ml/setup.py @@ -84,7 +84,7 @@ "isodate", "azure-common<2.0.0,>=1.1", "typing-extensions<5.0.0", - # "applicationinsights<=0.11.10", # To be replaced by azure-monitor-query + "opencensus-ext-azure<=1.1.7", ], extras_require={ # user can run `pip install azure-ai-ml[designer]` to install mldesigner alone with this package diff --git a/sdk/ml/azure-ai-ml/tests/internal_utils/unittests/test_logger_utils.py b/sdk/ml/azure-ai-ml/tests/internal_utils/unittests/test_logger_utils.py new file mode 100644 index 000000000000..cfadd8b11ab0 --- /dev/null +++ b/sdk/ml/azure-ai-ml/tests/internal_utils/unittests/test_logger_utils.py @@ -0,0 +1,64 @@ +import logging +from mock import patch +import pytest + +from opencensus.ext.azure.log_exporter import AzureLogHandler + +from azure.ai.ml._telemetry import AML_INTERNAL_LOGGER_NAMESPACE, get_appinsights_log_handler +from azure.ai.ml._utils._logger_utils import OpsLogger, initialize_logger_info +from azure.ai.ml._user_agent import USER_AGENT + + +@pytest.mark.unittest +class TestLoggerUtils: + def test_initialize_logger_info(self) -> None: + test_name = "test" + test_terminator = "\n\n" + module_logger = logging.getLogger(test_name) + + initialize_logger_info(module_logger, terminator=test_terminator) + + assert module_logger.level == logging.INFO + assert not module_logger.propagate + assert module_logger.hasHandlers() + assert 
module_logger.handlers[0].terminator == test_terminator + + +@pytest.mark.unittest +class TestOpsLogger: + def test_init(self) -> None: + test_name = "test" + test_logger = OpsLogger(name=test_name) + + assert test_logger is not None + assert test_logger.package_logger.name == AML_INTERNAL_LOGGER_NAMESPACE + test_name + assert not test_logger.package_logger.propagate + assert test_logger.module_logger.name == test_name + assert len(test_logger.custom_dimensions) == 0 + + def test_update_info(self) -> None: + test_name = "test" + test_handler = logging.NullHandler() + test_data = {"app_insights_handler": test_handler} + + test_logger = OpsLogger(name=test_name) + test_logger.update_info(test_data) + + assert len(test_data) == 0 + assert test_logger.package_logger.hasHandlers() + assert test_logger.package_logger.handlers[0] == test_handler + + def test_disabled_logging(self) -> None: + with patch( + "azure.ai.ml._telemetry.logging_handler.is_telemetry_collection_disabled", + return_value=True + ): + handler = get_appinsights_log_handler(user_agent=USER_AGENT) + assert isinstance(handler, logging.NullHandler) + + with patch( + "azure.ai.ml._telemetry.logging_handler.is_telemetry_collection_disabled", + return_value=False + ): + handler = get_appinsights_log_handler(user_agent=USER_AGENT) + assert isinstance(handler, AzureLogHandler)