diff --git a/google/cloud/aiplatform_v1/__init__.py b/google/cloud/aiplatform_v1/__init__.py index 6e033c349b..bb6e2a833a 100644 --- a/google/cloud/aiplatform_v1/__init__.py +++ b/google/cloud/aiplatform_v1/__init__.py @@ -348,6 +348,7 @@ from .types.model_service import GetModelEvaluationRequest from .types.model_service import GetModelEvaluationSliceRequest from .types.model_service import GetModelRequest +from .types.model_service import ImportModelEvaluationRequest from .types.model_service import ListModelEvaluationSlicesRequest from .types.model_service import ListModelEvaluationSlicesResponse from .types.model_service import ListModelEvaluationsRequest @@ -720,6 +721,7 @@ "ImportFeatureValuesOperationMetadata", "ImportFeatureValuesRequest", "ImportFeatureValuesResponse", + "ImportModelEvaluationRequest", "Index", "IndexEndpoint", "IndexEndpointServiceClient", diff --git a/google/cloud/aiplatform_v1/gapic_metadata.json b/google/cloud/aiplatform_v1/gapic_metadata.json index b7e8b7361a..412462f2ff 100644 --- a/google/cloud/aiplatform_v1/gapic_metadata.json +++ b/google/cloud/aiplatform_v1/gapic_metadata.json @@ -1291,6 +1291,11 @@ "get_model_evaluation_slice" ] }, + "ImportModelEvaluation": { + "methods": [ + "import_model_evaluation" + ] + }, "ListModelEvaluationSlices": { "methods": [ "list_model_evaluation_slices" @@ -1346,6 +1351,11 @@ "get_model_evaluation_slice" ] }, + "ImportModelEvaluation": { + "methods": [ + "import_model_evaluation" + ] + }, "ListModelEvaluationSlices": { "methods": [ "list_model_evaluation_slices" diff --git a/google/cloud/aiplatform_v1/services/model_service/async_client.py b/google/cloud/aiplatform_v1/services/model_service/async_client.py index 1de234ae55..0f2990602b 100644 --- a/google/cloud/aiplatform_v1/services/model_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/model_service/async_client.py @@ -40,6 +40,7 @@ from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import 
model as gca_model from google.cloud.aiplatform_v1.types import model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation from google.cloud.aiplatform_v1.types import model_evaluation_slice from google.cloud.aiplatform_v1.types import model_service from google.cloud.aiplatform_v1.types import operation as gca_operation @@ -886,6 +887,108 @@ def sample_export_model(): # Done; return the response. return response + async def import_model_evaluation( + self, + request: Union[model_service.ImportModelEvaluationRequest, dict] = None, + *, + parent: str = None, + model_evaluation: gca_model_evaluation.ModelEvaluation = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model_evaluation.ModelEvaluation: + r"""Imports an externally generated ModelEvaluation. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_import_model_evaluation(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ImportModelEvaluationRequest( + parent="parent_value", + ) + + # Make the request + response = client.import_model_evaluation(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ImportModelEvaluationRequest, dict]): + The request object. Request message for + [ModelService.ImportModelEvaluation][google.cloud.aiplatform.v1.ModelService.ImportModelEvaluation] + parent (:class:`str`): + Required. The name of the parent model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model_evaluation (:class:`google.cloud.aiplatform_v1.types.ModelEvaluation`): + Required. Model evaluation resource + to be imported. 
+ + This corresponds to the ``model_evaluation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.ModelEvaluation: + A collection of metrics calculated by + comparing Model's predictions on all of + the test data against annotations from + the test data. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model_evaluation]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_service.ImportModelEvaluationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model_evaluation is not None: + request.model_evaluation = model_evaluation + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.import_model_evaluation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + async def get_model_evaluation( self, request: Union[model_service.GetModelEvaluationRequest, dict] = None, diff --git a/google/cloud/aiplatform_v1/services/model_service/client.py b/google/cloud/aiplatform_v1/services/model_service/client.py index 65f24a64d6..f265801bdb 100644 --- a/google/cloud/aiplatform_v1/services/model_service/client.py +++ b/google/cloud/aiplatform_v1/services/model_service/client.py @@ -43,6 +43,7 @@ from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import model as gca_model from google.cloud.aiplatform_v1.types import model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation from google.cloud.aiplatform_v1.types import model_evaluation_slice from google.cloud.aiplatform_v1.types import model_service from google.cloud.aiplatform_v1.types import operation as gca_operation @@ -1143,6 +1144,108 @@ def sample_export_model(): # Done; return the response. return response + def import_model_evaluation( + self, + request: Union[model_service.ImportModelEvaluationRequest, dict] = None, + *, + parent: str = None, + model_evaluation: gca_model_evaluation.ModelEvaluation = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model_evaluation.ModelEvaluation: + r"""Imports an externally generated ModelEvaluation. + + .. code-block:: python + + from google.cloud import aiplatform_v1 + + def sample_import_model_evaluation(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ImportModelEvaluationRequest( + parent="parent_value", + ) + + # Make the request + response = client.import_model_evaluation(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ImportModelEvaluationRequest, dict]): + The request object. 
Request message for + [ModelService.ImportModelEvaluation][google.cloud.aiplatform.v1.ModelService.ImportModelEvaluation] + parent (str): + Required. The name of the parent model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model_evaluation (google.cloud.aiplatform_v1.types.ModelEvaluation): + Required. Model evaluation resource + to be imported. + + This corresponds to the ``model_evaluation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.ModelEvaluation: + A collection of metrics calculated by + comparing Model's predictions on all of + the test data against annotations from + the test data. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model_evaluation]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ImportModelEvaluationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.ImportModelEvaluationRequest): + request = model_service.ImportModelEvaluationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if model_evaluation is not None: + request.model_evaluation = model_evaluation + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.import_model_evaluation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + def get_model_evaluation( self, request: Union[model_service.GetModelEvaluationRequest, dict] = None, diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/base.py b/google/cloud/aiplatform_v1/services/model_service/transports/base.py index a0ab34039d..ce93a90abb 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/base.py @@ -29,6 +29,7 @@ from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import model as gca_model from google.cloud.aiplatform_v1.types import model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation from google.cloud.aiplatform_v1.types import model_evaluation_slice from google.cloud.aiplatform_v1.types import model_service from google.longrunning import operations_pb2 # type: ignore @@ -144,6 +145,11 @@ def _prep_wrapped_messages(self, client_info): self.export_model: gapic_v1.method.wrap_method( self.export_model, default_timeout=None, client_info=client_info, ), + self.import_model_evaluation: gapic_v1.method.wrap_method( + self.import_model_evaluation, + default_timeout=None, + client_info=client_info, + ), self.get_model_evaluation: gapic_v1.method.wrap_method( self.get_model_evaluation, 
default_timeout=None, @@ -236,6 +242,18 @@ def export_model( ]: raise NotImplementedError() + @property + def import_model_evaluation( + self, + ) -> Callable[ + [model_service.ImportModelEvaluationRequest], + Union[ + gca_model_evaluation.ModelEvaluation, + Awaitable[gca_model_evaluation.ModelEvaluation], + ], + ]: + raise NotImplementedError() + @property def get_model_evaluation( self, diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py index dfc3d6029c..eee84b3917 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py @@ -28,6 +28,7 @@ from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import model as gca_model from google.cloud.aiplatform_v1.types import model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation from google.cloud.aiplatform_v1.types import model_evaluation_slice from google.cloud.aiplatform_v1.types import model_service from google.longrunning import operations_pb2 # type: ignore @@ -413,6 +414,35 @@ def export_model( ) return self._stubs["export_model"] + @property + def import_model_evaluation( + self, + ) -> Callable[ + [model_service.ImportModelEvaluationRequest], + gca_model_evaluation.ModelEvaluation, + ]: + r"""Return a callable for the import model evaluation method over gRPC. + + Imports an externally generated ModelEvaluation. + + Returns: + Callable[[~.ImportModelEvaluationRequest], + ~.ModelEvaluation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "import_model_evaluation" not in self._stubs: + self._stubs["import_model_evaluation"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.ModelService/ImportModelEvaluation", + request_serializer=model_service.ImportModelEvaluationRequest.serialize, + response_deserializer=gca_model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs["import_model_evaluation"] + @property def get_model_evaluation( self, diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py index d9f86fcf53..9ab912c613 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py @@ -28,6 +28,7 @@ from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import model as gca_model from google.cloud.aiplatform_v1.types import model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation from google.cloud.aiplatform_v1.types import model_evaluation_slice from google.cloud.aiplatform_v1.types import model_service from google.longrunning import operations_pb2 # type: ignore @@ -427,6 +428,35 @@ def export_model( ) return self._stubs["export_model"] + @property + def import_model_evaluation( + self, + ) -> Callable[ + [model_service.ImportModelEvaluationRequest], + Awaitable[gca_model_evaluation.ModelEvaluation], + ]: + r"""Return a callable for the import model evaluation method over gRPC. + + Imports an externally generated ModelEvaluation. + + Returns: + Callable[[~.ImportModelEvaluationRequest], + Awaitable[~.ModelEvaluation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "import_model_evaluation" not in self._stubs: + self._stubs["import_model_evaluation"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.ModelService/ImportModelEvaluation", + request_serializer=model_service.ImportModelEvaluationRequest.serialize, + response_deserializer=gca_model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs["import_model_evaluation"] + @property def get_model_evaluation( self, diff --git a/google/cloud/aiplatform_v1/types/__init__.py b/google/cloud/aiplatform_v1/types/__init__.py index 7294866523..0a8934d961 100644 --- a/google/cloud/aiplatform_v1/types/__init__.py +++ b/google/cloud/aiplatform_v1/types/__init__.py @@ -345,6 +345,7 @@ GetModelEvaluationRequest, GetModelEvaluationSliceRequest, GetModelRequest, + ImportModelEvaluationRequest, ListModelEvaluationSlicesRequest, ListModelEvaluationSlicesResponse, ListModelEvaluationsRequest, @@ -795,6 +796,7 @@ "GetModelEvaluationRequest", "GetModelEvaluationSliceRequest", "GetModelRequest", + "ImportModelEvaluationRequest", "ListModelEvaluationSlicesRequest", "ListModelEvaluationSlicesResponse", "ListModelEvaluationsRequest", diff --git a/google/cloud/aiplatform_v1/types/model_evaluation.py b/google/cloud/aiplatform_v1/types/model_evaluation.py index f2ef3a6fde..106fea45b4 100644 --- a/google/cloud/aiplatform_v1/types/model_evaluation.py +++ b/google/cloud/aiplatform_v1/types/model_evaluation.py @@ -35,43 +35,95 @@ class ModelEvaluation(proto.Message): Output only. The resource name of the ModelEvaluation. metrics_schema_uri (str): - Output only. Points to a YAML file stored on Google Cloud - Storage describing the + Points to a YAML file stored on Google Cloud Storage + describing the [metrics][google.cloud.aiplatform.v1.ModelEvaluation.metrics] of this ModelEvaluation. The schema is defined as an OpenAPI 3.0.2 `Schema Object `__. 
metrics (google.protobuf.struct_pb2.Value): - Output only. Evaluation metrics of the Model. The schema of - the metrics is stored in + Evaluation metrics of the Model. The schema of the metrics + is stored in [metrics_schema_uri][google.cloud.aiplatform.v1.ModelEvaluation.metrics_schema_uri] create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this ModelEvaluation was created. slice_dimensions (Sequence[str]): - Output only. All possible + All possible [dimensions][ModelEvaluationSlice.slice.dimension] of ModelEvaluationSlices. The dimensions can be used as the filter of the [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices] request, in the form of ``slice.dimension = ``. + data_item_schema_uri (str): + Points to a YAML file stored on Google Cloud Storage + describing [EvaluatedDataItemView.data_item_payload][] and + [EvaluatedAnnotation.data_item_payload][]. The schema is + defined as an OpenAPI 3.0.2 `Schema + Object `__. + + This field is not populated if there are neither + EvaluatedDataItemViews nor EvaluatedAnnotations under this + ModelEvaluation. + annotation_schema_uri (str): + Points to a YAML file stored on Google Cloud Storage + describing [EvaluatedDataItemView.predictions][], + [EvaluatedDataItemView.ground_truths][], + [EvaluatedAnnotation.predictions][], and + [EvaluatedAnnotation.ground_truths][]. The schema is defined + as an OpenAPI 3.0.2 `Schema + Object `__. + + This field is not populated if there are neither + EvaluatedDataItemViews nor EvaluatedAnnotations under this + ModelEvaluation. model_explanation (google.cloud.aiplatform_v1.types.ModelExplanation): - Output only. Aggregated explanation metrics - for the Model's prediction output over the data - this ModelEvaluation uses. This field is - populated only if the Model is evaluated with + Aggregated explanation metrics for the + Model's prediction output over the data this + ModelEvaluation uses. 
This field is populated + only if the Model is evaluated with explanations, and only for AutoML tabular Models. + explanation_specs (Sequence[google.cloud.aiplatform_v1.types.ModelEvaluation.ModelEvaluationExplanationSpec]): + Describes the values of + [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] + that are used for explaining the predicted values on the + evaluated data. """ + class ModelEvaluationExplanationSpec(proto.Message): + r""" + + Attributes: + explanation_type (str): + Explanation type. + + For AutoML Image Classification models, possible values are: + + - ``image-integrated-gradients`` + - ``image-xrai`` + explanation_spec (google.cloud.aiplatform_v1.types.ExplanationSpec): + Explanation spec details. + """ + + explanation_type = proto.Field(proto.STRING, number=1,) + explanation_spec = proto.Field( + proto.MESSAGE, number=2, message=explanation.ExplanationSpec, + ) + name = proto.Field(proto.STRING, number=1,) metrics_schema_uri = proto.Field(proto.STRING, number=2,) metrics = proto.Field(proto.MESSAGE, number=3, message=struct_pb2.Value,) create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,) slice_dimensions = proto.RepeatedField(proto.STRING, number=5,) + data_item_schema_uri = proto.Field(proto.STRING, number=6,) + annotation_schema_uri = proto.Field(proto.STRING, number=7,) model_explanation = proto.Field( proto.MESSAGE, number=8, message=explanation.ModelExplanation, ) + explanation_specs = proto.RepeatedField( + proto.MESSAGE, number=9, message=ModelEvaluationExplanationSpec, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/model_service.py b/google/cloud/aiplatform_v1/types/model_service.py index 194b811d4e..270bbbf167 100644 --- a/google/cloud/aiplatform_v1/types/model_service.py +++ b/google/cloud/aiplatform_v1/types/model_service.py @@ -17,7 +17,7 @@ from google.cloud.aiplatform_v1.types import io from google.cloud.aiplatform_v1.types import model 
as gca_model -from google.cloud.aiplatform_v1.types import model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation from google.cloud.aiplatform_v1.types import model_evaluation_slice from google.cloud.aiplatform_v1.types import operation from google.protobuf import field_mask_pb2 # type: ignore @@ -37,6 +37,7 @@ "ExportModelRequest", "ExportModelOperationMetadata", "ExportModelResponse", + "ImportModelEvaluationRequest", "GetModelEvaluationRequest", "ListModelEvaluationsRequest", "ListModelEvaluationsResponse", @@ -321,6 +322,25 @@ class ExportModelResponse(proto.Message): """ +class ImportModelEvaluationRequest(proto.Message): + r"""Request message for + [ModelService.ImportModelEvaluation][google.cloud.aiplatform.v1.ModelService.ImportModelEvaluation] + + Attributes: + parent (str): + Required. The name of the parent model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + model_evaluation (google.cloud.aiplatform_v1.types.ModelEvaluation): + Required. Model evaluation resource to be + imported. + """ + + parent = proto.Field(proto.STRING, number=1,) + model_evaluation = proto.Field( + proto.MESSAGE, number=2, message=gca_model_evaluation.ModelEvaluation, + ) + + class GetModelEvaluationRequest(proto.Message): r"""Request message for [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1.ModelService.GetModelEvaluation]. 
@@ -383,7 +403,7 @@ def raw_page(self): return self model_evaluations = proto.RepeatedField( - proto.MESSAGE, number=1, message=model_evaluation.ModelEvaluation, + proto.MESSAGE, number=1, message=gca_model_evaluation.ModelEvaluation, ) next_page_token = proto.Field(proto.STRING, number=2,) diff --git a/google/cloud/aiplatform_v1beta1/__init__.py b/google/cloud/aiplatform_v1beta1/__init__.py index ae71432f11..905c1b778b 100644 --- a/google/cloud/aiplatform_v1beta1/__init__.py +++ b/google/cloud/aiplatform_v1beta1/__init__.py @@ -348,6 +348,7 @@ from .types.model_service import GetModelEvaluationRequest from .types.model_service import GetModelEvaluationSliceRequest from .types.model_service import GetModelRequest +from .types.model_service import ImportModelEvaluationRequest from .types.model_service import ListModelEvaluationSlicesRequest from .types.model_service import ListModelEvaluationSlicesResponse from .types.model_service import ListModelEvaluationsRequest @@ -720,6 +721,7 @@ "ImportFeatureValuesOperationMetadata", "ImportFeatureValuesRequest", "ImportFeatureValuesResponse", + "ImportModelEvaluationRequest", "Index", "IndexEndpoint", "IndexEndpointServiceClient", diff --git a/google/cloud/aiplatform_v1beta1/gapic_metadata.json b/google/cloud/aiplatform_v1beta1/gapic_metadata.json index b584f16b81..1be3df6faa 100644 --- a/google/cloud/aiplatform_v1beta1/gapic_metadata.json +++ b/google/cloud/aiplatform_v1beta1/gapic_metadata.json @@ -1291,6 +1291,11 @@ "get_model_evaluation_slice" ] }, + "ImportModelEvaluation": { + "methods": [ + "import_model_evaluation" + ] + }, "ListModelEvaluationSlices": { "methods": [ "list_model_evaluation_slices" @@ -1346,6 +1351,11 @@ "get_model_evaluation_slice" ] }, + "ImportModelEvaluation": { + "methods": [ + "import_model_evaluation" + ] + }, "ListModelEvaluationSlices": { "methods": [ "list_model_evaluation_slices" diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py 
b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py index b96692805c..69f9839ce8 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py @@ -183,19 +183,16 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, location: str, dataset: str,) -> str: + def dataset_path(project: str, dataset: str,) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + return "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", - path, - ) + m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod @@ -215,16 +212,19 @@ def parse_dataset_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, dataset: str,) -> str: + def dataset_path(project: str, location: str, dataset: str,) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + return "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod diff --git 
a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py index 9061ebc3e3..0e153641d3 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py @@ -40,6 +40,9 @@ from google.cloud.aiplatform_v1beta1.types import model from google.cloud.aiplatform_v1beta1.types import model as gca_model from google.cloud.aiplatform_v1beta1.types import model_evaluation +from google.cloud.aiplatform_v1beta1.types import ( + model_evaluation as gca_model_evaluation, +) from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice from google.cloud.aiplatform_v1beta1.types import model_service from google.cloud.aiplatform_v1beta1.types import operation as gca_operation @@ -887,6 +890,108 @@ def sample_export_model(): # Done; return the response. return response + async def import_model_evaluation( + self, + request: Union[model_service.ImportModelEvaluationRequest, dict] = None, + *, + parent: str = None, + model_evaluation: gca_model_evaluation.ModelEvaluation = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model_evaluation.ModelEvaluation: + r"""Imports an externally generated ModelEvaluation. + + .. code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_import_model_evaluation(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ImportModelEvaluationRequest( + parent="parent_value", + ) + + # Make the request + response = client.import_model_evaluation(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ImportModelEvaluationRequest, dict]): + The request object. 
Request message for + [ModelService.ImportModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.ImportModelEvaluation] + parent (:class:`str`): + Required. The name of the parent model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model_evaluation (:class:`google.cloud.aiplatform_v1beta1.types.ModelEvaluation`): + Required. Model evaluation resource + to be imported. + + This corresponds to the ``model_evaluation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelEvaluation: + A collection of metrics calculated by + comparing Model's predictions on all of + the test data against annotations from + the test data. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model_evaluation]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_service.ImportModelEvaluationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model_evaluation is not None: + request.model_evaluation = model_evaluation + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.import_model_evaluation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + async def get_model_evaluation( self, request: Union[model_service.GetModelEvaluationRequest, dict] = None, diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/client.py b/google/cloud/aiplatform_v1beta1/services/model_service/client.py index f8f429570f..6d52da5fc2 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/client.py @@ -43,6 +43,9 @@ from google.cloud.aiplatform_v1beta1.types import model from google.cloud.aiplatform_v1beta1.types import model as gca_model from google.cloud.aiplatform_v1beta1.types import model_evaluation +from google.cloud.aiplatform_v1beta1.types import ( + model_evaluation as gca_model_evaluation, +) from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice from google.cloud.aiplatform_v1beta1.types import model_service from google.cloud.aiplatform_v1beta1.types import operation as gca_operation @@ -1144,6 +1147,108 @@ def sample_export_model(): # Done; return the response. return response + def import_model_evaluation( + self, + request: Union[model_service.ImportModelEvaluationRequest, dict] = None, + *, + parent: str = None, + model_evaluation: gca_model_evaluation.ModelEvaluation = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model_evaluation.ModelEvaluation: + r"""Imports an externally generated ModelEvaluation. + + .. 
code-block:: python + + from google.cloud import aiplatform_v1beta1 + + def sample_import_model_evaluation(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ImportModelEvaluationRequest( + parent="parent_value", + ) + + # Make the request + response = client.import_model_evaluation(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ImportModelEvaluationRequest, dict]): + The request object. Request message for + [ModelService.ImportModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.ImportModelEvaluation] + parent (str): + Required. The name of the parent model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model_evaluation (google.cloud.aiplatform_v1beta1.types.ModelEvaluation): + Required. Model evaluation resource + to be imported. + + This corresponds to the ``model_evaluation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelEvaluation: + A collection of metrics calculated by + comparing Model's predictions on all of + the test data against annotations from + the test data. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent, model_evaluation]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ImportModelEvaluationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.ImportModelEvaluationRequest): + request = model_service.ImportModelEvaluationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model_evaluation is not None: + request.model_evaluation = model_evaluation + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.import_model_evaluation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + def get_model_evaluation( self, request: Union[model_service.GetModelEvaluationRequest, dict] = None, diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py index e4f9785d0a..d495a11eda 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py @@ -29,6 +29,9 @@ from google.cloud.aiplatform_v1beta1.types import model from google.cloud.aiplatform_v1beta1.types import model as gca_model from google.cloud.aiplatform_v1beta1.types import model_evaluation +from google.cloud.aiplatform_v1beta1.types import ( + model_evaluation as gca_model_evaluation, +) from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice from google.cloud.aiplatform_v1beta1.types import model_service from google.longrunning import operations_pb2 # type: ignore @@ -144,6 +147,11 @@ def _prep_wrapped_messages(self, client_info): self.export_model: gapic_v1.method.wrap_method( self.export_model, default_timeout=5.0, client_info=client_info, ), + self.import_model_evaluation: gapic_v1.method.wrap_method( + self.import_model_evaluation, + default_timeout=None, + client_info=client_info, + ), self.get_model_evaluation: gapic_v1.method.wrap_method( self.get_model_evaluation, default_timeout=5.0, client_info=client_info, ), @@ -234,6 +242,18 @@ def export_model( ]: raise NotImplementedError() + @property + def import_model_evaluation( + self, + ) -> Callable[ + [model_service.ImportModelEvaluationRequest], + Union[ + gca_model_evaluation.ModelEvaluation, + Awaitable[gca_model_evaluation.ModelEvaluation], + ], + ]: + raise NotImplementedError() + @property def get_model_evaluation( self, diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py index 
2d7e0ed1fb..5c8bb90908 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py @@ -28,6 +28,9 @@ from google.cloud.aiplatform_v1beta1.types import model from google.cloud.aiplatform_v1beta1.types import model as gca_model from google.cloud.aiplatform_v1beta1.types import model_evaluation +from google.cloud.aiplatform_v1beta1.types import ( + model_evaluation as gca_model_evaluation, +) from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice from google.cloud.aiplatform_v1beta1.types import model_service from google.longrunning import operations_pb2 # type: ignore @@ -414,6 +417,35 @@ def export_model( ) return self._stubs["export_model"] + @property + def import_model_evaluation( + self, + ) -> Callable[ + [model_service.ImportModelEvaluationRequest], + gca_model_evaluation.ModelEvaluation, + ]: + r"""Return a callable for the import model evaluation method over gRPC. + + Imports an externally generated ModelEvaluation. + + Returns: + Callable[[~.ImportModelEvaluationRequest], + ~.ModelEvaluation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "import_model_evaluation" not in self._stubs: + self._stubs["import_model_evaluation"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelService/ImportModelEvaluation", + request_serializer=model_service.ImportModelEvaluationRequest.serialize, + response_deserializer=gca_model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs["import_model_evaluation"] + @property def get_model_evaluation( self, diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py index d63944e8a8..ae6b4fcb93 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py @@ -28,6 +28,9 @@ from google.cloud.aiplatform_v1beta1.types import model from google.cloud.aiplatform_v1beta1.types import model as gca_model from google.cloud.aiplatform_v1beta1.types import model_evaluation +from google.cloud.aiplatform_v1beta1.types import ( + model_evaluation as gca_model_evaluation, +) from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice from google.cloud.aiplatform_v1beta1.types import model_service from google.longrunning import operations_pb2 # type: ignore @@ -428,6 +431,35 @@ def export_model( ) return self._stubs["export_model"] + @property + def import_model_evaluation( + self, + ) -> Callable[ + [model_service.ImportModelEvaluationRequest], + Awaitable[gca_model_evaluation.ModelEvaluation], + ]: + r"""Return a callable for the import model evaluation method over gRPC. + + Imports an externally generated ModelEvaluation. + + Returns: + Callable[[~.ImportModelEvaluationRequest], + Awaitable[~.ModelEvaluation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "import_model_evaluation" not in self._stubs: + self._stubs["import_model_evaluation"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelService/ImportModelEvaluation", + request_serializer=model_service.ImportModelEvaluationRequest.serialize, + response_deserializer=gca_model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs["import_model_evaluation"] + @property def get_model_evaluation( self, diff --git a/google/cloud/aiplatform_v1beta1/types/__init__.py b/google/cloud/aiplatform_v1beta1/types/__init__.py index 5a362679b8..cdd7349d5b 100644 --- a/google/cloud/aiplatform_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform_v1beta1/types/__init__.py @@ -345,6 +345,7 @@ GetModelEvaluationRequest, GetModelEvaluationSliceRequest, GetModelRequest, + ImportModelEvaluationRequest, ListModelEvaluationSlicesRequest, ListModelEvaluationSlicesResponse, ListModelEvaluationsRequest, @@ -795,6 +796,7 @@ "GetModelEvaluationRequest", "GetModelEvaluationSliceRequest", "GetModelRequest", + "ImportModelEvaluationRequest", "ListModelEvaluationSlicesRequest", "ListModelEvaluationSlicesResponse", "ListModelEvaluationsRequest", diff --git a/google/cloud/aiplatform_v1beta1/types/model_evaluation.py b/google/cloud/aiplatform_v1beta1/types/model_evaluation.py index 3ac25010ea..51fc3307bb 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_evaluation.py +++ b/google/cloud/aiplatform_v1beta1/types/model_evaluation.py @@ -35,35 +35,35 @@ class ModelEvaluation(proto.Message): Output only. The resource name of the ModelEvaluation. metrics_schema_uri (str): - Output only. Points to a YAML file stored on Google Cloud - Storage describing the + Points to a YAML file stored on Google Cloud Storage + describing the [metrics][google.cloud.aiplatform.v1beta1.ModelEvaluation.metrics] of this ModelEvaluation. 
The schema is defined as an OpenAPI 3.0.2 `Schema Object <https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject>`__. metrics (google.protobuf.struct_pb2.Value): - Output only. Evaluation metrics of the Model. The schema of - the metrics is stored in + Evaluation metrics of the Model. The schema of the metrics + is stored in [metrics_schema_uri][google.cloud.aiplatform.v1beta1.ModelEvaluation.metrics_schema_uri] create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this ModelEvaluation was created. slice_dimensions (Sequence[str]): - Output only. All possible + All possible [dimensions][ModelEvaluationSlice.slice.dimension] of ModelEvaluationSlices. The dimensions can be used as the filter of the [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices] request, in the form of ``slice.dimension = <dimension>``. model_explanation (google.cloud.aiplatform_v1beta1.types.ModelExplanation): - Output only. Aggregated explanation metrics - for the Model's prediction output over the data - this ModelEvaluation uses. This field is - populated only if the Model is evaluated with + Aggregated explanation metrics for the + Model's prediction output over the data this + ModelEvaluation uses. This field is populated + only if the Model is evaluated with explanations, and only for AutoML tabular Models. explanation_specs (Sequence[google.cloud.aiplatform_v1beta1.types.ModelEvaluation.ModelEvaluationExplanationSpec]): - Output only. Describes the values of + Describes the values of [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] that are used for explaining the predicted values on the evaluated data.
diff --git a/google/cloud/aiplatform_v1beta1/types/model_service.py b/google/cloud/aiplatform_v1beta1/types/model_service.py index 4c2e656379..4f8c7604c3 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_service.py +++ b/google/cloud/aiplatform_v1beta1/types/model_service.py @@ -17,7 +17,9 @@ from google.cloud.aiplatform_v1beta1.types import io from google.cloud.aiplatform_v1beta1.types import model as gca_model -from google.cloud.aiplatform_v1beta1.types import model_evaluation +from google.cloud.aiplatform_v1beta1.types import ( + model_evaluation as gca_model_evaluation, +) from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice from google.cloud.aiplatform_v1beta1.types import operation from google.protobuf import field_mask_pb2 # type: ignore @@ -37,6 +39,7 @@ "ExportModelRequest", "ExportModelOperationMetadata", "ExportModelResponse", + "ImportModelEvaluationRequest", "GetModelEvaluationRequest", "ListModelEvaluationsRequest", "ListModelEvaluationsResponse", @@ -310,6 +313,25 @@ class ExportModelResponse(proto.Message): """ +class ImportModelEvaluationRequest(proto.Message): + r"""Request message for + [ModelService.ImportModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.ImportModelEvaluation] + + Attributes: + parent (str): + Required. The name of the parent model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + model_evaluation (google.cloud.aiplatform_v1beta1.types.ModelEvaluation): + Required. Model evaluation resource to be + imported. + """ + + parent = proto.Field(proto.STRING, number=1,) + model_evaluation = proto.Field( + proto.MESSAGE, number=2, message=gca_model_evaluation.ModelEvaluation, + ) + + class GetModelEvaluationRequest(proto.Message): r"""Request message for [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation]. 
@@ -372,7 +394,7 @@ def raw_page(self): return self model_evaluations = proto.RepeatedField( - proto.MESSAGE, number=1, message=model_evaluation.ModelEvaluation, + proto.MESSAGE, number=1, message=gca_model_evaluation.ModelEvaluation, ) next_page_token = proto.Field(proto.STRING, number=2,) diff --git a/samples/generated_samples/aiplatform_v1_generated_model_service_import_model_evaluation_async.py b/samples/generated_samples/aiplatform_v1_generated_model_service_import_model_evaluation_async.py new file mode 100644 index 0000000000..d705d6505f --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_model_service_import_model_evaluation_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportModelEvaluation +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_ImportModelEvaluation_async] +from google.cloud import aiplatform_v1 + + +async def sample_import_model_evaluation(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ImportModelEvaluationRequest( + parent="parent_value", + ) + + # Make the request + response = await client.import_model_evaluation(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_ModelService_ImportModelEvaluation_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_model_service_import_model_evaluation_sync.py b/samples/generated_samples/aiplatform_v1_generated_model_service_import_model_evaluation_sync.py new file mode 100644 index 0000000000..f2910ac56e --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_model_service_import_model_evaluation_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportModelEvaluation +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_ModelService_ImportModelEvaluation_sync] +from google.cloud import aiplatform_v1 + + +def sample_import_model_evaluation(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ImportModelEvaluationRequest( + parent="parent_value", + ) + + # Make the request + response = client.import_model_evaluation(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_ModelService_ImportModelEvaluation_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_service_import_model_evaluation_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_service_import_model_evaluation_async.py new file mode 100644 index 0000000000..67a59cf5e5 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_service_import_model_evaluation_async.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportModelEvaluation +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_ImportModelEvaluation_async] +from google.cloud import aiplatform_v1beta1 + + +async def sample_import_model_evaluation(): + # Create a client + client = aiplatform_v1beta1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ImportModelEvaluationRequest( + parent="parent_value", + ) + + # Make the request + response = await client.import_model_evaluation(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_ImportModelEvaluation_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_service_import_model_evaluation_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_service_import_model_evaluation_sync.py new file mode 100644 index 0000000000..bb92dfdaea --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_service_import_model_evaluation_sync.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportModelEvaluation +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelService_ImportModelEvaluation_sync] +from google.cloud import aiplatform_v1beta1 + + +def sample_import_model_evaluation(): + # Create a client + client = aiplatform_v1beta1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ImportModelEvaluationRequest( + parent="parent_value", + ) + + # Make the request + response = client.import_model_evaluation(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelService_ImportModelEvaluation_sync] diff --git a/samples/generated_samples/snippet_metadata_aiplatform_v1.json b/samples/generated_samples/snippet_metadata_aiplatform_v1.json index 739600de8b..249a78cb01 100644 --- a/samples/generated_samples/snippet_metadata_aiplatform_v1.json +++ b/samples/generated_samples/snippet_metadata_aiplatform_v1.json @@ -10478,6 +10478,95 @@ } ] }, + { + "clientMethod": { + "async": true, + "method": { + "service": { + "shortName": "ModelService" + }, + "shortName": "ImportModelEvaluation" + } + }, + "file": "aiplatform_v1_generated_model_service_import_model_evaluation_async.py", + "regionTag": "aiplatform_v1_generated_ModelService_ImportModelEvaluation_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ] + }, + { + "clientMethod": { + "method": { + "service": { + "shortName": "ModelService" + }, + "shortName": "ImportModelEvaluation" + } + }, + "file": "aiplatform_v1_generated_model_service_import_model_evaluation_sync.py", 
+ "regionTag": "aiplatform_v1_generated_ModelService_ImportModelEvaluation_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ] + }, { "clientMethod": { "async": true, diff --git a/samples/generated_samples/snippet_metadata_aiplatform_v1beta1.json b/samples/generated_samples/snippet_metadata_aiplatform_v1beta1.json index 27d819b5e5..d6c82957ab 100644 --- a/samples/generated_samples/snippet_metadata_aiplatform_v1beta1.json +++ b/samples/generated_samples/snippet_metadata_aiplatform_v1beta1.json @@ -10478,6 +10478,95 @@ } ] }, + { + "clientMethod": { + "async": true, + "method": { + "service": { + "shortName": "ModelService" + }, + "shortName": "ImportModelEvaluation" + } + }, + "file": "aiplatform_v1beta1_generated_model_service_import_model_evaluation_async.py", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ImportModelEvaluation_async", + "segments": [ + { + "end": 44, + "start": 27, + "type": "FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ] + }, + { + "clientMethod": { + "method": { + "service": { + "shortName": "ModelService" + }, + "shortName": "ImportModelEvaluation" + } + }, + "file": "aiplatform_v1beta1_generated_model_service_import_model_evaluation_sync.py", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ImportModelEvaluation_sync", + "segments": [ + { + "end": 44, + "start": 27, + "type": 
"FULL" + }, + { + "end": 44, + "start": 27, + "type": "SHORT" + }, + { + "end": 33, + "start": 31, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 38, + "start": 34, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 41, + "start": 39, + "type": "REQUEST_EXECUTION" + }, + { + "end": 45, + "start": 42, + "type": "RESPONSE_HANDLING" + } + ] + }, { "clientMethod": { "async": true, diff --git a/tests/unit/gapic/aiplatform_v1/test_model_service.py b/tests/unit/gapic/aiplatform_v1/test_model_service.py index b117076472..de69632166 100644 --- a/tests/unit/gapic/aiplatform_v1/test_model_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_model_service.py @@ -48,6 +48,7 @@ from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import model as gca_model from google.cloud.aiplatform_v1.types import model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation from google.cloud.aiplatform_v1.types import model_evaluation_slice from google.cloud.aiplatform_v1.types import model_service from google.cloud.aiplatform_v1.types import operation as gca_operation @@ -2087,6 +2088,254 @@ async def test_export_model_flattened_error_async(): ) +@pytest.mark.parametrize( + "request_type", [model_service.ImportModelEvaluationRequest, dict,] +) +def test_import_model_evaluation(request_type, transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_model_evaluation), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = gca_model_evaluation.ModelEvaluation( + name="name_value", + metrics_schema_uri="metrics_schema_uri_value", + slice_dimensions=["slice_dimensions_value"], + data_item_schema_uri="data_item_schema_uri_value", + annotation_schema_uri="annotation_schema_uri_value", + ) + response = client.import_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ImportModelEvaluationRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_model_evaluation.ModelEvaluation) + assert response.name == "name_value" + assert response.metrics_schema_uri == "metrics_schema_uri_value" + assert response.slice_dimensions == ["slice_dimensions_value"] + assert response.data_item_schema_uri == "data_item_schema_uri_value" + assert response.annotation_schema_uri == "annotation_schema_uri_value" + + +def test_import_model_evaluation_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.import_model_evaluation), "__call__" + ) as call: + client.import_model_evaluation() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ImportModelEvaluationRequest() + + +@pytest.mark.asyncio +async def test_import_model_evaluation_async( + transport: str = "grpc_asyncio", + request_type=model_service.ImportModelEvaluationRequest, +): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_model_evaluation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_model_evaluation.ModelEvaluation( + name="name_value", + metrics_schema_uri="metrics_schema_uri_value", + slice_dimensions=["slice_dimensions_value"], + data_item_schema_uri="data_item_schema_uri_value", + annotation_schema_uri="annotation_schema_uri_value", + ) + ) + response = await client.import_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ImportModelEvaluationRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_model_evaluation.ModelEvaluation) + assert response.name == "name_value" + assert response.metrics_schema_uri == "metrics_schema_uri_value" + assert response.slice_dimensions == ["slice_dimensions_value"] + assert response.data_item_schema_uri == "data_item_schema_uri_value" + assert response.annotation_schema_uri == "annotation_schema_uri_value" + + +@pytest.mark.asyncio +async def test_import_model_evaluation_async_from_dict(): + await test_import_model_evaluation_async(request_type=dict) + + +def test_import_model_evaluation_field_headers(): + client = ModelServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ImportModelEvaluationRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_model_evaluation), "__call__" + ) as call: + call.return_value = gca_model_evaluation.ModelEvaluation() + client.import_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_import_model_evaluation_field_headers_async(): + client = ModelServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ImportModelEvaluationRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.import_model_evaluation), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_model_evaluation.ModelEvaluation() + ) + await client.import_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_import_model_evaluation_flattened(): + client = ModelServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_model_evaluation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model_evaluation.ModelEvaluation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.import_model_evaluation( + parent="parent_value", + model_evaluation=gca_model_evaluation.ModelEvaluation(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].model_evaluation + mock_val = gca_model_evaluation.ModelEvaluation(name="name_value") + assert arg == mock_val + + +def test_import_model_evaluation_flattened_error(): + client = ModelServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.import_model_evaluation( + model_service.ImportModelEvaluationRequest(), + parent="parent_value", + model_evaluation=gca_model_evaluation.ModelEvaluation(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_import_model_evaluation_flattened_async(): + client = ModelServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_model_evaluation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model_evaluation.ModelEvaluation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_model_evaluation.ModelEvaluation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.import_model_evaluation( + parent="parent_value", + model_evaluation=gca_model_evaluation.ModelEvaluation(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].model_evaluation + mock_val = gca_model_evaluation.ModelEvaluation(name="name_value") + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_import_model_evaluation_flattened_error_async(): + client = ModelServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.import_model_evaluation( + model_service.ImportModelEvaluationRequest(), + parent="parent_value", + model_evaluation=gca_model_evaluation.ModelEvaluation(name="name_value"), + ) + + @pytest.mark.parametrize( "request_type", [model_service.GetModelEvaluationRequest, dict,] ) @@ -2108,6 +2357,8 @@ def test_get_model_evaluation(request_type, transport: str = "grpc"): name="name_value", metrics_schema_uri="metrics_schema_uri_value", slice_dimensions=["slice_dimensions_value"], + data_item_schema_uri="data_item_schema_uri_value", + annotation_schema_uri="annotation_schema_uri_value", ) response = client.get_model_evaluation(request) @@ -2121,6 +2372,8 @@ def test_get_model_evaluation(request_type, transport: str = "grpc"): assert response.name == "name_value" assert response.metrics_schema_uri == "metrics_schema_uri_value" assert response.slice_dimensions == ["slice_dimensions_value"] + assert response.data_item_schema_uri == "data_item_schema_uri_value" + assert response.annotation_schema_uri == "annotation_schema_uri_value" def test_get_model_evaluation_empty_call(): @@ -2163,6 +2416,8 @@ async def test_get_model_evaluation_async( name="name_value", metrics_schema_uri="metrics_schema_uri_value", slice_dimensions=["slice_dimensions_value"], + data_item_schema_uri="data_item_schema_uri_value", + annotation_schema_uri="annotation_schema_uri_value", ) ) response = await client.get_model_evaluation(request) @@ -2177,6 +2432,8 @@ async def test_get_model_evaluation_async( assert response.name == "name_value" assert response.metrics_schema_uri == "metrics_schema_uri_value" assert response.slice_dimensions == ["slice_dimensions_value"] + assert response.data_item_schema_uri == "data_item_schema_uri_value" + assert response.annotation_schema_uri == "annotation_schema_uri_value" @pytest.mark.asyncio @@ -3444,6 +3701,7 @@ def test_model_service_base_transport(): "update_model", "delete_model", "export_model", + 
"import_model_evaluation", "get_model_evaluation", "list_model_evaluations", "get_model_evaluation_slice", diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py index a18e833705..d98d95606c 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py @@ -1795,20 +1795,18 @@ def test_parse_annotated_dataset_path(): def test_dataset_path(): project = "cuttlefish" - location = "mussel" - dataset = "winkle" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "nautilus", - "location": "scallop", - "dataset": "abalone", + "project": "winkle", + "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) @@ -1818,9 +1816,9 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "squid" - location = "clam" - dataset = "whelk" + project = "scallop" + location = "abalone" + dataset = "squid" expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, location=location, dataset=dataset, ) @@ -1830,9 +1828,9 @@ def test_dataset_path(): def test_parse_dataset_path(): expected = { - "project": "octopus", - "location": "oyster", - "dataset": "nudibranch", + "project": "clam", + "location": "whelk", + "dataset": "octopus", } path = MigrationServiceClient.dataset_path(**expected) @@ -1842,18 +1840,20 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "cuttlefish" - dataset = "mussel" - expected = 
"projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + project = "oyster" + location = "nudibranch" + dataset = "cuttlefish" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "winkle", + "project": "mussel", + "location": "winkle", "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py index 3862553420..0567bdaa1f 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py @@ -50,6 +50,9 @@ from google.cloud.aiplatform_v1beta1.types import model from google.cloud.aiplatform_v1beta1.types import model as gca_model from google.cloud.aiplatform_v1beta1.types import model_evaluation +from google.cloud.aiplatform_v1beta1.types import ( + model_evaluation as gca_model_evaluation, +) from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice from google.cloud.aiplatform_v1beta1.types import model_service from google.cloud.aiplatform_v1beta1.types import operation as gca_operation @@ -2089,6 +2092,246 @@ async def test_export_model_flattened_error_async(): ) +@pytest.mark.parametrize( + "request_type", [model_service.ImportModelEvaluationRequest, dict,] +) +def test_import_model_evaluation(request_type, transport: str = "grpc"): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_model_evaluation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model_evaluation.ModelEvaluation( + name="name_value", + metrics_schema_uri="metrics_schema_uri_value", + slice_dimensions=["slice_dimensions_value"], + ) + response = client.import_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ImportModelEvaluationRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_model_evaluation.ModelEvaluation) + assert response.name == "name_value" + assert response.metrics_schema_uri == "metrics_schema_uri_value" + assert response.slice_dimensions == ["slice_dimensions_value"] + + +def test_import_model_evaluation_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.import_model_evaluation), "__call__" + ) as call: + client.import_model_evaluation() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ImportModelEvaluationRequest() + + +@pytest.mark.asyncio +async def test_import_model_evaluation_async( + transport: str = "grpc_asyncio", + request_type=model_service.ImportModelEvaluationRequest, +): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_model_evaluation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_model_evaluation.ModelEvaluation( + name="name_value", + metrics_schema_uri="metrics_schema_uri_value", + slice_dimensions=["slice_dimensions_value"], + ) + ) + response = await client.import_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ImportModelEvaluationRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_model_evaluation.ModelEvaluation) + assert response.name == "name_value" + assert response.metrics_schema_uri == "metrics_schema_uri_value" + assert response.slice_dimensions == ["slice_dimensions_value"] + + +@pytest.mark.asyncio +async def test_import_model_evaluation_async_from_dict(): + await test_import_model_evaluation_async(request_type=dict) + + +def test_import_model_evaluation_field_headers(): + client = ModelServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ImportModelEvaluationRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_model_evaluation), "__call__" + ) as call: + call.return_value = gca_model_evaluation.ModelEvaluation() + client.import_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_import_model_evaluation_field_headers_async(): + client = ModelServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ImportModelEvaluationRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.import_model_evaluation), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_model_evaluation.ModelEvaluation() + ) + await client.import_model_evaluation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_import_model_evaluation_flattened(): + client = ModelServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_model_evaluation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model_evaluation.ModelEvaluation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.import_model_evaluation( + parent="parent_value", + model_evaluation=gca_model_evaluation.ModelEvaluation(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].model_evaluation + mock_val = gca_model_evaluation.ModelEvaluation(name="name_value") + assert arg == mock_val + + +def test_import_model_evaluation_flattened_error(): + client = ModelServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.import_model_evaluation( + model_service.ImportModelEvaluationRequest(), + parent="parent_value", + model_evaluation=gca_model_evaluation.ModelEvaluation(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_import_model_evaluation_flattened_async(): + client = ModelServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_model_evaluation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model_evaluation.ModelEvaluation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_model_evaluation.ModelEvaluation() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.import_model_evaluation( + parent="parent_value", + model_evaluation=gca_model_evaluation.ModelEvaluation(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].model_evaluation + mock_val = gca_model_evaluation.ModelEvaluation(name="name_value") + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_import_model_evaluation_flattened_error_async(): + client = ModelServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.import_model_evaluation( + model_service.ImportModelEvaluationRequest(), + parent="parent_value", + model_evaluation=gca_model_evaluation.ModelEvaluation(name="name_value"), + ) + + @pytest.mark.parametrize( "request_type", [model_service.GetModelEvaluationRequest, dict,] ) @@ -3446,6 +3689,7 @@ def test_model_service_base_transport(): "update_model", "delete_model", "export_model", + "import_model_evaluation", "get_model_evaluation", "list_model_evaluations", "get_model_evaluation_slice",