diff --git a/docs/aiplatform_v1/deployment_resource_pool_service.rst b/docs/aiplatform_v1/deployment_resource_pool_service.rst new file mode 100644 index 0000000000..04ea21857d --- /dev/null +++ b/docs/aiplatform_v1/deployment_resource_pool_service.rst @@ -0,0 +1,10 @@ +DeploymentResourcePoolService +----------------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.deployment_resource_pool_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1.services.deployment_resource_pool_service.pagers + :members: + :inherited-members: diff --git a/docs/aiplatform_v1/services_.rst b/docs/aiplatform_v1/services_.rst index fe8381e687..0f0396abb6 100644 --- a/docs/aiplatform_v1/services_.rst +++ b/docs/aiplatform_v1/services_.rst @@ -4,6 +4,7 @@ Services for Google Cloud Aiplatform v1 API :maxdepth: 2 dataset_service + deployment_resource_pool_service endpoint_service feature_online_store_admin_service feature_online_store_service diff --git a/google/cloud/aiplatform_v1/__init__.py b/google/cloud/aiplatform_v1/__init__.py index 2287068cfa..566f429e72 100644 --- a/google/cloud/aiplatform_v1/__init__.py +++ b/google/cloud/aiplatform_v1/__init__.py @@ -20,6 +20,12 @@ from .services.dataset_service import DatasetServiceClient from .services.dataset_service import DatasetServiceAsyncClient +from .services.deployment_resource_pool_service import ( + DeploymentResourcePoolServiceClient, +) +from .services.deployment_resource_pool_service import ( + DeploymentResourcePoolServiceAsyncClient, +) from .services.endpoint_service import EndpointServiceClient from .services.endpoint_service import EndpointServiceAsyncClient from .services.feature_online_store_admin_service import ( @@ -77,6 +83,18 @@ from .types.artifact import Artifact from .types.batch_prediction_job import BatchPredictionJob from .types.completion_stats import CompletionStats +from .types.content import Blob +from .types.content import Candidate +from 
.types.content import Citation +from .types.content import CitationMetadata +from .types.content import Content +from .types.content import FileData +from .types.content import GenerationConfig +from .types.content import Part +from .types.content import SafetyRating +from .types.content import SafetySetting +from .types.content import VideoMetadata +from .types.content import HarmCategory from .types.context import Context from .types.custom_job import ContainerSpec from .types.custom_job import CustomJob @@ -91,6 +109,7 @@ from .types.data_labeling_job import TrainingConfig from .types.dataset import Dataset from .types.dataset import ExportDataConfig +from .types.dataset import ExportFilterSplit from .types.dataset import ExportFractionSplit from .types.dataset import ImportDataConfig from .types.dataset_service import CreateDatasetOperationMetadata @@ -128,6 +147,20 @@ from .types.dataset_version import DatasetVersion from .types.deployed_index_ref import DeployedIndexRef from .types.deployed_model_ref import DeployedModelRef +from .types.deployment_resource_pool import DeploymentResourcePool +from .types.deployment_resource_pool_service import ( + CreateDeploymentResourcePoolOperationMetadata, +) +from .types.deployment_resource_pool_service import CreateDeploymentResourcePoolRequest +from .types.deployment_resource_pool_service import DeleteDeploymentResourcePoolRequest +from .types.deployment_resource_pool_service import GetDeploymentResourcePoolRequest +from .types.deployment_resource_pool_service import ListDeploymentResourcePoolsRequest +from .types.deployment_resource_pool_service import ListDeploymentResourcePoolsResponse +from .types.deployment_resource_pool_service import QueryDeployedModelsRequest +from .types.deployment_resource_pool_service import QueryDeployedModelsResponse +from .types.deployment_resource_pool_service import ( + UpdateDeploymentResourcePoolOperationMetadata, +) from .types.encryption_spec import EncryptionSpec from 
.types.endpoint import DeployedModel from .types.endpoint import Endpoint @@ -508,6 +541,8 @@ from .types.nas_job import NasJobSpec from .types.nas_job import NasTrial from .types.nas_job import NasTrialDetail +from .types.openapi import Schema +from .types.openapi import Type from .types.operation import DeleteOperationMetadata from .types.operation import GenericOperationMetadata from .types.pipeline_failure_policy import PipelineFailurePolicy @@ -537,6 +572,8 @@ from .types.prediction_service import DirectRawPredictResponse from .types.prediction_service import ExplainRequest from .types.prediction_service import ExplainResponse +from .types.prediction_service import GenerateContentRequest +from .types.prediction_service import GenerateContentResponse from .types.prediction_service import PredictRequest from .types.prediction_service import PredictResponse from .types.prediction_service import RawPredictRequest @@ -627,6 +664,10 @@ from .types.tensorboard_service import WriteTensorboardRunDataRequest from .types.tensorboard_service import WriteTensorboardRunDataResponse from .types.tensorboard_time_series import TensorboardTimeSeries +from .types.tool import FunctionCall +from .types.tool import FunctionDeclaration +from .types.tool import FunctionResponse +from .types.tool import Tool from .types.training_pipeline import FilterSplit from .types.training_pipeline import FractionSplit from .types.training_pipeline import InputDataConfig @@ -667,6 +708,7 @@ __all__ = ( "DatasetServiceAsyncClient", + "DeploymentResourcePoolServiceAsyncClient", "EndpointServiceAsyncClient", "FeatureOnlineStoreAdminServiceAsyncClient", "FeatureOnlineStoreServiceAsyncClient", @@ -727,6 +769,7 @@ "BatchReadTensorboardTimeSeriesDataResponse", "BigQueryDestination", "BigQuerySource", + "Blob", "BlurBaselineConfig", "BoolArray", "CancelBatchPredictionJobRequest", @@ -736,15 +779,19 @@ "CancelNasJobRequest", "CancelPipelineJobRequest", "CancelTrainingPipelineRequest", + "Candidate", 
"CheckTrialEarlyStoppingStateMetatdata", "CheckTrialEarlyStoppingStateRequest", "CheckTrialEarlyStoppingStateResponse", + "Citation", + "CitationMetadata", "CompleteTrialRequest", "CompletionStats", "ComputeTokensRequest", "ComputeTokensResponse", "ContainerRegistryDestination", "ContainerSpec", + "Content", "Context", "CopyModelOperationMetadata", "CopyModelRequest", @@ -760,6 +807,8 @@ "CreateDatasetRequest", "CreateDatasetVersionOperationMetadata", "CreateDatasetVersionRequest", + "CreateDeploymentResourcePoolOperationMetadata", + "CreateDeploymentResourcePoolRequest", "CreateEndpointOperationMetadata", "CreateEndpointRequest", "CreateEntityTypeOperationMetadata", @@ -816,6 +865,7 @@ "DeleteDataLabelingJobRequest", "DeleteDatasetRequest", "DeleteDatasetVersionRequest", + "DeleteDeploymentResourcePoolRequest", "DeleteEndpointRequest", "DeleteEntityTypeRequest", "DeleteExecutionRequest", @@ -859,6 +909,8 @@ "DeployedIndexRef", "DeployedModel", "DeployedModelRef", + "DeploymentResourcePool", + "DeploymentResourcePoolServiceClient", "DestinationFeatureSetting", "DirectPredictRequest", "DirectPredictResponse", @@ -895,6 +947,7 @@ "ExportFeatureValuesOperationMetadata", "ExportFeatureValuesRequest", "ExportFeatureValuesResponse", + "ExportFilterSplit", "ExportFractionSplit", "ExportModelOperationMetadata", "ExportModelRequest", @@ -923,12 +976,19 @@ "FeaturestoreServiceClient", "FetchFeatureValuesRequest", "FetchFeatureValuesResponse", + "FileData", "FilterSplit", "FindNeighborsRequest", "FindNeighborsResponse", "FractionSplit", + "FunctionCall", + "FunctionDeclaration", + "FunctionResponse", "GcsDestination", "GcsSource", + "GenerateContentRequest", + "GenerateContentResponse", + "GenerationConfig", "GenericOperationMetadata", "GetAnnotationSpecRequest", "GetArtifactRequest", @@ -938,6 +998,7 @@ "GetDataLabelingJobRequest", "GetDatasetRequest", "GetDatasetVersionRequest", + "GetDeploymentResourcePoolRequest", "GetEndpointRequest", "GetEntityTypeRequest", 
"GetExecutionRequest", @@ -969,6 +1030,7 @@ "GetTensorboardTimeSeriesRequest", "GetTrainingPipelineRequest", "GetTrialRequest", + "HarmCategory", "HyperparameterTuningJob", "IdMatcher", "ImportDataConfig", @@ -1011,6 +1073,8 @@ "ListDatasetVersionsResponse", "ListDatasetsRequest", "ListDatasetsResponse", + "ListDeploymentResourcePoolsRequest", + "ListDeploymentResourcePoolsResponse", "ListEndpointsRequest", "ListEndpointsResponse", "ListEntityTypesRequest", @@ -1121,6 +1185,7 @@ "NearestNeighborSearchOperationMetadata", "Neighbor", "NfsMount", + "Part", "PauseModelDeploymentMonitoringJobRequest", "PauseScheduleRequest", "PersistentDiskSpec", @@ -1157,6 +1222,8 @@ "PythonPackageSpec", "QueryArtifactLineageSubgraphRequest", "QueryContextLineageSubgraphRequest", + "QueryDeployedModelsRequest", + "QueryDeployedModelsResponse", "QueryExecutionInputsAndOutputsRequest", "RawPredictRequest", "ReadFeatureValuesRequest", @@ -1180,6 +1247,8 @@ "RestoreDatasetVersionRequest", "ResumeModelDeploymentMonitoringJobRequest", "ResumeScheduleRequest", + "SafetyRating", + "SafetySetting", "SampleConfig", "SampledShapleyAttribution", "SamplingStrategy", @@ -1188,6 +1257,7 @@ "Schedule", "ScheduleServiceClient", "Scheduling", + "Schema", "SearchDataItemsRequest", "SearchDataItemsResponse", "SearchFeaturesRequest", @@ -1230,10 +1300,12 @@ "TimeSeriesDataPoint", "TimestampSplit", "TokensInfo", + "Tool", "TrainingConfig", "TrainingPipeline", "Trial", "TrialContext", + "Type", "UndeployIndexOperationMetadata", "UndeployIndexRequest", "UndeployIndexResponse", @@ -1244,6 +1316,7 @@ "UpdateArtifactRequest", "UpdateContextRequest", "UpdateDatasetRequest", + "UpdateDeploymentResourcePoolOperationMetadata", "UpdateEndpointRequest", "UpdateEntityTypeRequest", "UpdateExecutionRequest", @@ -1281,6 +1354,7 @@ "UpsertDatapointsResponse", "UserActionReference", "Value", + "VideoMetadata", "VizierServiceClient", "WorkerPoolSpec", "WriteFeatureValuesPayload", diff --git 
a/google/cloud/aiplatform_v1/gapic_metadata.json b/google/cloud/aiplatform_v1/gapic_metadata.json index 990c4ce96b..3be91cfcdf 100644 --- a/google/cloud/aiplatform_v1/gapic_metadata.json +++ b/google/cloud/aiplatform_v1/gapic_metadata.json @@ -199,6 +199,70 @@ } } }, + "DeploymentResourcePoolService": { + "clients": { + "grpc": { + "libraryClient": "DeploymentResourcePoolServiceClient", + "rpcs": { + "CreateDeploymentResourcePool": { + "methods": [ + "create_deployment_resource_pool" + ] + }, + "DeleteDeploymentResourcePool": { + "methods": [ + "delete_deployment_resource_pool" + ] + }, + "GetDeploymentResourcePool": { + "methods": [ + "get_deployment_resource_pool" + ] + }, + "ListDeploymentResourcePools": { + "methods": [ + "list_deployment_resource_pools" + ] + }, + "QueryDeployedModels": { + "methods": [ + "query_deployed_models" + ] + } + } + }, + "grpc-async": { + "libraryClient": "DeploymentResourcePoolServiceAsyncClient", + "rpcs": { + "CreateDeploymentResourcePool": { + "methods": [ + "create_deployment_resource_pool" + ] + }, + "DeleteDeploymentResourcePool": { + "methods": [ + "delete_deployment_resource_pool" + ] + }, + "GetDeploymentResourcePool": { + "methods": [ + "get_deployment_resource_pool" + ] + }, + "ListDeploymentResourcePools": { + "methods": [ + "list_deployment_resource_pools" + ] + }, + "QueryDeployedModels": { + "methods": [ + "query_deployed_models" + ] + } + } + } + } + }, "EndpointService": { "clients": { "grpc": { @@ -2188,6 +2252,11 @@ "server_streaming_predict" ] }, + "StreamGenerateContent": { + "methods": [ + "stream_generate_content" + ] + }, "StreamingPredict": { "methods": [ "streaming_predict" @@ -2233,6 +2302,11 @@ "server_streaming_predict" ] }, + "StreamGenerateContent": { + "methods": [ + "stream_generate_content" + ] + }, "StreamingPredict": { "methods": [ "streaming_predict" diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/__init__.py 
b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/__init__.py new file mode 100644 index 0000000000..9d061415ec --- /dev/null +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import DeploymentResourcePoolServiceClient +from .async_client import DeploymentResourcePoolServiceAsyncClient + +__all__ = ( + "DeploymentResourcePoolServiceClient", + "DeploymentResourcePoolServiceAsyncClient", +) diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/async_client.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/async_client.py new file mode 100644 index 0000000000..f0237853ba --- /dev/null +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/async_client.py @@ -0,0 +1,1592 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.cloud.aiplatform_v1 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.deployment_resource_pool_service import pagers +from google.cloud.aiplatform_v1.types import deployment_resource_pool +from google.cloud.aiplatform_v1.types import ( + deployment_resource_pool as gca_deployment_resource_pool, +) +from google.cloud.aiplatform_v1.types import deployment_resource_pool_service +from google.cloud.aiplatform_v1.types import endpoint +from google.cloud.aiplatform_v1.types import machine_resources +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import 
DeploymentResourcePoolServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import DeploymentResourcePoolServiceGrpcAsyncIOTransport +from .client import DeploymentResourcePoolServiceClient + + +class DeploymentResourcePoolServiceAsyncClient: + """A service that manages the DeploymentResourcePool resource.""" + + _client: DeploymentResourcePoolServiceClient + + DEFAULT_ENDPOINT = DeploymentResourcePoolServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = DeploymentResourcePoolServiceClient.DEFAULT_MTLS_ENDPOINT + + deployment_resource_pool_path = staticmethod( + DeploymentResourcePoolServiceClient.deployment_resource_pool_path + ) + parse_deployment_resource_pool_path = staticmethod( + DeploymentResourcePoolServiceClient.parse_deployment_resource_pool_path + ) + endpoint_path = staticmethod(DeploymentResourcePoolServiceClient.endpoint_path) + parse_endpoint_path = staticmethod( + DeploymentResourcePoolServiceClient.parse_endpoint_path + ) + model_path = staticmethod(DeploymentResourcePoolServiceClient.model_path) + parse_model_path = staticmethod( + DeploymentResourcePoolServiceClient.parse_model_path + ) + common_billing_account_path = staticmethod( + DeploymentResourcePoolServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + DeploymentResourcePoolServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod( + DeploymentResourcePoolServiceClient.common_folder_path + ) + parse_common_folder_path = staticmethod( + DeploymentResourcePoolServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + DeploymentResourcePoolServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + DeploymentResourcePoolServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod( + DeploymentResourcePoolServiceClient.common_project_path + ) + parse_common_project_path = staticmethod( + 
DeploymentResourcePoolServiceClient.parse_common_project_path + ) + common_location_path = staticmethod( + DeploymentResourcePoolServiceClient.common_location_path + ) + parse_common_location_path = staticmethod( + DeploymentResourcePoolServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DeploymentResourcePoolServiceAsyncClient: The constructed client. + """ + return DeploymentResourcePoolServiceClient.from_service_account_info.__func__(DeploymentResourcePoolServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DeploymentResourcePoolServiceAsyncClient: The constructed client. + """ + return DeploymentResourcePoolServiceClient.from_service_account_file.__func__(DeploymentResourcePoolServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. 
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return DeploymentResourcePoolServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> DeploymentResourcePoolServiceTransport: + """Returns the transport used by the client instance. + + Returns: + DeploymentResourcePoolServiceTransport: The transport used by the client instance. 
+ """ + return self._client.transport + + get_transport_class = functools.partial( + type(DeploymentResourcePoolServiceClient).get_transport_class, + type(DeploymentResourcePoolServiceClient), + ) + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, DeploymentResourcePoolServiceTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the deployment resource pool service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.DeploymentResourcePoolServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. 
+ + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = DeploymentResourcePoolServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_deployment_resource_pool( + self, + request: Optional[ + Union[ + deployment_resource_pool_service.CreateDeploymentResourcePoolRequest, + dict, + ] + ] = None, + *, + parent: Optional[str] = None, + deployment_resource_pool: Optional[ + gca_deployment_resource_pool.DeploymentResourcePool + ] = None, + deployment_resource_pool_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Create a DeploymentResourcePool. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_create_deployment_resource_pool(): + # Create a client + client = aiplatform_v1.DeploymentResourcePoolServiceAsyncClient() + + # Initialize request argument(s) + deployment_resource_pool = aiplatform_v1.DeploymentResourcePool() + deployment_resource_pool.dedicated_resources.min_replica_count = 1803 + + request = aiplatform_v1.CreateDeploymentResourcePoolRequest( + parent="parent_value", + deployment_resource_pool=deployment_resource_pool, + deployment_resource_pool_id="deployment_resource_pool_id_value", + ) + + # Make the request + operation = client.create_deployment_resource_pool(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.CreateDeploymentResourcePoolRequest, dict]]): + The request object. Request message for + CreateDeploymentResourcePool method. + parent (:class:`str`): + Required. The parent location resource where this + DeploymentResourcePool will be created. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployment_resource_pool (:class:`google.cloud.aiplatform_v1.types.DeploymentResourcePool`): + Required. The DeploymentResourcePool + to create. + + This corresponds to the ``deployment_resource_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployment_resource_pool_id (:class:`str`): + Required. The ID to use for the DeploymentResourcePool, + which will become the final component of the + DeploymentResourcePool's resource name. 
+ + The maximum length is 63 characters, and valid + characters are ``/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/``. + + This corresponds to the ``deployment_resource_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.DeploymentResourcePool` A description of resources that can be shared by multiple DeployedModels, + whose underlying specification consists of a + DedicatedResources. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [parent, deployment_resource_pool, deployment_resource_pool_id] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = deployment_resource_pool_service.CreateDeploymentResourcePoolRequest( + request + ) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if deployment_resource_pool is not None: + request.deployment_resource_pool = deployment_resource_pool + if deployment_resource_pool_id is not None: + request.deployment_resource_pool_id = deployment_resource_pool_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_deployment_resource_pool, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_deployment_resource_pool.DeploymentResourcePool, + metadata_type=deployment_resource_pool_service.CreateDeploymentResourcePoolOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_deployment_resource_pool( + self, + request: Optional[ + Union[ + deployment_resource_pool_service.GetDeploymentResourcePoolRequest, dict + ] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> deployment_resource_pool.DeploymentResourcePool: + r"""Get a DeploymentResourcePool. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_get_deployment_resource_pool(): + # Create a client + client = aiplatform_v1.DeploymentResourcePoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetDeploymentResourcePoolRequest( + name="name_value", + ) + + # Make the request + response = await client.get_deployment_resource_pool(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.GetDeploymentResourcePoolRequest, dict]]): + The request object. Request message for + GetDeploymentResourcePool method. + name (:class:`str`): + Required. The name of the DeploymentResourcePool to + retrieve. Format: + ``projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.DeploymentResourcePool: + A description of resources that can + be shared by multiple DeployedModels, + whose underlying specification consists + of a DedicatedResources. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = deployment_resource_pool_service.GetDeploymentResourcePoolRequest( + request + ) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_deployment_resource_pool, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_deployment_resource_pools( + self, + request: Optional[ + Union[ + deployment_resource_pool_service.ListDeploymentResourcePoolsRequest, + dict, + ] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDeploymentResourcePoolsAsyncPager: + r"""List DeploymentResourcePools in a location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_list_deployment_resource_pools(): + # Create a client + client = aiplatform_v1.DeploymentResourcePoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListDeploymentResourcePoolsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_deployment_resource_pools(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.ListDeploymentResourcePoolsRequest, dict]]): + The request object. Request message for + ListDeploymentResourcePools method. + parent (:class:`str`): + Required. The parent Location which owns this collection + of DeploymentResourcePools. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.deployment_resource_pool_service.pagers.ListDeploymentResourcePoolsAsyncPager: + Response message for + ListDeploymentResourcePools method. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = deployment_resource_pool_service.ListDeploymentResourcePoolsRequest( + request + ) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_deployment_resource_pools, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListDeploymentResourcePoolsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_deployment_resource_pool( + self, + request: Optional[ + Union[ + deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest, + dict, + ] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Delete a DeploymentResourcePool. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_delete_deployment_resource_pool(): + # Create a client + client = aiplatform_v1.DeploymentResourcePoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteDeploymentResourcePoolRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_deployment_resource_pool(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.DeleteDeploymentResourcePoolRequest, dict]]): + The request object. Request message for + DeleteDeploymentResourcePool method. + name (:class:`str`): + Required. The name of the DeploymentResourcePool to + delete. Format: + ``projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. 
A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest( + request + ) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_deployment_resource_pool, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def query_deployed_models( + self, + request: Optional[ + Union[deployment_resource_pool_service.QueryDeployedModelsRequest, dict] + ] = None, + *, + deployment_resource_pool: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.QueryDeployedModelsAsyncPager: + r"""List DeployedModels that have been deployed on this + DeploymentResourcePool. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_query_deployed_models(): + # Create a client + client = aiplatform_v1.DeploymentResourcePoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.QueryDeployedModelsRequest( + deployment_resource_pool="deployment_resource_pool_value", + ) + + # Make the request + page_result = client.query_deployed_models(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.QueryDeployedModelsRequest, dict]]): + The request object. Request message for + QueryDeployedModels method. + deployment_resource_pool (:class:`str`): + Required. The name of the target DeploymentResourcePool + to query. Format: + ``projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}`` + + This corresponds to the ``deployment_resource_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.deployment_resource_pool_service.pagers.QueryDeployedModelsAsyncPager: + Response message for + QueryDeployedModels method. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([deployment_resource_pool]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = deployment_resource_pool_service.QueryDeployedModelsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if deployment_resource_pool is not None: + request.deployment_resource_pool = deployment_resource_pool + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.query_deployed_models, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("deployment_resource_pool", request.deployment_resource_pool),) + ), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. 
+ response = pagers.QueryDeployedModelsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def __aenter__(self) -> "DeploymentResourcePoolServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("DeploymentResourcePoolServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/client.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/client.py new file mode 100644 index 0000000000..766856dae7 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/client.py @@ -0,0 +1,1883 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) + +from google.cloud.aiplatform_v1 import gapic_version as package_version + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.deployment_resource_pool_service import pagers +from google.cloud.aiplatform_v1.types import deployment_resource_pool +from google.cloud.aiplatform_v1.types import ( + deployment_resource_pool as gca_deployment_resource_pool, +) +from google.cloud.aiplatform_v1.types import deployment_resource_pool_service +from google.cloud.aiplatform_v1.types import endpoint +from google.cloud.aiplatform_v1.types import machine_resources +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import 
timestamp_pb2 # type: ignore +from .transports.base import DeploymentResourcePoolServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import DeploymentResourcePoolServiceGrpcTransport +from .transports.grpc_asyncio import DeploymentResourcePoolServiceGrpcAsyncIOTransport + + +class DeploymentResourcePoolServiceClientMeta(type): + """Metaclass for the DeploymentResourcePoolService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[DeploymentResourcePoolServiceTransport]] + _transport_registry["grpc"] = DeploymentResourcePoolServiceGrpcTransport + _transport_registry[ + "grpc_asyncio" + ] = DeploymentResourcePoolServiceGrpcAsyncIOTransport + + def get_transport_class( + cls, + label: Optional[str] = None, + ) -> Type[DeploymentResourcePoolServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class DeploymentResourcePoolServiceClient( + metaclass=DeploymentResourcePoolServiceClientMeta +): + """A service that manages the DeploymentResourcePool resource.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. 
+ """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DeploymentResourcePoolServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DeploymentResourcePoolServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> DeploymentResourcePoolServiceTransport: + """Returns the transport used by the client instance. 
+ + Returns: + DeploymentResourcePoolServiceTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def deployment_resource_pool_path( + project: str, + location: str, + deployment_resource_pool: str, + ) -> str: + """Returns a fully-qualified deployment_resource_pool string.""" + return "projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}".format( + project=project, + location=location, + deployment_resource_pool=deployment_resource_pool, + ) + + @staticmethod + def parse_deployment_resource_pool_path(path: str) -> Dict[str, str]: + """Parses a deployment_resource_pool path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/deploymentResourcePools/(?P<deployment_resource_pool>.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def endpoint_path( + project: str, + location: str, + endpoint: str, + ) -> str: + """Returns a fully-qualified endpoint string.""" + return "projects/{project}/locations/{location}/endpoints/{endpoint}".format( + project=project, + location=location, + endpoint=endpoint, + ) + + @staticmethod + def parse_endpoint_path(path: str) -> Dict[str, str]: + """Parses a endpoint path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/endpoints/(?P<endpoint>.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def model_path( + project: str, + location: str, + model: str, + ) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format( + project=project, + location=location, + model=model, + ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str, str]: + """Parses a model path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path( + billing_account: 
str, + ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path( + folder: str, + ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format( + folder=folder, + ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P<folder>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path( + organization: str, + ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format( + organization=organization, + ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P<organization>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path( + project: str, + ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format( + project=project, + ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path( + project: str, + location: str, + ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a 
location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[client_options_lib.ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. 
+ """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, DeploymentResourcePoolServiceTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the deployment resource pool service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ transport (Union[str, DeploymentResourcePoolServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( + client_options + ) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, DeploymentResourcePoolServiceTransport): + # transport is a DeploymentResourcePoolServiceTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=client_options.api_audience, + ) + + def create_deployment_resource_pool( + self, + request: Optional[ + Union[ + deployment_resource_pool_service.CreateDeploymentResourcePoolRequest, + dict, + ] + ] = None, + *, + parent: Optional[str] = None, + deployment_resource_pool: Optional[ + gca_deployment_resource_pool.DeploymentResourcePool + ] = None, + deployment_resource_pool_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Create a DeploymentResourcePool. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_create_deployment_resource_pool(): + # Create a client + client = aiplatform_v1.DeploymentResourcePoolServiceClient() + + # Initialize request argument(s) + deployment_resource_pool = aiplatform_v1.DeploymentResourcePool() + deployment_resource_pool.dedicated_resources.min_replica_count = 1803 + + request = aiplatform_v1.CreateDeploymentResourcePoolRequest( + parent="parent_value", + deployment_resource_pool=deployment_resource_pool, + deployment_resource_pool_id="deployment_resource_pool_id_value", + ) + + # Make the request + operation = client.create_deployment_resource_pool(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateDeploymentResourcePoolRequest, dict]): + The request object. Request message for + CreateDeploymentResourcePool method. + parent (str): + Required. The parent location resource where this + DeploymentResourcePool will be created. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployment_resource_pool (google.cloud.aiplatform_v1.types.DeploymentResourcePool): + Required. The DeploymentResourcePool + to create. + + This corresponds to the ``deployment_resource_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployment_resource_pool_id (str): + Required. The ID to use for the DeploymentResourcePool, + which will become the final component of the + DeploymentResourcePool's resource name. 
+ + The maximum length is 63 characters, and valid + characters are ``/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/``. + + This corresponds to the ``deployment_resource_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.DeploymentResourcePool` A description of resources that can be shared by multiple DeployedModels, + whose underlying specification consists of a + DedicatedResources. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [parent, deployment_resource_pool, deployment_resource_pool_id] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a deployment_resource_pool_service.CreateDeploymentResourcePoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, + deployment_resource_pool_service.CreateDeploymentResourcePoolRequest, + ): + request = ( + deployment_resource_pool_service.CreateDeploymentResourcePoolRequest( + request + ) + ) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if deployment_resource_pool is not None: + request.deployment_resource_pool = deployment_resource_pool + if deployment_resource_pool_id is not None: + request.deployment_resource_pool_id = deployment_resource_pool_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.create_deployment_resource_pool + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_deployment_resource_pool.DeploymentResourcePool, + metadata_type=deployment_resource_pool_service.CreateDeploymentResourcePoolOperationMetadata, + ) + + # Done; return the response. + return response + + def get_deployment_resource_pool( + self, + request: Optional[ + Union[ + deployment_resource_pool_service.GetDeploymentResourcePoolRequest, dict + ] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> deployment_resource_pool.DeploymentResourcePool: + r"""Get a DeploymentResourcePool. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_get_deployment_resource_pool(): + # Create a client + client = aiplatform_v1.DeploymentResourcePoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetDeploymentResourcePoolRequest( + name="name_value", + ) + + # Make the request + response = client.get_deployment_resource_pool(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetDeploymentResourcePoolRequest, dict]): + The request object. Request message for + GetDeploymentResourcePool method. + name (str): + Required. The name of the DeploymentResourcePool to + retrieve. Format: + ``projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.DeploymentResourcePool: + A description of resources that can + be shared by multiple DeployedModels, + whose underlying specification consists + of a DedicatedResources. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + # Minor optimization to avoid making a copy if the user passes + # in a deployment_resource_pool_service.GetDeploymentResourcePoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, deployment_resource_pool_service.GetDeploymentResourcePoolRequest + ): + request = deployment_resource_pool_service.GetDeploymentResourcePoolRequest( + request + ) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.get_deployment_resource_pool + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_deployment_resource_pools( + self, + request: Optional[ + Union[ + deployment_resource_pool_service.ListDeploymentResourcePoolsRequest, + dict, + ] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDeploymentResourcePoolsPager: + r"""List DeploymentResourcePools in a location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_list_deployment_resource_pools(): + # Create a client + client = aiplatform_v1.DeploymentResourcePoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListDeploymentResourcePoolsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_deployment_resource_pools(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListDeploymentResourcePoolsRequest, dict]): + The request object. Request message for + ListDeploymentResourcePools method. + parent (str): + Required. The parent Location which owns this collection + of DeploymentResourcePools. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.deployment_resource_pool_service.pagers.ListDeploymentResourcePoolsPager: + Response message for + ListDeploymentResourcePools method. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a deployment_resource_pool_service.ListDeploymentResourcePoolsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, deployment_resource_pool_service.ListDeploymentResourcePoolsRequest + ): + request = ( + deployment_resource_pool_service.ListDeploymentResourcePoolsRequest( + request + ) + ) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.list_deployment_resource_pools + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListDeploymentResourcePoolsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete_deployment_resource_pool( + self, + request: Optional[ + Union[ + deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest, + dict, + ] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Delete a DeploymentResourcePool. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_delete_deployment_resource_pool(): + # Create a client + client = aiplatform_v1.DeploymentResourcePoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteDeploymentResourcePoolRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_deployment_resource_pool(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteDeploymentResourcePoolRequest, dict]): + The request object. Request message for + DeleteDeploymentResourcePool method. + name (str): + Required. The name of the DeploymentResourcePool to + delete. Format: + ``projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, + deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest, + ): + request = ( + deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest( + request + ) + ) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.delete_deployment_resource_pool + ] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def query_deployed_models( + self, + request: Optional[ + Union[deployment_resource_pool_service.QueryDeployedModelsRequest, dict] + ] = None, + *, + deployment_resource_pool: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.QueryDeployedModelsPager: + r"""List DeployedModels that have been deployed on this + DeploymentResourcePool. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_query_deployed_models(): + # Create a client + client = aiplatform_v1.DeploymentResourcePoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.QueryDeployedModelsRequest( + deployment_resource_pool="deployment_resource_pool_value", + ) + + # Make the request + page_result = client.query_deployed_models(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.QueryDeployedModelsRequest, dict]): + The request object. 
Request message for + QueryDeployedModels method. + deployment_resource_pool (str): + Required. The name of the target DeploymentResourcePool + to query. Format: + ``projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}`` + + This corresponds to the ``deployment_resource_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.deployment_resource_pool_service.pagers.QueryDeployedModelsPager: + Response message for + QueryDeployedModels method. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([deployment_resource_pool]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a deployment_resource_pool_service.QueryDeployedModelsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, deployment_resource_pool_service.QueryDeployedModelsRequest + ): + request = deployment_resource_pool_service.QueryDeployedModelsRequest( + request + ) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if deployment_resource_pool is not None: + request.deployment_resource_pool = deployment_resource_pool + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.query_deployed_models] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("deployment_resource_pool", request.deployment_resource_pool),) + ), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.QueryDeployedModelsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "DeploymentResourcePoolServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. 
+ + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("DeploymentResourcePoolServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/pagers.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/pagers.py new file mode 100644 index 0000000000..12394e6ad3 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/pagers.py @@ -0,0 +1,316 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import ( + Any, + AsyncIterator, + Awaitable, + Callable, + Sequence, + Tuple, + Optional, + Iterator, +) + +from google.cloud.aiplatform_v1.types import deployment_resource_pool +from google.cloud.aiplatform_v1.types import deployment_resource_pool_service +from google.cloud.aiplatform_v1.types import endpoint + + +class ListDeploymentResourcePoolsPager: + """A pager for iterating through ``list_deployment_resource_pools`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListDeploymentResourcePoolsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``deployment_resource_pools`` field. 
class ListDeploymentResourcePoolsPager:
    """A pager for iterating through ``list_deployment_resource_pools`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListDeploymentResourcePoolsResponse` object, and
    provides an ``__iter__`` method to iterate through its
    ``deployment_resource_pools`` field.

    If there are more pages, the ``__iter__`` method will make additional
    ``ListDeploymentResourcePools`` requests and continue to iterate
    through the ``deployment_resource_pools`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.aiplatform_v1.types.ListDeploymentResourcePoolsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(
        self,
        method: Callable[
            ..., deployment_resource_pool_service.ListDeploymentResourcePoolsResponse
        ],
        request: deployment_resource_pool_service.ListDeploymentResourcePoolsRequest,
        response: deployment_resource_pool_service.ListDeploymentResourcePoolsResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1.types.ListDeploymentResourcePoolsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListDeploymentResourcePoolsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Re-wrap the request so that mutating ``page_token`` below never
        # aliases the caller's object.
        self._request = deployment_resource_pool_service.ListDeploymentResourcePoolsRequest(
            request
        )
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are delegated to the most recent response.
        return getattr(self._response, name)

    @property
    def pages(
        self,
    ) -> Iterator[deployment_resource_pool_service.ListDeploymentResourcePoolsResponse]:
        # Lazily fetch successive pages, always yielding the initial
        # response first.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[deployment_resource_pool.DeploymentResourcePool]:
        for resp_page in self.pages:
            yield from resp_page.deployment_resource_pools

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"


class ListDeploymentResourcePoolsAsyncPager:
    """A pager for iterating through ``list_deployment_resource_pools`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.aiplatform_v1.types.ListDeploymentResourcePoolsResponse` object, and
    provides an ``__aiter__`` method to iterate through its
    ``deployment_resource_pools`` field.

    If there are more pages, the ``__aiter__`` method will make additional
    ``ListDeploymentResourcePools`` requests and continue to iterate
    through the ``deployment_resource_pools`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.aiplatform_v1.types.ListDeploymentResourcePoolsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(
        self,
        method: Callable[
            ...,
            Awaitable[
                deployment_resource_pool_service.ListDeploymentResourcePoolsResponse
            ],
        ],
        request: deployment_resource_pool_service.ListDeploymentResourcePoolsRequest,
        response: deployment_resource_pool_service.ListDeploymentResourcePoolsResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1.types.ListDeploymentResourcePoolsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.ListDeploymentResourcePoolsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Defensive copy of the request; ``page_token`` is rewritten as
        # pages are consumed.
        self._request = deployment_resource_pool_service.ListDeploymentResourcePoolsRequest(
            request
        )
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are delegated to the most recent response.
        return getattr(self._response, name)

    @property
    async def pages(
        self,
    ) -> AsyncIterator[
        deployment_resource_pool_service.ListDeploymentResourcePoolsResponse
    ]:
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(
        self,
    ) -> AsyncIterator[deployment_resource_pool.DeploymentResourcePool]:
        # Flatten the async stream of pages into a stream of items.
        async def _flatten():
            async for resp_page in self.pages:
                for item in resp_page.deployment_resource_pools:
                    yield item

        return _flatten()

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"
class QueryDeployedModelsPager:
    """A pager for iterating through ``query_deployed_models`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.aiplatform_v1.types.QueryDeployedModelsResponse` object, and
    provides an ``__iter__`` method to iterate through its
    ``deployed_models`` field.

    If there are more pages, the ``__iter__`` method will make additional
    ``QueryDeployedModels`` requests and continue to iterate
    through the ``deployed_models`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.aiplatform_v1.types.QueryDeployedModelsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(
        self,
        method: Callable[
            ..., deployment_resource_pool_service.QueryDeployedModelsResponse
        ],
        request: deployment_resource_pool_service.QueryDeployedModelsRequest,
        response: deployment_resource_pool_service.QueryDeployedModelsResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1.types.QueryDeployedModelsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.QueryDeployedModelsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so that page_token mutation stays private to
        # this pager.
        self._request = deployment_resource_pool_service.QueryDeployedModelsRequest(
            request
        )
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are delegated to the most recent response.
        return getattr(self._response, name)

    @property
    def pages(
        self,
    ) -> Iterator[deployment_resource_pool_service.QueryDeployedModelsResponse]:
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[endpoint.DeployedModel]:
        for resp_page in self.pages:
            yield from resp_page.deployed_models

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"


class QueryDeployedModelsAsyncPager:
    """A pager for iterating through ``query_deployed_models`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.aiplatform_v1.types.QueryDeployedModelsResponse` object, and
    provides an ``__aiter__`` method to iterate through its
    ``deployed_models`` field.

    If there are more pages, the ``__aiter__`` method will make additional
    ``QueryDeployedModels`` requests and continue to iterate
    through the ``deployed_models`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.aiplatform_v1.types.QueryDeployedModelsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(
        self,
        method: Callable[
            ..., Awaitable[deployment_resource_pool_service.QueryDeployedModelsResponse]
        ],
        request: deployment_resource_pool_service.QueryDeployedModelsRequest,
        response: deployment_resource_pool_service.QueryDeployedModelsResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.aiplatform_v1.types.QueryDeployedModelsRequest):
                The initial request object.
            response (google.cloud.aiplatform_v1.types.QueryDeployedModelsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so that page_token mutation stays private to
        # this pager.
        self._request = deployment_resource_pool_service.QueryDeployedModelsRequest(
            request
        )
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are delegated to the most recent response.
        return getattr(self._response, name)

    @property
    async def pages(
        self,
    ) -> AsyncIterator[deployment_resource_pool_service.QueryDeployedModelsResponse]:
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterator[endpoint.DeployedModel]:
        # Flatten the async stream of pages into a stream of items.
        async def _flatten():
            async for resp_page in self.pages:
                for item in resp_page.deployed_models:
                    yield item

        return _flatten()

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import DeploymentResourcePoolServiceTransport +from .grpc import DeploymentResourcePoolServiceGrpcTransport +from .grpc_asyncio import DeploymentResourcePoolServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[DeploymentResourcePoolServiceTransport]] +_transport_registry["grpc"] = DeploymentResourcePoolServiceGrpcTransport +_transport_registry["grpc_asyncio"] = DeploymentResourcePoolServiceGrpcAsyncIOTransport + +__all__ = ( + "DeploymentResourcePoolServiceTransport", + "DeploymentResourcePoolServiceGrpcTransport", + "DeploymentResourcePoolServiceGrpcAsyncIOTransport", +) diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/base.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/base.py new file mode 100644 index 0000000000..495dbdb594 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/base.py @@ -0,0 +1,327 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union

from google.cloud.aiplatform_v1 import gapic_version as package_version

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.aiplatform_v1.types import deployment_resource_pool
from google.cloud.aiplatform_v1.types import deployment_resource_pool_service
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
from google.longrunning import operations_pb2  # type: ignore

# Default client metadata: carries the gapic version in the user-agent.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)


class DeploymentResourcePoolServiceTransport(abc.ABC):
    """Abstract transport class for DeploymentResourcePoolService.

    Handles credential resolution and host normalization; concrete
    subclasses (gRPC / gRPC-asyncio) supply the RPC callables declared
    as properties below.
    """

    AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)

    DEFAULT_HOST: str = "aiplatform.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Keep the caller-supplied scopes for subclasses.
        self._scopes = scopes

        # Passing both explicit credentials and a credentials file is
        # ambiguous; refuse it.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )
            # The GDC-H audience is applied only to credentials resolved from
            # the environment, never to user-supplied credentials or files.
            if hasattr(credentials, "with_gdch_audience"):
                credentials = credentials.with_gdch_audience(
                    api_audience if api_audience else host
                )

        # Service account credentials prefer self-signed JWTs when the
        # installed google-auth supports them and the caller opted in.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        self._credentials = credentials

        # Normalize the host: default to port 443 (HTTPS) when no port given.
        self._host = host if ":" in host else host + ":443"

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods. The mapping keys are the concrete
        # RPC callables provided by the subclass's properties.
        self._wrapped_methods = {
            handler: gapic_v1.method.wrap_method(
                handler,
                default_timeout=None,
                client_info=client_info,
            )
            for handler in (
                self.create_deployment_resource_pool,
                self.get_deployment_resource_pool,
                self.list_deployment_resource_pools,
                self.delete_deployment_resource_pool,
                self.query_deployed_models,
            )
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def operations_client(self):
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()

    @property
    def create_deployment_resource_pool(
        self,
    ) -> Callable[
        [deployment_resource_pool_service.CreateDeploymentResourcePoolRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def get_deployment_resource_pool(
        self,
    ) -> Callable[
        [deployment_resource_pool_service.GetDeploymentResourcePoolRequest],
        Union[
            deployment_resource_pool.DeploymentResourcePool,
            Awaitable[deployment_resource_pool.DeploymentResourcePool],
        ],
    ]:
        raise NotImplementedError()

    @property
    def list_deployment_resource_pools(
        self,
    ) -> Callable[
        [deployment_resource_pool_service.ListDeploymentResourcePoolsRequest],
        Union[
            deployment_resource_pool_service.ListDeploymentResourcePoolsResponse,
            Awaitable[
                deployment_resource_pool_service.ListDeploymentResourcePoolsResponse
            ],
        ],
    ]:
        raise NotImplementedError()

    @property
    def delete_deployment_resource_pool(
        self,
    ) -> Callable[
        [deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def query_deployed_models(
        self,
    ) -> Callable[
        [deployment_resource_pool_service.QueryDeployedModelsRequest],
        Union[
            deployment_resource_pool_service.QueryDeployedModelsResponse,
            Awaitable[deployment_resource_pool_service.QueryDeployedModelsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest],
        Union[
            operations_pb2.ListOperationsResponse,
            Awaitable[operations_pb2.ListOperationsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def get_operation(
        self,
    ) -> Callable[
        [operations_pb2.GetOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def cancel_operation(
        self,
    ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
        raise NotImplementedError()

    @property
    def delete_operation(
        self,
    ) -> Callable[[operations_pb2.DeleteOperationRequest], None]:
        raise NotImplementedError()

    @property
    def wait_operation(
        self,
    ) -> Callable[
        [operations_pb2.WaitOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def set_iam_policy(
        self,
    ) -> Callable[
        [iam_policy_pb2.SetIamPolicyRequest],
        Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
    ]:
        raise NotImplementedError()

    @property
    def get_iam_policy(
        self,
    ) -> Callable[
        [iam_policy_pb2.GetIamPolicyRequest],
        Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
    ]:
        raise NotImplementedError()

    @property
    def test_iam_permissions(
        self,
    ) -> Callable[
        [iam_policy_pb2.TestIamPermissionsRequest],
        Union[
            iam_policy_pb2.TestIamPermissionsResponse,
            Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def get_location(
        self,
    ) -> Callable[
        [locations_pb2.GetLocationRequest],
        Union[locations_pb2.Location, Awaitable[locations_pb2.Location]],
    ]:
        raise NotImplementedError()

    @property
    def list_locations(
        self,
    ) -> Callable[
        [locations_pb2.ListLocationsRequest],
        Union[
            locations_pb2.ListLocationsResponse,
            Awaitable[locations_pb2.ListLocationsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def kind(self) -> str:
        raise NotImplementedError()


__all__ = ("DeploymentResourcePoolServiceTransport",)
b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/grpc.py new file mode 100644 index 0000000000..2b74817de0 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/grpc.py @@ -0,0 +1,620 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import deployment_resource_pool +from google.cloud.aiplatform_v1.types import deployment_resource_pool_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .base import DeploymentResourcePoolServiceTransport, DEFAULT_CLIENT_INFO + + +class DeploymentResourcePoolServiceGrpcTransport( + DeploymentResourcePoolServiceTransport +): + """gRPC backend transport for DeploymentResourcePoolService. + + A service that manages the DeploymentResourcePool resource. 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + + # Return the client from cache. + return self._operations_client + + @property + def create_deployment_resource_pool( + self, + ) -> Callable[ + [deployment_resource_pool_service.CreateDeploymentResourcePoolRequest], + operations_pb2.Operation, + ]: + r"""Return a callable for the create deployment resource + pool method over gRPC. + + Create a DeploymentResourcePool. + + Returns: + Callable[[~.CreateDeploymentResourcePoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_deployment_resource_pool" not in self._stubs: + self._stubs[ + "create_deployment_resource_pool" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/CreateDeploymentResourcePool", + request_serializer=deployment_resource_pool_service.CreateDeploymentResourcePoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_deployment_resource_pool"] + + @property + def get_deployment_resource_pool( + self, + ) -> Callable[ + [deployment_resource_pool_service.GetDeploymentResourcePoolRequest], + deployment_resource_pool.DeploymentResourcePool, + ]: + r"""Return a callable for the get deployment resource pool method over gRPC. + + Get a DeploymentResourcePool. + + Returns: + Callable[[~.GetDeploymentResourcePoolRequest], + ~.DeploymentResourcePool]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_deployment_resource_pool" not in self._stubs: + self._stubs["get_deployment_resource_pool"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/GetDeploymentResourcePool", + request_serializer=deployment_resource_pool_service.GetDeploymentResourcePoolRequest.serialize, + response_deserializer=deployment_resource_pool.DeploymentResourcePool.deserialize, + ) + return self._stubs["get_deployment_resource_pool"] + + @property + def list_deployment_resource_pools( + self, + ) -> Callable[ + [deployment_resource_pool_service.ListDeploymentResourcePoolsRequest], + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse, + ]: + r"""Return a callable for the list deployment resource pools method over gRPC. + + List DeploymentResourcePools in a location. 
+ + Returns: + Callable[[~.ListDeploymentResourcePoolsRequest], + ~.ListDeploymentResourcePoolsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_deployment_resource_pools" not in self._stubs: + self._stubs[ + "list_deployment_resource_pools" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/ListDeploymentResourcePools", + request_serializer=deployment_resource_pool_service.ListDeploymentResourcePoolsRequest.serialize, + response_deserializer=deployment_resource_pool_service.ListDeploymentResourcePoolsResponse.deserialize, + ) + return self._stubs["list_deployment_resource_pools"] + + @property + def delete_deployment_resource_pool( + self, + ) -> Callable[ + [deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest], + operations_pb2.Operation, + ]: + r"""Return a callable for the delete deployment resource + pool method over gRPC. + + Delete a DeploymentResourcePool. + + Returns: + Callable[[~.DeleteDeploymentResourcePoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_deployment_resource_pool" not in self._stubs: + self._stubs[ + "delete_deployment_resource_pool" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/DeleteDeploymentResourcePool", + request_serializer=deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_deployment_resource_pool"] + + @property + def query_deployed_models( + self, + ) -> Callable[ + [deployment_resource_pool_service.QueryDeployedModelsRequest], + deployment_resource_pool_service.QueryDeployedModelsResponse, + ]: + r"""Return a callable for the query deployed models method over gRPC. + + List DeployedModels that have been deployed on this + DeploymentResourcePool. + + Returns: + Callable[[~.QueryDeployedModelsRequest], + ~.QueryDeployedModelsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "query_deployed_models" not in self._stubs: + self._stubs["query_deployed_models"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/QueryDeployedModels", + request_serializer=deployment_resource_pool_service.QueryDeployedModelsRequest.serialize, + response_deserializer=deployment_resource_pool_service.QueryDeployedModelsResponse.deserialize, + ) + return self._stubs["query_deployed_models"] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "wait_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the get location method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. 
+ Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. 
+ Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("DeploymentResourcePoolServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..43b6ad9cb3 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/grpc_asyncio.py @@ -0,0 +1,621 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import deployment_resource_pool +from google.cloud.aiplatform_v1.types import deployment_resource_pool_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .base import DeploymentResourcePoolServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import DeploymentResourcePoolServiceGrpcTransport + + +class DeploymentResourcePoolServiceGrpcAsyncIOTransport( + DeploymentResourcePoolServiceTransport +): + """gRPC AsyncIO backend transport for DeploymentResourcePoolService. + + A service that manages the DeploymentResourcePool resource. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_deployment_resource_pool( + self, + ) -> Callable[ + [deployment_resource_pool_service.CreateDeploymentResourcePoolRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the create deployment resource + pool method over gRPC. + + Create a DeploymentResourcePool. + + Returns: + Callable[[~.CreateDeploymentResourcePoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_deployment_resource_pool" not in self._stubs: + self._stubs[ + "create_deployment_resource_pool" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/CreateDeploymentResourcePool", + request_serializer=deployment_resource_pool_service.CreateDeploymentResourcePoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_deployment_resource_pool"] + + @property + def get_deployment_resource_pool( + self, + ) -> Callable[ + [deployment_resource_pool_service.GetDeploymentResourcePoolRequest], + Awaitable[deployment_resource_pool.DeploymentResourcePool], + ]: + r"""Return a callable for the get deployment resource pool method over gRPC. + + Get a DeploymentResourcePool. + + Returns: + Callable[[~.GetDeploymentResourcePoolRequest], + Awaitable[~.DeploymentResourcePool]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_deployment_resource_pool" not in self._stubs: + self._stubs["get_deployment_resource_pool"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/GetDeploymentResourcePool", + request_serializer=deployment_resource_pool_service.GetDeploymentResourcePoolRequest.serialize, + response_deserializer=deployment_resource_pool.DeploymentResourcePool.deserialize, + ) + return self._stubs["get_deployment_resource_pool"] + + @property + def list_deployment_resource_pools( + self, + ) -> Callable[ + [deployment_resource_pool_service.ListDeploymentResourcePoolsRequest], + Awaitable[deployment_resource_pool_service.ListDeploymentResourcePoolsResponse], + ]: + r"""Return a callable for the list deployment resource pools method over gRPC. + + List DeploymentResourcePools in a location. 
+ + Returns: + Callable[[~.ListDeploymentResourcePoolsRequest], + Awaitable[~.ListDeploymentResourcePoolsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_deployment_resource_pools" not in self._stubs: + self._stubs[ + "list_deployment_resource_pools" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/ListDeploymentResourcePools", + request_serializer=deployment_resource_pool_service.ListDeploymentResourcePoolsRequest.serialize, + response_deserializer=deployment_resource_pool_service.ListDeploymentResourcePoolsResponse.deserialize, + ) + return self._stubs["list_deployment_resource_pools"] + + @property + def delete_deployment_resource_pool( + self, + ) -> Callable[ + [deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the delete deployment resource + pool method over gRPC. + + Delete a DeploymentResourcePool. + + Returns: + Callable[[~.DeleteDeploymentResourcePoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_deployment_resource_pool" not in self._stubs: + self._stubs[ + "delete_deployment_resource_pool" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/DeleteDeploymentResourcePool", + request_serializer=deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_deployment_resource_pool"] + + @property + def query_deployed_models( + self, + ) -> Callable[ + [deployment_resource_pool_service.QueryDeployedModelsRequest], + Awaitable[deployment_resource_pool_service.QueryDeployedModelsResponse], + ]: + r"""Return a callable for the query deployed models method over gRPC. + + List DeployedModels that have been deployed on this + DeploymentResourcePool. + + Returns: + Callable[[~.QueryDeployedModelsRequest], + Awaitable[~.QueryDeployedModelsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "query_deployed_models" not in self._stubs: + self._stubs["query_deployed_models"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/QueryDeployedModels", + request_serializer=deployment_resource_pool_service.QueryDeployedModelsRequest.serialize, + response_deserializer=deployment_resource_pool_service.QueryDeployedModelsResponse.deserialize, + ) + return self._stubs["query_deployed_models"] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "wait_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the get location method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. 
+ Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. 
+ Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("DeploymentResourcePoolServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py index 732e590201..eec859f388 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py @@ -70,6 +70,12 @@ class EndpointServiceAsyncClient: DEFAULT_ENDPOINT = EndpointServiceClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = EndpointServiceClient.DEFAULT_MTLS_ENDPOINT + deployment_resource_pool_path = staticmethod( + EndpointServiceClient.deployment_resource_pool_path + ) + parse_deployment_resource_pool_path = staticmethod( + EndpointServiceClient.parse_deployment_resource_pool_path + ) endpoint_path = staticmethod(EndpointServiceClient.endpoint_path) parse_endpoint_path = staticmethod(EndpointServiceClient.parse_endpoint_path) model_path = staticmethod(EndpointServiceClient.model_path) diff --git 
a/google/cloud/aiplatform_v1/services/endpoint_service/client.py b/google/cloud/aiplatform_v1/services/endpoint_service/client.py index efc1d8cf4e..23beca18e1 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/client.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/client.py @@ -187,6 +187,28 @@ def transport(self) -> EndpointServiceTransport: """ return self._transport + @staticmethod + def deployment_resource_pool_path( + project: str, + location: str, + deployment_resource_pool: str, + ) -> str: + """Returns a fully-qualified deployment_resource_pool string.""" + return "projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}".format( + project=project, + location=location, + deployment_resource_pool=deployment_resource_pool, + ) + + @staticmethod + def parse_deployment_resource_pool_path(path: str) -> Dict[str, str]: + """Parses a deployment_resource_pool path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/deploymentResourcePools/(?P<deployment_resource_pool>.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def endpoint_path( project: str, diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/async_client.py b/google/cloud/aiplatform_v1/services/llm_utility_service/async_client.py index 5154819336..26c91f8f45 100644 --- a/google/cloud/aiplatform_v1/services/llm_utility_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/async_client.py @@ -249,9 +249,14 @@ async def sample_count_tokens(): instances = aiplatform_v1.Value() instances.null_value = "NULL_VALUE" + contents = aiplatform_v1.Content() + contents.parts.text = "text_value" + request = aiplatform_v1.CountTokensRequest( endpoint="endpoint_value", + model="model_value", instances=instances, + contents=contents, ) # Make the request diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/client.py 
b/google/cloud/aiplatform_v1/services/llm_utility_service/client.py index 4c043b995c..a128b03d00 100644 --- a/google/cloud/aiplatform_v1/services/llm_utility_service/client.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/client.py @@ -476,9 +476,14 @@ def sample_count_tokens(): instances = aiplatform_v1.Value() instances.null_value = "NULL_VALUE" + contents = aiplatform_v1.Content() + contents.parts.text = "text_value" + request = aiplatform_v1.CountTokensRequest( endpoint="endpoint_value", + model="model_value", instances=instances, + contents=contents, ) # Make the request diff --git a/google/cloud/aiplatform_v1/services/migration_service/client.py b/google/cloud/aiplatform_v1/services/migration_service/client.py index ec924555fe..e1a310325c 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/client.py @@ -208,40 +208,40 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path( project: str, - location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( + return "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", - path, - ) + m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path) return m.groupdict() if m else {} @staticmethod def dataset_path( project: str, + location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( + return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) @staticmethod def 
parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path) + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod diff --git a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py index b7fb387523..e55e4896c0 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py @@ -46,6 +46,7 @@ OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore from google.api import httpbody_pb2 # type: ignore +from google.cloud.aiplatform_v1.types import content from google.cloud.aiplatform_v1.types import explanation from google.cloud.aiplatform_v1.types import prediction_service from google.cloud.aiplatform_v1.types import types @@ -1153,6 +1154,130 @@ async def sample_explain(): # Done; return the response. return response + def stream_generate_content( + self, + request: Optional[ + Union[prediction_service.GenerateContentRequest, dict] + ] = None, + *, + model: Optional[str] = None, + contents: Optional[MutableSequence[content.Content]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[AsyncIterable[prediction_service.GenerateContentResponse]]: + r"""Generate content with multimodal inputs with + streaming support. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_stream_generate_content(): + # Create a client + client = aiplatform_v1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + contents = aiplatform_v1.Content() + contents.parts.text = "text_value" + + request = aiplatform_v1.GenerateContentRequest( + model="model_value", + contents=contents, + ) + + # Make the request + stream = await client.stream_generate_content(request=request) + + # Handle the response + async for response in stream: + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.GenerateContentRequest, dict]]): + The request object. Request message for [PredictionService.GenerateContent]. + model (:class:`str`): + Required. The name of the publisher model requested to + serve the prediction. Format: + ``projects/{project}/locations/{location}/publishers/*/models/*`` + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + contents (:class:`MutableSequence[google.cloud.aiplatform_v1.types.Content]`): + Required. The content of the current + conversation with the model. + For single-turn queries, this is a + single instance. For multi-turn queries, + this is a repeated field that contains + conversation history + latest request. + + This corresponds to the ``contents`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + AsyncIterable[google.cloud.aiplatform_v1.types.GenerateContentResponse]: + Response message for + [PredictionService.GenerateContent]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, contents]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = prediction_service.GenerateContentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if contents: + request.contents.extend(contents) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.stream_generate_content, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + async def list_operations( self, request: Optional[operations_pb2.ListOperationsRequest] = None, diff --git a/google/cloud/aiplatform_v1/services/prediction_service/client.py b/google/cloud/aiplatform_v1/services/prediction_service/client.py index 3d2189cb42..2f4537397b 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/client.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/client.py @@ -49,6 +49,7 @@ OptionalRetry = Union[retries.Retry, object] # type: ignore from google.api import httpbody_pb2 # type: ignore +from google.cloud.aiplatform_v1.types import content from google.cloud.aiplatform_v1.types import explanation from google.cloud.aiplatform_v1.types import prediction_service from google.cloud.aiplatform_v1.types import types @@ -1392,6 +1393,130 @@ def sample_explain(): # Done; return the response. return response + def stream_generate_content( + self, + request: Optional[ + Union[prediction_service.GenerateContentRequest, dict] + ] = None, + *, + model: Optional[str] = None, + contents: Optional[MutableSequence[content.Content]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[prediction_service.GenerateContentResponse]: + r"""Generate content with multimodal inputs with + streaming support. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_stream_generate_content(): + # Create a client + client = aiplatform_v1.PredictionServiceClient() + + # Initialize request argument(s) + contents = aiplatform_v1.Content() + contents.parts.text = "text_value" + + request = aiplatform_v1.GenerateContentRequest( + model="model_value", + contents=contents, + ) + + # Make the request + stream = client.stream_generate_content(request=request) + + # Handle the response + for response in stream: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GenerateContentRequest, dict]): + The request object. Request message for [PredictionService.GenerateContent]. + model (str): + Required. The name of the publisher model requested to + serve the prediction. Format: + ``projects/{project}/locations/{location}/publishers/*/models/*`` + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + contents (MutableSequence[google.cloud.aiplatform_v1.types.Content]): + Required. The content of the current + conversation with the model. + For single-turn queries, this is a + single instance. For multi-turn queries, + this is a repeated field that contains + conversation history + latest request. + + This corresponds to the ``contents`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.cloud.aiplatform_v1.types.GenerateContentResponse]: + Response message for + [PredictionService.GenerateContent]. 
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, contents]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a prediction_service.GenerateContentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, prediction_service.GenerateContentRequest): + request = prediction_service.GenerateContentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if contents is not None: + request.contents = contents + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.stream_generate_content] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + def __enter__(self) -> "PredictionServiceClient": return self diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py index 47e7ec2c5a..52d68fe079 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py @@ -168,6 +168,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.stream_generate_content: gapic_v1.method.wrap_method( + self.stream_generate_content, + default_timeout=None, + client_info=client_info, + ), } def close(self): @@ -272,6 +277,18 @@ def explain( ]: raise NotImplementedError() + @property + def stream_generate_content( + self, + ) -> Callable[ + [prediction_service.GenerateContentRequest], + Union[ + prediction_service.GenerateContentResponse, + Awaitable[prediction_service.GenerateContentResponse], + ], + ]: + raise NotImplementedError() + @property def list_operations( self, diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py index 5e00227ea5..a70cd92007 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py @@ -485,6 +485,36 @@ def explain( ) return self._stubs["explain"] + @property + def stream_generate_content( + self, + ) -> Callable[ + [prediction_service.GenerateContentRequest], + prediction_service.GenerateContentResponse, + ]: + r"""Return a callable for the stream generate content method over gRPC. + + Generate content with multimodal inputs with + streaming support. + + Returns: + Callable[[~.GenerateContentRequest], + ~.GenerateContentResponse]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "stream_generate_content" not in self._stubs: + self._stubs["stream_generate_content"] = self.grpc_channel.unary_stream( + "/google.cloud.aiplatform.v1.PredictionService/StreamGenerateContent", + request_serializer=prediction_service.GenerateContentRequest.serialize, + response_deserializer=prediction_service.GenerateContentResponse.deserialize, + ) + return self._stubs["stream_generate_content"] + def close(self): self.grpc_channel.close() diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py index 989ecc5050..46a09f9eeb 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py @@ -492,6 +492,36 @@ def explain( ) return self._stubs["explain"] + @property + def stream_generate_content( + self, + ) -> Callable[ + [prediction_service.GenerateContentRequest], + Awaitable[prediction_service.GenerateContentResponse], + ]: + r"""Return a callable for the stream generate content method over gRPC. + + Generate content with multimodal inputs with + streaming support. + + Returns: + Callable[[~.GenerateContentRequest], + Awaitable[~.GenerateContentResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "stream_generate_content" not in self._stubs: + self._stubs["stream_generate_content"] = self.grpc_channel.unary_stream( + "/google.cloud.aiplatform.v1.PredictionService/StreamGenerateContent", + request_serializer=prediction_service.GenerateContentRequest.serialize, + response_deserializer=prediction_service.GenerateContentResponse.deserialize, + ) + return self._stubs["stream_generate_content"] + def close(self): return self.grpc_channel.close() diff --git a/google/cloud/aiplatform_v1/types/__init__.py b/google/cloud/aiplatform_v1/types/__init__.py index 1760236eea..a9a635efef 100644 --- a/google/cloud/aiplatform_v1/types/__init__.py +++ b/google/cloud/aiplatform_v1/types/__init__.py @@ -28,6 +28,20 @@ from .completion_stats import ( CompletionStats, ) +from .content import ( + Blob, + Candidate, + Citation, + CitationMetadata, + Content, + FileData, + GenerationConfig, + Part, + SafetyRating, + SafetySetting, + VideoMetadata, + HarmCategory, +) from .context import ( Context, ) @@ -51,6 +65,7 @@ from .dataset import ( Dataset, ExportDataConfig, + ExportFilterSplit, ExportFractionSplit, ImportDataConfig, ) @@ -97,6 +112,20 @@ from .deployed_model_ref import ( DeployedModelRef, ) +from .deployment_resource_pool import ( + DeploymentResourcePool, +) +from .deployment_resource_pool_service import ( + CreateDeploymentResourcePoolOperationMetadata, + CreateDeploymentResourcePoolRequest, + DeleteDeploymentResourcePoolRequest, + GetDeploymentResourcePoolRequest, + ListDeploymentResourcePoolsRequest, + ListDeploymentResourcePoolsResponse, + QueryDeployedModelsRequest, + QueryDeployedModelsResponse, + UpdateDeploymentResourcePoolOperationMetadata, +) from .encryption_spec import ( EncryptionSpec, ) @@ -562,6 +591,10 @@ NasTrial, NasTrialDetail, ) +from .openapi import ( + Schema, + Type, +) from .operation import ( DeleteOperationMetadata, GenericOperationMetadata, @@ -596,6 +629,8 @@ DirectRawPredictResponse, ExplainRequest, ExplainResponse, + 
GenerateContentRequest, + GenerateContentResponse, PredictRequest, PredictResponse, RawPredictRequest, @@ -715,6 +750,12 @@ from .tensorboard_time_series import ( TensorboardTimeSeries, ) +from .tool import ( + FunctionCall, + FunctionDeclaration, + FunctionResponse, + Tool, +) from .training_pipeline import ( FilterSplit, FractionSplit, @@ -772,6 +813,18 @@ "Artifact", "BatchPredictionJob", "CompletionStats", + "Blob", + "Candidate", + "Citation", + "CitationMetadata", + "Content", + "FileData", + "GenerationConfig", + "Part", + "SafetyRating", + "SafetySetting", + "VideoMetadata", + "HarmCategory", "Context", "ContainerSpec", "CustomJob", @@ -786,6 +839,7 @@ "TrainingConfig", "Dataset", "ExportDataConfig", + "ExportFilterSplit", "ExportFractionSplit", "ImportDataConfig", "CreateDatasetOperationMetadata", @@ -823,6 +877,16 @@ "DatasetVersion", "DeployedIndexRef", "DeployedModelRef", + "DeploymentResourcePool", + "CreateDeploymentResourcePoolOperationMetadata", + "CreateDeploymentResourcePoolRequest", + "DeleteDeploymentResourcePoolRequest", + "GetDeploymentResourcePoolRequest", + "ListDeploymentResourcePoolsRequest", + "ListDeploymentResourcePoolsResponse", + "QueryDeployedModelsRequest", + "QueryDeployedModelsResponse", + "UpdateDeploymentResourcePoolOperationMetadata", "EncryptionSpec", "DeployedModel", "Endpoint", @@ -1191,6 +1255,8 @@ "NasJobSpec", "NasTrial", "NasTrialDetail", + "Schema", + "Type", "DeleteOperationMetadata", "GenericOperationMetadata", "PipelineFailurePolicy", @@ -1220,6 +1286,8 @@ "DirectRawPredictResponse", "ExplainRequest", "ExplainResponse", + "GenerateContentRequest", + "GenerateContentResponse", "PredictRequest", "PredictResponse", "RawPredictRequest", @@ -1310,6 +1378,10 @@ "WriteTensorboardRunDataRequest", "WriteTensorboardRunDataResponse", "TensorboardTimeSeries", + "FunctionCall", + "FunctionDeclaration", + "FunctionResponse", + "Tool", "FilterSplit", "FractionSplit", "InputDataConfig", diff --git 
a/google/cloud/aiplatform_v1/types/batch_prediction_job.py b/google/cloud/aiplatform_v1/types/batch_prediction_job.py index ac034c04e8..7b62b1b370 100644 --- a/google/cloud/aiplatform_v1/types/batch_prediction_job.py +++ b/google/cloud/aiplatform_v1/types/batch_prediction_job.py @@ -383,8 +383,8 @@ class InstanceConfig(proto.Message): [excluded_fields][google.cloud.aiplatform.v1.BatchPredictionJob.InstanceConfig.excluded_fields] must be empty. - The input must be JSONL with objects at each line, CSV, - BigQuery or TfRecord. + The input must be JSONL with objects at each line, BigQuery + or TfRecord. excluded_fields (MutableSequence[str]): Fields that will be excluded in the prediction instance that is sent to the Model. @@ -397,8 +397,8 @@ class InstanceConfig(proto.Message): [included_fields][google.cloud.aiplatform.v1.BatchPredictionJob.InstanceConfig.included_fields] must be empty. - The input must be JSONL with objects at each line, CSV, - BigQuery or TfRecord. + The input must be JSONL with objects at each line, BigQuery + or TfRecord. """ instance_type: str = proto.Field( diff --git a/google/cloud/aiplatform_v1/types/content.py b/google/cloud/aiplatform_v1/types/content.py new file mode 100644 index 0000000000..116c8d6093 --- /dev/null +++ b/google/cloud/aiplatform_v1/types/content.py @@ -0,0 +1,571 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import tool +from google.protobuf import duration_pb2 # type: ignore +from google.type import date_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", + manifest={ + "HarmCategory", + "Content", + "Part", + "Blob", + "FileData", + "VideoMetadata", + "GenerationConfig", + "SafetySetting", + "SafetyRating", + "CitationMetadata", + "Citation", + "Candidate", + }, +) + + +class HarmCategory(proto.Enum): + r"""Harm categories that will block the content. + + Values: + HARM_CATEGORY_UNSPECIFIED (0): + The harm category is unspecified. + HARM_CATEGORY_HATE_SPEECH (1): + The harm category is hate speech. + HARM_CATEGORY_DANGEROUS_CONTENT (2): + The harm category is dangerous content. + HARM_CATEGORY_HARASSMENT (3): + The harm category is harassment. + HARM_CATEGORY_SEXUALLY_EXPLICIT (4): + The harm category is sexually explicit + content. + """ + HARM_CATEGORY_UNSPECIFIED = 0 + HARM_CATEGORY_HATE_SPEECH = 1 + HARM_CATEGORY_DANGEROUS_CONTENT = 2 + HARM_CATEGORY_HARASSMENT = 3 + HARM_CATEGORY_SEXUALLY_EXPLICIT = 4 + + +class Content(proto.Message): + r"""The base structured datatype containing multi-part content of a + message. + + A ``Content`` includes a ``role`` field designating the producer of + the ``Content`` and a ``parts`` field containing multi-part data + that contains the content of the message turn. + + Attributes: + role (str): + Optional. The producer of the content. Must + be either 'user' or 'model'. + Useful to set for multi-turn conversations, + otherwise can be left blank or unset. + parts (MutableSequence[google.cloud.aiplatform_v1.types.Part]): + Required. Ordered ``Parts`` that constitute a single + message. Parts may have different IANA MIME types. 
+ """ + + role: str = proto.Field( + proto.STRING, + number=1, + ) + parts: MutableSequence["Part"] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="Part", + ) + + +class Part(proto.Message): + r"""A datatype containing media that is part of a multi-part ``Content`` + message. + + A ``Part`` consists of data which has an associated datatype. A + ``Part`` can only contain one of the accepted types in + ``Part.data``. + + A ``Part`` must have a fixed IANA MIME type identifying the type and + subtype of the media if ``inline_data`` or ``file_data`` field is + filled with raw bytes. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + text (str): + Optional. Text part (can be code). + + This field is a member of `oneof`_ ``data``. + inline_data (google.cloud.aiplatform_v1.types.Blob): + Optional. Inlined bytes data. + + This field is a member of `oneof`_ ``data``. + file_data (google.cloud.aiplatform_v1.types.FileData): + Optional. URI based data. + + This field is a member of `oneof`_ ``data``. + function_call (google.cloud.aiplatform_v1.types.FunctionCall): + Optional. A predicted [FunctionCall] returned from the model + that contains a string representing the + [FunctionDeclaration.name] with the parameters and their + values. + + This field is a member of `oneof`_ ``data``. + function_response (google.cloud.aiplatform_v1.types.FunctionResponse): + Optional. The result output of a [FunctionCall] that + contains a string representing the + [FunctionDeclaration.name] and a structured JSON object + containing any output from the function call. It is used as + context to the model. + + This field is a member of `oneof`_ ``data``. 
+ video_metadata (google.cloud.aiplatform_v1.types.VideoMetadata): + Optional. Video metadata. The metadata should only be + specified while the video data is presented in inline_data + or file_data. + + This field is a member of `oneof`_ ``metadata``. + """ + + text: str = proto.Field( + proto.STRING, + number=1, + oneof="data", + ) + inline_data: "Blob" = proto.Field( + proto.MESSAGE, + number=2, + oneof="data", + message="Blob", + ) + file_data: "FileData" = proto.Field( + proto.MESSAGE, + number=3, + oneof="data", + message="FileData", + ) + function_call: tool.FunctionCall = proto.Field( + proto.MESSAGE, + number=5, + oneof="data", + message=tool.FunctionCall, + ) + function_response: tool.FunctionResponse = proto.Field( + proto.MESSAGE, + number=6, + oneof="data", + message=tool.FunctionResponse, + ) + video_metadata: "VideoMetadata" = proto.Field( + proto.MESSAGE, + number=4, + oneof="metadata", + message="VideoMetadata", + ) + + +class Blob(proto.Message): + r"""Raw media bytes. + + Text should not be sent as raw bytes, use the 'text' field. + + Attributes: + mime_type (str): + Required. The IANA standard MIME type of the + source data. + data (bytes): + Required. Raw bytes for media formats. + """ + + mime_type: str = proto.Field( + proto.STRING, + number=1, + ) + data: bytes = proto.Field( + proto.BYTES, + number=2, + ) + + +class FileData(proto.Message): + r"""URI based data. + + Attributes: + mime_type (str): + Required. The IANA standard MIME type of the + source data. + file_uri (str): + Required. URI. + """ + + mime_type: str = proto.Field( + proto.STRING, + number=1, + ) + file_uri: str = proto.Field( + proto.STRING, + number=2, + ) + + +class VideoMetadata(proto.Message): + r"""Metadata describes the input video content. + + Attributes: + start_offset (google.protobuf.duration_pb2.Duration): + Optional. The start offset of the video. + end_offset (google.protobuf.duration_pb2.Duration): + Optional. The end offset of the video. 
+ """ + + start_offset: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + end_offset: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=2, + message=duration_pb2.Duration, + ) + + +class GenerationConfig(proto.Message): + r"""Generation config. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + temperature (float): + Optional. Controls the randomness of + predictions. + + This field is a member of `oneof`_ ``_temperature``. + top_p (float): + Optional. If specified, nucleus sampling will + be used. + + This field is a member of `oneof`_ ``_top_p``. + top_k (float): + Optional. If specified, top-k sampling will + be used. + + This field is a member of `oneof`_ ``_top_k``. + candidate_count (int): + Optional. Number of candidates to generate. + + This field is a member of `oneof`_ ``_candidate_count``. + max_output_tokens (int): + Optional. The maximum number of output tokens + to generate per message. + + This field is a member of `oneof`_ ``_max_output_tokens``. + stop_sequences (MutableSequence[str]): + Optional. Stop sequences. + """ + + temperature: float = proto.Field( + proto.FLOAT, + number=1, + optional=True, + ) + top_p: float = proto.Field( + proto.FLOAT, + number=2, + optional=True, + ) + top_k: float = proto.Field( + proto.FLOAT, + number=3, + optional=True, + ) + candidate_count: int = proto.Field( + proto.INT32, + number=4, + optional=True, + ) + max_output_tokens: int = proto.Field( + proto.INT32, + number=5, + optional=True, + ) + stop_sequences: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=6, + ) + + +class SafetySetting(proto.Message): + r"""Safety settings. + + Attributes: + category (google.cloud.aiplatform_v1.types.HarmCategory): + Required. Harm category. + threshold (google.cloud.aiplatform_v1.types.SafetySetting.HarmBlockThreshold): + Required. The harm block threshold. 
+ """ + + class HarmBlockThreshold(proto.Enum): + r"""Probability based thresholds levels for blocking. + + Values: + HARM_BLOCK_THRESHOLD_UNSPECIFIED (0): + Unspecified harm block threshold. + BLOCK_LOW_AND_ABOVE (1): + Block low threshold and above (i.e. block + more). + BLOCK_MEDIUM_AND_ABOVE (2): + Block medium threshold and above. + BLOCK_ONLY_HIGH (3): + Block only high threshold (i.e. block less). + BLOCK_NONE (4): + Block none. + """ + HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0 + BLOCK_LOW_AND_ABOVE = 1 + BLOCK_MEDIUM_AND_ABOVE = 2 + BLOCK_ONLY_HIGH = 3 + BLOCK_NONE = 4 + + category: "HarmCategory" = proto.Field( + proto.ENUM, + number=1, + enum="HarmCategory", + ) + threshold: HarmBlockThreshold = proto.Field( + proto.ENUM, + number=2, + enum=HarmBlockThreshold, + ) + + +class SafetyRating(proto.Message): + r"""Safety rating corresponding to the generated content. + + Attributes: + category (google.cloud.aiplatform_v1.types.HarmCategory): + Output only. Harm category. + probability (google.cloud.aiplatform_v1.types.SafetyRating.HarmProbability): + Output only. Harm probability levels in the + content. + blocked (bool): + Output only. Indicates whether the content + was filtered out because of this rating. + """ + + class HarmProbability(proto.Enum): + r"""Harm probability levels in the content. + + Values: + HARM_PROBABILITY_UNSPECIFIED (0): + Harm probability unspecified. + NEGLIGIBLE (1): + Negligible level of harm. + LOW (2): + Low level of harm. + MEDIUM (3): + Medium level of harm. + HIGH (4): + High level of harm. 
+ """ + HARM_PROBABILITY_UNSPECIFIED = 0 + NEGLIGIBLE = 1 + LOW = 2 + MEDIUM = 3 + HIGH = 4 + + category: "HarmCategory" = proto.Field( + proto.ENUM, + number=1, + enum="HarmCategory", + ) + probability: HarmProbability = proto.Field( + proto.ENUM, + number=2, + enum=HarmProbability, + ) + blocked: bool = proto.Field( + proto.BOOL, + number=3, + ) + + +class CitationMetadata(proto.Message): + r"""A collection of source attributions for a piece of content. + + Attributes: + citations (MutableSequence[google.cloud.aiplatform_v1.types.Citation]): + Output only. List of citations. + """ + + citations: MutableSequence["Citation"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="Citation", + ) + + +class Citation(proto.Message): + r"""Source attributions for content. + + Attributes: + start_index (int): + Output only. Start index into the content. + end_index (int): + Output only. End index into the content. + uri (str): + Output only. Url reference of the + attribution. + title (str): + Output only. Title of the attribution. + license_ (str): + Output only. License of the attribution. + publication_date (google.type.date_pb2.Date): + Output only. Publication date of the + attribution. + """ + + start_index: int = proto.Field( + proto.INT32, + number=1, + ) + end_index: int = proto.Field( + proto.INT32, + number=2, + ) + uri: str = proto.Field( + proto.STRING, + number=3, + ) + title: str = proto.Field( + proto.STRING, + number=4, + ) + license_: str = proto.Field( + proto.STRING, + number=5, + ) + publication_date: date_pb2.Date = proto.Field( + proto.MESSAGE, + number=6, + message=date_pb2.Date, + ) + + +class Candidate(proto.Message): + r"""A response candidate generated from the model. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + index (int): + Output only. Index of the candidate. + content (google.cloud.aiplatform_v1.types.Content): + Output only. 
Content parts of the candidate. + finish_reason (google.cloud.aiplatform_v1.types.Candidate.FinishReason): + Output only. The reason why the model stopped + generating tokens. If empty, the model has not + stopped generating the tokens. + safety_ratings (MutableSequence[google.cloud.aiplatform_v1.types.SafetyRating]): + Output only. List of ratings for the safety + of a response candidate. + There is at most one rating per category. + finish_message (str): + Output only. Describes the reason the model stopped + generating tokens in more detail. This is only filled when + ``finish_reason`` is set. + + This field is a member of `oneof`_ ``_finish_message``. + citation_metadata (google.cloud.aiplatform_v1.types.CitationMetadata): + Output only. Source attribution of the + generated content. + """ + + class FinishReason(proto.Enum): + r"""The reason why the model stopped generating tokens. + If empty, the model has not stopped generating the tokens. + + Values: + FINISH_REASON_UNSPECIFIED (0): + The finish reason is unspecified. + STOP (1): + Natural stop point of the model or provided + stop sequence. + MAX_TOKENS (2): + The maximum number of tokens as specified in + the request was reached. + SAFETY (3): + The token generation was stopped as the + response was flagged for safety reasons. NOTE: + When streaming the Candidate.content will be + empty if content filters blocked the output. + RECITATION (4): + The token generation was stopped as the + response was flagged for unauthorized citations.
+ OTHER (5): + All other reasons that stopped the token + generation + """ + FINISH_REASON_UNSPECIFIED = 0 + STOP = 1 + MAX_TOKENS = 2 + SAFETY = 3 + RECITATION = 4 + OTHER = 5 + + index: int = proto.Field( + proto.INT32, + number=1, + ) + content: "Content" = proto.Field( + proto.MESSAGE, + number=2, + message="Content", + ) + finish_reason: FinishReason = proto.Field( + proto.ENUM, + number=3, + enum=FinishReason, + ) + safety_ratings: MutableSequence["SafetyRating"] = proto.RepeatedField( + proto.MESSAGE, + number=4, + message="SafetyRating", + ) + finish_message: str = proto.Field( + proto.STRING, + number=5, + optional=True, + ) + citation_metadata: "CitationMetadata" = proto.Field( + proto.MESSAGE, + number=6, + message="CitationMetadata", + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/dataset.py b/google/cloud/aiplatform_v1/types/dataset.py index 0e916dc27b..adfa23f713 100644 --- a/google/cloud/aiplatform_v1/types/dataset.py +++ b/google/cloud/aiplatform_v1/types/dataset.py @@ -33,6 +33,7 @@ "ImportDataConfig", "ExportDataConfig", "ExportFractionSplit", + "ExportFilterSplit", }, ) @@ -248,6 +249,10 @@ class ExportDataConfig(proto.Message): r"""Describes what part of the Dataset is to be exported, the destination of the export and how to export. + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -270,14 +275,84 @@ class ExportDataConfig(proto.Message): Split based on fractions defining the size of each set. + This field is a member of `oneof`_ ``split``. + filter_split (google.cloud.aiplatform_v1.types.ExportFilterSplit): + Split based on the provided filters for each + set. + This field is a member of `oneof`_ ``split``. 
annotations_filter (str): An expression for filtering what part of the Dataset is to be exported. Only Annotations that match this filter will be exported. The filter syntax is the same as in [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. + saved_query_id (str): + The ID of a SavedQuery (annotation set) under the Dataset + specified by [dataset_id][] used for filtering Annotations + for training. + + Only used for custom training data export use cases. Only + applicable to Datasets that have SavedQueries. + + Only Annotations that are associated with this SavedQuery + are used in respectively training. When used in conjunction + with + [annotations_filter][google.cloud.aiplatform.v1.ExportDataConfig.annotations_filter], + the Annotations used for training are filtered by both + [saved_query_id][google.cloud.aiplatform.v1.ExportDataConfig.saved_query_id] + and + [annotations_filter][google.cloud.aiplatform.v1.ExportDataConfig.annotations_filter]. + + Only one of + [saved_query_id][google.cloud.aiplatform.v1.ExportDataConfig.saved_query_id] + and + [annotation_schema_uri][google.cloud.aiplatform.v1.ExportDataConfig.annotation_schema_uri] + should be specified as both of them represent the same + thing: problem type. + annotation_schema_uri (str): + The Cloud Storage URI that points to a YAML file describing + the annotation schema. The schema is defined as an OpenAPI + 3.0.2 `Schema + Object `__. + The schema files that can be used here are found in + gs://google-cloud-aiplatform/schema/dataset/annotation/, + note that the chosen schema must be consistent with + [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] + of the Dataset specified by [dataset_id][]. + + Only used for custom training data export use cases. Only + applicable to Datasets that have DataItems and Annotations. 
+ + Only Annotations that both match this schema and belong to + DataItems not ignored by the split method are used in + respectively training, validation or test role, depending on + the role of the DataItem they are on. + + When used in conjunction with + [annotations_filter][google.cloud.aiplatform.v1.ExportDataConfig.annotations_filter], + the Annotations used for training are filtered by both + [annotations_filter][google.cloud.aiplatform.v1.ExportDataConfig.annotations_filter] + and + [annotation_schema_uri][google.cloud.aiplatform.v1.ExportDataConfig.annotation_schema_uri]. + export_use (google.cloud.aiplatform_v1.types.ExportDataConfig.ExportUse): + Indicates the usage of the exported files. """ + class ExportUse(proto.Enum): + r"""ExportUse indicates the usage of the exported files. It + restricts file destination, format, annotations to be exported, + whether to allow unannotated data to be exported and whether to + clone files to temp Cloud Storage bucket. + + Values: + EXPORT_USE_UNSPECIFIED (0): + Regular user export. + CUSTOM_CODE_TRAINING (6): + Export for custom code training. 
+ """ + EXPORT_USE_UNSPECIFIED = 0 + CUSTOM_CODE_TRAINING = 6 + gcs_destination: io.GcsDestination = proto.Field( proto.MESSAGE, number=1, @@ -290,10 +365,29 @@ class ExportDataConfig(proto.Message): oneof="split", message="ExportFractionSplit", ) + filter_split: "ExportFilterSplit" = proto.Field( + proto.MESSAGE, + number=7, + oneof="split", + message="ExportFilterSplit", + ) annotations_filter: str = proto.Field( proto.STRING, number=2, ) + saved_query_id: str = proto.Field( + proto.STRING, + number=11, + ) + annotation_schema_uri: str = proto.Field( + proto.STRING, + number=12, + ) + export_use: ExportUse = proto.Field( + proto.ENUM, + number=4, + enum=ExportUse, + ) class ExportFractionSplit(proto.Message): @@ -331,4 +425,58 @@ class ExportFractionSplit(proto.Message): ) +class ExportFilterSplit(proto.Message): + r"""Assigns input data to training, validation, and test sets + based on the given filters, data pieces not matched by any + filter are ignored. Currently only supported for Datasets + containing DataItems. + If any of the filters in this message are to match nothing, then + they can be set as '-' (the minus sign). + + Supported only for unstructured Datasets. + + Attributes: + training_filter (str): + Required. A filter on DataItems of the Dataset. DataItems + that match this filter are used to train the Model. A filter + with same syntax as the one used in + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems] + may be used. If a single DataItem is matched by more than + one of the FilterSplit filters, then it is assigned to the + first set that applies to it in the training, validation, + test order. + validation_filter (str): + Required. A filter on DataItems of the Dataset. DataItems + that match this filter are used to validate the Model. A + filter with same syntax as the one used in + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems] + may be used. 
If a single DataItem is matched by more than + one of the FilterSplit filters, then it is assigned to the + first set that applies to it in the training, validation, + test order. + test_filter (str): + Required. A filter on DataItems of the Dataset. DataItems + that match this filter are used to test the Model. A filter + with same syntax as the one used in + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems] + may be used. If a single DataItem is matched by more than + one of the FilterSplit filters, then it is assigned to the + first set that applies to it in the training, validation, + test order. + """ + + training_filter: str = proto.Field( + proto.STRING, + number=1, + ) + validation_filter: str = proto.Field( + proto.STRING, + number=2, + ) + test_filter: str = proto.Field( + proto.STRING, + number=3, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/dataset_service.py b/google/cloud/aiplatform_v1/types/dataset_service.py index 12ef9f8fd7..2686792ea9 100644 --- a/google/cloud/aiplatform_v1/types/dataset_service.py +++ b/google/cloud/aiplatform_v1/types/dataset_service.py @@ -23,6 +23,7 @@ from google.cloud.aiplatform_v1.types import data_item as gca_data_item from google.cloud.aiplatform_v1.types import dataset as gca_dataset from google.cloud.aiplatform_v1.types import dataset_version as gca_dataset_version +from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import operation from google.cloud.aiplatform_v1.types import saved_query as gca_saved_query from google.protobuf import field_mask_pb2 # type: ignore @@ -349,14 +350,26 @@ class ExportDataResponse(proto.Message): Attributes: exported_files (MutableSequence[str]): - All of the files that are exported in this - export operation. + All of the files that are exported in this export operation. 
+ For custom code training export, only three (training, + validation and test) GCS paths in wildcard format are + populated (e.g., gs://.../training-*). + data_stats (google.cloud.aiplatform_v1.types.Model.DataStats): + Only present for custom code training export + use case. Records data stats, i.e., + train/validation/test item/annotation counts + calculated during the export operation. """ exported_files: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=1, ) + data_stats: model.Model.DataStats = proto.Field( + proto.MESSAGE, + number=2, + message=model.Model.DataStats, + ) class ExportDataOperationMetadata(proto.Message): diff --git a/google/cloud/aiplatform_v1/types/deployed_index_ref.py b/google/cloud/aiplatform_v1/types/deployed_index_ref.py index 50544e9655..23fa11b40a 100644 --- a/google/cloud/aiplatform_v1/types/deployed_index_ref.py +++ b/google/cloud/aiplatform_v1/types/deployed_index_ref.py @@ -38,6 +38,9 @@ class DeployedIndexRef(proto.Message): deployed_index_id (str): Immutable. The ID of the DeployedIndex in the above IndexEndpoint. + display_name (str): + Output only. The display name of the + DeployedIndex. """ index_endpoint: str = proto.Field( @@ -48,6 +51,10 @@ class DeployedIndexRef(proto.Message): proto.STRING, number=2, ) + display_name: str = proto.Field( + proto.STRING, + number=3, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/deployment_resource_pool.py b/google/cloud/aiplatform_v1/types/deployment_resource_pool.py new file mode 100644 index 0000000000..5639546731 --- /dev/null +++ b/google/cloud/aiplatform_v1/types/deployment_resource_pool.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import machine_resources +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", + manifest={ + "DeploymentResourcePool", + }, +) + + +class DeploymentResourcePool(proto.Message): + r"""A description of resources that can be shared by multiple + DeployedModels, whose underlying specification consists of a + DedicatedResources. + + Attributes: + name (str): + Immutable. The resource name of the DeploymentResourcePool. + Format: + ``projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}`` + dedicated_resources (google.cloud.aiplatform_v1.types.DedicatedResources): + Required. The underlying DedicatedResources + that the DeploymentResourcePool uses. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + DeploymentResourcePool was created. 
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + dedicated_resources: machine_resources.DedicatedResources = proto.Field( + proto.MESSAGE, + number=2, + message=machine_resources.DedicatedResources, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/deployment_resource_pool_service.py b/google/cloud/aiplatform_v1/types/deployment_resource_pool_service.py new file mode 100644 index 0000000000..c82b0a2f72 --- /dev/null +++ b/google/cloud/aiplatform_v1/types/deployment_resource_pool_service.py @@ -0,0 +1,300 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import deployed_model_ref +from google.cloud.aiplatform_v1.types import ( + deployment_resource_pool as gca_deployment_resource_pool, +) +from google.cloud.aiplatform_v1.types import endpoint +from google.cloud.aiplatform_v1.types import operation + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", + manifest={ + "CreateDeploymentResourcePoolRequest", + "CreateDeploymentResourcePoolOperationMetadata", + "GetDeploymentResourcePoolRequest", + "ListDeploymentResourcePoolsRequest", + "ListDeploymentResourcePoolsResponse", + "UpdateDeploymentResourcePoolOperationMetadata", + "DeleteDeploymentResourcePoolRequest", + "QueryDeployedModelsRequest", + "QueryDeployedModelsResponse", + }, +) + + +class CreateDeploymentResourcePoolRequest(proto.Message): + r"""Request message for CreateDeploymentResourcePool method. + + Attributes: + parent (str): + Required. The parent location resource where this + DeploymentResourcePool will be created. Format: + ``projects/{project}/locations/{location}`` + deployment_resource_pool (google.cloud.aiplatform_v1.types.DeploymentResourcePool): + Required. The DeploymentResourcePool to + create. + deployment_resource_pool_id (str): + Required. The ID to use for the DeploymentResourcePool, + which will become the final component of the + DeploymentResourcePool's resource name. + + The maximum length is 63 characters, and valid characters + are ``/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/``. 
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + deployment_resource_pool: gca_deployment_resource_pool.DeploymentResourcePool = ( + proto.Field( + proto.MESSAGE, + number=2, + message=gca_deployment_resource_pool.DeploymentResourcePool, + ) + ) + deployment_resource_pool_id: str = proto.Field( + proto.STRING, + number=3, + ) + + +class CreateDeploymentResourcePoolOperationMetadata(proto.Message): + r"""Runtime operation information for + CreateDeploymentResourcePool method. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The operation generic information. + """ + + generic_metadata: operation.GenericOperationMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class GetDeploymentResourcePoolRequest(proto.Message): + r"""Request message for GetDeploymentResourcePool method. + + Attributes: + name (str): + Required. The name of the DeploymentResourcePool to + retrieve. Format: + ``projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListDeploymentResourcePoolsRequest(proto.Message): + r"""Request message for ListDeploymentResourcePools method. + + Attributes: + parent (str): + Required. The parent Location which owns this collection of + DeploymentResourcePools. Format: + ``projects/{project}/locations/{location}`` + page_size (int): + The maximum number of DeploymentResourcePools + to return. The service may return fewer than + this value. + page_token (str): + A page token, received from a previous + ``ListDeploymentResourcePools`` call. Provide this to + retrieve the subsequent page. + + When paginating, all other parameters provided to + ``ListDeploymentResourcePools`` must match the call that + provided the page token. 
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListDeploymentResourcePoolsResponse(proto.Message): + r"""Response message for ListDeploymentResourcePools method. + + Attributes: + deployment_resource_pools (MutableSequence[google.cloud.aiplatform_v1.types.DeploymentResourcePool]): + The DeploymentResourcePools from the + specified location. + next_page_token (str): + A token, which can be sent as ``page_token`` to retrieve the + next page. If this field is omitted, there are no subsequent + pages. + """ + + @property + def raw_page(self): + return self + + deployment_resource_pools: MutableSequence[ + gca_deployment_resource_pool.DeploymentResourcePool + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_deployment_resource_pool.DeploymentResourcePool, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateDeploymentResourcePoolOperationMetadata(proto.Message): + r"""Runtime operation information for + UpdateDeploymentResourcePool method. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The operation generic information. + """ + + generic_metadata: operation.GenericOperationMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class DeleteDeploymentResourcePoolRequest(proto.Message): + r"""Request message for DeleteDeploymentResourcePool method. + + Attributes: + name (str): + Required. The name of the DeploymentResourcePool to delete. + Format: + ``projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class QueryDeployedModelsRequest(proto.Message): + r"""Request message for QueryDeployedModels method. 
+ + Attributes: + deployment_resource_pool (str): + Required. The name of the target DeploymentResourcePool to + query. Format: + ``projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}`` + page_size (int): + The maximum number of DeployedModels to + return. The service may return fewer than this + value. + page_token (str): + A page token, received from a previous + ``QueryDeployedModels`` call. Provide this to retrieve the + subsequent page. + + When paginating, all other parameters provided to + ``QueryDeployedModels`` must match the call that provided + the page token. + """ + + deployment_resource_pool: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class QueryDeployedModelsResponse(proto.Message): + r"""Response message for QueryDeployedModels method. + + Attributes: + deployed_models (MutableSequence[google.cloud.aiplatform_v1.types.DeployedModel]): + DEPRECATED Use deployed_model_refs instead. + next_page_token (str): + A token, which can be sent as ``page_token`` to retrieve the + next page. If this field is omitted, there are no subsequent + pages. + deployed_model_refs (MutableSequence[google.cloud.aiplatform_v1.types.DeployedModelRef]): + References to the DeployedModels that share + the specified deploymentResourcePool. + total_deployed_model_count (int): + The total number of DeployedModels on this + DeploymentResourcePool. + total_endpoint_count (int): + The total number of Endpoints that have + DeployedModels on this DeploymentResourcePool. 
+ """ + + @property + def raw_page(self): + return self + + deployed_models: MutableSequence[endpoint.DeployedModel] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=endpoint.DeployedModel, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + deployed_model_refs: MutableSequence[ + deployed_model_ref.DeployedModelRef + ] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=deployed_model_ref.DeployedModelRef, + ) + total_deployed_model_count: int = proto.Field( + proto.INT32, + number=4, + ) + total_endpoint_count: int = proto.Field( + proto.INT32, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/endpoint.py b/google/cloud/aiplatform_v1/types/endpoint.py index 57eafe739e..a8299d45af 100644 --- a/google/cloud/aiplatform_v1/types/endpoint.py +++ b/google/cloud/aiplatform_v1/types/endpoint.py @@ -222,6 +222,12 @@ class DeployedModel(proto.Message): degree are decided by Vertex AI, and require only a modest additional configuration. + This field is a member of `oneof`_ ``prediction_resources``. + shared_resources (str): + The resource name of the shared DeploymentResourcePool to + deploy on. Format: + ``projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}`` + This field is a member of `oneof`_ ``prediction_resources``. id (str): Immutable. The ID of the DeployedModel. 
If not provided upon @@ -320,6 +326,11 @@ class DeployedModel(proto.Message): oneof="prediction_resources", message=machine_resources.AutomaticResources, ) + shared_resources: str = proto.Field( + proto.STRING, + number=17, + oneof="prediction_resources", + ) id: str = proto.Field( proto.STRING, number=1, diff --git a/google/cloud/aiplatform_v1/types/feature_group.py b/google/cloud/aiplatform_v1/types/feature_group.py index e5697313bd..43cbbb81cf 100644 --- a/google/cloud/aiplatform_v1/types/feature_group.py +++ b/google/cloud/aiplatform_v1/types/feature_group.py @@ -45,7 +45,7 @@ class FeatureGroup(proto.Message): This field is a member of `oneof`_ ``source``. name (str): - Output only. Name of the FeatureGroup. Format: + Identifier. Name of the FeatureGroup. Format: ``projects/{project}/locations/{location}/featureGroups/{featureGroup}`` create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this FeatureGroup diff --git a/google/cloud/aiplatform_v1/types/feature_online_store.py b/google/cloud/aiplatform_v1/types/feature_online_store.py index af177abd0a..f7023fccc0 100644 --- a/google/cloud/aiplatform_v1/types/feature_online_store.py +++ b/google/cloud/aiplatform_v1/types/feature_online_store.py @@ -47,7 +47,7 @@ class FeatureOnlineStore(proto.Message): This field is a member of `oneof`_ ``storage_type``. name (str): - Output only. Name of the FeatureOnlineStore. Format: + Identifier. Name of the FeatureOnlineStore. Format: ``projects/{project}/locations/{location}/featureOnlineStores/{featureOnlineStore}`` create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. 
Timestamp when this diff --git a/google/cloud/aiplatform_v1/types/feature_view.py b/google/cloud/aiplatform_v1/types/feature_view.py index 42e3adac1e..9ade66f97c 100644 --- a/google/cloud/aiplatform_v1/types/feature_view.py +++ b/google/cloud/aiplatform_v1/types/feature_view.py @@ -55,7 +55,7 @@ class FeatureView(proto.Message): This field is a member of `oneof`_ ``source``. name (str): - Output only. Name of the FeatureView. Format: + Identifier. Name of the FeatureView. Format: ``projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}`` create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this FeatureView @@ -113,7 +113,7 @@ class BigQuerySource(proto.Message): ) class SyncConfig(proto.Message): - r""" + r"""Configuration for Sync. Only one option is set. Attributes: cron (str): diff --git a/google/cloud/aiplatform_v1/types/feature_view_sync.py b/google/cloud/aiplatform_v1/types/feature_view_sync.py index 671c49fbf9..7c5f4846ef 100644 --- a/google/cloud/aiplatform_v1/types/feature_view_sync.py +++ b/google/cloud/aiplatform_v1/types/feature_view_sync.py @@ -38,7 +38,7 @@ class FeatureViewSync(proto.Message): Attributes: name (str): - Output only. Name of the FeatureViewSync. Format: + Identifier. Name of the FeatureViewSync. Format: ``projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}/featureViewSyncs/{feature_view_sync}`` create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. 
Time when this FeatureViewSync diff --git a/google/cloud/aiplatform_v1/types/featurestore_service.py b/google/cloud/aiplatform_v1/types/featurestore_service.py index 6112892869..b8335337a6 100644 --- a/google/cloud/aiplatform_v1/types/featurestore_service.py +++ b/google/cloud/aiplatform_v1/types/featurestore_service.py @@ -1272,7 +1272,7 @@ class ListFeaturesRequest(proto.Message): When paginating, all other parameters provided to [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures] - or or + or [FeatureRegistryService.ListFeatures][google.cloud.aiplatform.v1.FeatureRegistryService.ListFeatures] must match the call that provided the page token. order_by (str): diff --git a/google/cloud/aiplatform_v1/types/model.py b/google/cloud/aiplatform_v1/types/model.py index cec6e15f4e..3219e71846 100644 --- a/google/cloud/aiplatform_v1/types/model.py +++ b/google/cloud/aiplatform_v1/types/model.py @@ -286,6 +286,12 @@ class Model(proto.Message): See https://goo.gl/xmQnxf for more information and examples of labels. + data_stats (google.cloud.aiplatform_v1.types.Model.DataStats): + Stats of data used for training or evaluating the Model. + + Only populated when the Model is trained by a + TrainingPipeline with + [data_input_config][TrainingPipeline.data_input_config]. encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources @@ -323,7 +329,9 @@ class DeploymentResourcesType(proto.Enum): SHARED_RESOURCES (3): Resources that can be shared by multiple [DeployedModels][google.cloud.aiplatform.v1.DeployedModel]. - A pre-configured [DeploymentResourcePool][] is required. + A pre-configured + [DeploymentResourcePool][google.cloud.aiplatform.v1.DeploymentResourcePool] + is required. 
""" DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED = 0 DEDICATED_RESOURCES = 1 @@ -396,6 +404,61 @@ class ExportableContent(proto.Enum): enum="Model.ExportFormat.ExportableContent", ) + class DataStats(proto.Message): + r"""Stats of data used for train or evaluate the Model. + + Attributes: + training_data_items_count (int): + Number of DataItems that were used for + training this Model. + validation_data_items_count (int): + Number of DataItems that were used for + validating this Model during training. + test_data_items_count (int): + Number of DataItems that were used for + evaluating this Model. If the Model is evaluated + multiple times, this will be the number of test + DataItems used by the first evaluation. If the + Model is not evaluated, the number is 0. + training_annotations_count (int): + Number of Annotations that are used for + training this Model. + validation_annotations_count (int): + Number of Annotations that are used for + validating this Model during training. + test_annotations_count (int): + Number of Annotations that are used for + evaluating this Model. If the Model is evaluated + multiple times, this will be the number of test + Annotations used by the first evaluation. If the + Model is not evaluated, the number is 0. + """ + + training_data_items_count: int = proto.Field( + proto.INT64, + number=1, + ) + validation_data_items_count: int = proto.Field( + proto.INT64, + number=2, + ) + test_data_items_count: int = proto.Field( + proto.INT64, + number=3, + ) + training_annotations_count: int = proto.Field( + proto.INT64, + number=4, + ) + validation_annotations_count: int = proto.Field( + proto.INT64, + number=5, + ) + test_annotations_count: int = proto.Field( + proto.INT64, + number=6, + ) + class OriginalModelInfo(proto.Message): r"""Contains information about the original Model if this Model is a copy. 
@@ -528,6 +591,11 @@ class OriginalModelInfo(proto.Message): proto.STRING, number=17, ) + data_stats: DataStats = proto.Field( + proto.MESSAGE, + number=21, + message=DataStats, + ) encryption_spec: gca_encryption_spec.EncryptionSpec = proto.Field( proto.MESSAGE, number=24, @@ -1003,6 +1071,9 @@ class ModelSourceType(proto.Enum): Garden. GENIE (5): The Model is saved or tuned from Genie. + CUSTOM_TEXT_EMBEDDING (6): + The Model is uploaded by text embedding + finetuning pipeline. """ MODEL_SOURCE_TYPE_UNSPECIFIED = 0 AUTOML = 1 @@ -1010,6 +1081,7 @@ class ModelSourceType(proto.Enum): BQML = 3 MODEL_GARDEN = 4 GENIE = 5 + CUSTOM_TEXT_EMBEDDING = 6 source_type: ModelSourceType = proto.Field( proto.ENUM, diff --git a/google/cloud/aiplatform_v1/types/model_service.py b/google/cloud/aiplatform_v1/types/model_service.py index 8cec4f5c33..b32c7ab847 100644 --- a/google/cloud/aiplatform_v1/types/model_service.py +++ b/google/cloud/aiplatform_v1/types/model_service.py @@ -95,11 +95,12 @@ class UploadModelRequest(proto.Message): Optional. The user-provided custom service account to use to do the model upload. If empty, `Vertex AI Service Agent `__ - will be used. Users uploading the Model must have the - ``iam.serviceAccounts.actAs`` permission on this service - account. Also, this account must belong to the project - specified in the ``parent`` field and have all necessary - read permissions. + will be used to access resources needed to upload the model. + This account must belong to the target project where the + model is uploaded to, i.e., the project specified in the + ``parent`` field of this request and have necessary read + permissions (to Google Cloud Storage, Artifact Registry, + etc.). 
""" parent: str = proto.Field( diff --git a/google/cloud/aiplatform_v1/types/openapi.py b/google/cloud/aiplatform_v1/types/openapi.py new file mode 100644 index 0000000000..86c481f1c2 --- /dev/null +++ b/google/cloud/aiplatform_v1/types/openapi.py @@ -0,0 +1,142 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", + manifest={ + "Type", + "Schema", + }, +) + + +class Type(proto.Enum): + r"""Type contains the list of OpenAPI data types as defined by + https://swagger.io/docs/specification/data-models/data-types/ + + Values: + TYPE_UNSPECIFIED (0): + Not specified, should not be used. + STRING (1): + OpenAPI string type + NUMBER (2): + OpenAPI number type + INTEGER (3): + OpenAPI integer type + BOOLEAN (4): + OpenAPI boolean type + ARRAY (5): + OpenAPI array type + OBJECT (6): + OpenAPI object type + """ + TYPE_UNSPECIFIED = 0 + STRING = 1 + NUMBER = 2 + INTEGER = 3 + BOOLEAN = 4 + ARRAY = 5 + OBJECT = 6 + + +class Schema(proto.Message): + r"""Schema is used to define the format of input/output data. Represents + a select subset of an `OpenAPI 3.0 schema + object `__. More fields + may be added in the future as needed. 
+ + Attributes: + type_ (google.cloud.aiplatform_v1.types.Type): + Optional. The type of the data. + format_ (str): + Optional. The format of the data. + Supported formats: + + for NUMBER type: float, double + for INTEGER type: int32, int64 + description (str): + Optional. The description of the data. + nullable (bool): + Optional. Indicates if the value may be null. + items (google.cloud.aiplatform_v1.types.Schema): + Optional. Schema of the elements of + Type.ARRAY. + enum (MutableSequence[str]): + Optional. Possible values of the element of Type.STRING with + enum format. For example we can define an Enum Direction as + : {type:STRING, format:enum, enum:["EAST", "NORTH", "SOUTH", + "WEST"]} + properties (MutableMapping[str, google.cloud.aiplatform_v1.types.Schema]): + Optional. Properties of Type.OBJECT. + required (MutableSequence[str]): + Optional. Required properties of Type.OBJECT. + example (google.protobuf.struct_pb2.Value): + Optional. Example of the object. Will only be + populated when the object is the root. 
+ """ + + type_: "Type" = proto.Field( + proto.ENUM, + number=1, + enum="Type", + ) + format_: str = proto.Field( + proto.STRING, + number=7, + ) + description: str = proto.Field( + proto.STRING, + number=8, + ) + nullable: bool = proto.Field( + proto.BOOL, + number=6, + ) + items: "Schema" = proto.Field( + proto.MESSAGE, + number=2, + message="Schema", + ) + enum: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=9, + ) + properties: MutableMapping[str, "Schema"] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=3, + message="Schema", + ) + required: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=5, + ) + example: struct_pb2.Value = proto.Field( + proto.MESSAGE, + number=4, + message=struct_pb2.Value, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/prediction_service.py b/google/cloud/aiplatform_v1/types/prediction_service.py index fa1cb784e3..90abde4c91 100644 --- a/google/cloud/aiplatform_v1/types/prediction_service.py +++ b/google/cloud/aiplatform_v1/types/prediction_service.py @@ -20,7 +20,9 @@ import proto # type: ignore from google.api import httpbody_pb2 # type: ignore +from google.cloud.aiplatform_v1.types import content from google.cloud.aiplatform_v1.types import explanation +from google.cloud.aiplatform_v1.types import tool from google.cloud.aiplatform_v1.types import types from google.protobuf import struct_pb2 # type: ignore @@ -43,6 +45,8 @@ "ExplainResponse", "CountTokensRequest", "CountTokensResponse", + "GenerateContentRequest", + "GenerateContentResponse", }, ) @@ -530,21 +534,36 @@ class CountTokensRequest(proto.Message): Required. The name of the Endpoint requested to perform token counting. Format: ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + model (str): + Required. The name of the publisher model requested to serve + the prediction. 
Format: + ``projects/{project}/locations/{location}/publishers/*/models/*`` instances (MutableSequence[google.protobuf.struct_pb2.Value]): Required. The instances that are the input to token counting call. Schema is identical to the prediction schema of the underlying model. + contents (MutableSequence[google.cloud.aiplatform_v1.types.Content]): + Required. Input content. """ endpoint: str = proto.Field( proto.STRING, number=1, ) + model: str = proto.Field( + proto.STRING, + number=3, + ) instances: MutableSequence[struct_pb2.Value] = proto.RepeatedField( proto.MESSAGE, number=2, message=struct_pb2.Value, ) + contents: MutableSequence[content.Content] = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=content.Content, + ) class CountTokensResponse(proto.Message): @@ -569,4 +588,163 @@ class CountTokensResponse(proto.Message): ) +class GenerateContentRequest(proto.Message): + r"""Request message for [PredictionService.GenerateContent]. + + Attributes: + model (str): + Required. The name of the publisher model requested to serve + the prediction. Format: + ``projects/{project}/locations/{location}/publishers/*/models/*`` + contents (MutableSequence[google.cloud.aiplatform_v1.types.Content]): + Required. The content of the current + conversation with the model. + For single-turn queries, this is a single + instance. For multi-turn queries, this is a + repeated field that contains conversation + history + latest request. + tools (MutableSequence[google.cloud.aiplatform_v1.types.Tool]): + Optional. A list of ``Tools`` the model may use to generate + the next response. + + A ``Tool`` is a piece of code that enables the system to + interact with external systems to perform an action, or set + of actions, outside of knowledge and scope of the model. The + only supported tool is currently ``Function`` + safety_settings (MutableSequence[google.cloud.aiplatform_v1.types.SafetySetting]): + Optional. Per request settings for blocking + unsafe content. 
Enforced on + GenerateContentResponse.candidates. + generation_config (google.cloud.aiplatform_v1.types.GenerationConfig): + Optional. Generation config. + """ + + model: str = proto.Field( + proto.STRING, + number=5, + ) + contents: MutableSequence[content.Content] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=content.Content, + ) + tools: MutableSequence[tool.Tool] = proto.RepeatedField( + proto.MESSAGE, + number=6, + message=tool.Tool, + ) + safety_settings: MutableSequence[content.SafetySetting] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=content.SafetySetting, + ) + generation_config: content.GenerationConfig = proto.Field( + proto.MESSAGE, + number=4, + message=content.GenerationConfig, + ) + + +class GenerateContentResponse(proto.Message): + r"""Response message for [PredictionService.GenerateContent]. + + Attributes: + candidates (MutableSequence[google.cloud.aiplatform_v1.types.Candidate]): + Output only. Generated candidates. + prompt_feedback (google.cloud.aiplatform_v1.types.GenerateContentResponse.PromptFeedback): + Output only. Content filter results for a + prompt sent in the request. Note: Sent only in + the first stream chunk. Only happens when no + candidates were generated due to content + violations. + usage_metadata (google.cloud.aiplatform_v1.types.GenerateContentResponse.UsageMetadata): + Usage metadata about the response(s). + """ + + class PromptFeedback(proto.Message): + r"""Content filter results for a prompt sent in the request. + + Attributes: + block_reason (google.cloud.aiplatform_v1.types.GenerateContentResponse.PromptFeedback.BlockedReason): + Output only. Blocked reason. + safety_ratings (MutableSequence[google.cloud.aiplatform_v1.types.SafetyRating]): + Output only. Safety ratings. + block_reason_message (str): + Output only. A readable block reason message. + """ + + class BlockedReason(proto.Enum): + r"""Blocked reason enumeration. 
+ + Values: + BLOCKED_REASON_UNSPECIFIED (0): + Unspecified blocked reason. + SAFETY (1): + Candidates blocked due to safety. + OTHER (2): + Candidates blocked due to other reason. + """ + BLOCKED_REASON_UNSPECIFIED = 0 + SAFETY = 1 + OTHER = 2 + + block_reason: "GenerateContentResponse.PromptFeedback.BlockedReason" = ( + proto.Field( + proto.ENUM, + number=1, + enum="GenerateContentResponse.PromptFeedback.BlockedReason", + ) + ) + safety_ratings: MutableSequence[content.SafetyRating] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=content.SafetyRating, + ) + block_reason_message: str = proto.Field( + proto.STRING, + number=3, + ) + + class UsageMetadata(proto.Message): + r"""Usage metadata about response(s). + + Attributes: + prompt_token_count (int): + Number of tokens in the request. + candidates_token_count (int): + Number of tokens in the response(s). + total_token_count (int): + + """ + + prompt_token_count: int = proto.Field( + proto.INT32, + number=1, + ) + candidates_token_count: int = proto.Field( + proto.INT32, + number=2, + ) + total_token_count: int = proto.Field( + proto.INT32, + number=3, + ) + + candidates: MutableSequence[content.Candidate] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=content.Candidate, + ) + prompt_feedback: PromptFeedback = proto.Field( + proto.MESSAGE, + number=3, + message=PromptFeedback, + ) + usage_metadata: UsageMetadata = proto.Field( + proto.MESSAGE, + number=4, + message=UsageMetadata, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/publisher_model.py b/google/cloud/aiplatform_v1/types/publisher_model.py index 96e3f941dd..b2546aef23 100644 --- a/google/cloud/aiplatform_v1/types/publisher_model.py +++ b/google/cloud/aiplatform_v1/types/publisher_model.py @@ -55,6 +55,9 @@ class PublisherModel(proto.Message): launch_stage (google.cloud.aiplatform_v1.types.PublisherModel.LaunchStage): Optional. Indicates the launch stage of the model. 
+ version_state (google.cloud.aiplatform_v1.types.PublisherModel.VersionState): + Optional. Indicates the state of the model + version. publisher_model_template (str): Optional. Output only. Immutable. Used to indicate this model has a publisher model and @@ -124,6 +127,21 @@ class LaunchStage(proto.Enum): PUBLIC_PREVIEW = 3 GA = 4 + class VersionState(proto.Enum): + r"""An enum representing the state of the PublicModelVersion. + + Values: + VERSION_STATE_UNSPECIFIED (0): + The version state is unspecified. + VERSION_STATE_STABLE (1): + Used to indicate the version is stable. + VERSION_STATE_UNSTABLE (2): + Used to indicate the version is unstable. + """ + VERSION_STATE_UNSPECIFIED = 0 + VERSION_STATE_STABLE = 1 + VERSION_STATE_UNSTABLE = 2 + class ResourceReference(proto.Message): r"""Reference to a resource. @@ -143,6 +161,14 @@ class ResourceReference(proto.Message): The resource name of the Google Cloud resource. + This field is a member of `oneof`_ ``reference``. + use_case (str): + Use case (CUJ) of the resource. + + This field is a member of `oneof`_ ``reference``. + description (str): + Description of the resource. + This field is a member of `oneof`_ ``reference``. """ @@ -156,6 +182,16 @@ class ResourceReference(proto.Message): number=2, oneof="reference", ) + use_case: str = proto.Field( + proto.STRING, + number=3, + oneof="reference", + ) + description: str = proto.Field( + proto.STRING, + number=4, + oneof="reference", + ) class Documentation(proto.Message): r"""A named piece of documentation. @@ -304,6 +340,9 @@ class Deploy(proto.Message): title (str): Required. The title of the regional resource reference. + public_artifact_uri (str): + Optional. The signed URI for ephemeral Cloud + Storage access to model artifact. 
""" dedicated_resources: machine_resources.DedicatedResources = proto.Field( @@ -345,6 +384,10 @@ class Deploy(proto.Message): proto.STRING, number=8, ) + public_artifact_uri: str = proto.Field( + proto.STRING, + number=9, + ) view_rest_api: "PublisherModel.CallToAction.ViewRestApi" = proto.Field( proto.MESSAGE, @@ -432,6 +475,11 @@ class Deploy(proto.Message): number=29, enum=LaunchStage, ) + version_state: VersionState = proto.Field( + proto.ENUM, + number=37, + enum=VersionState, + ) publisher_model_template: str = proto.Field( proto.STRING, number=30, diff --git a/google/cloud/aiplatform_v1/types/tool.py b/google/cloud/aiplatform_v1/types/tool.py new file mode 100644 index 0000000000..8ec86b3cfd --- /dev/null +++ b/google/cloud/aiplatform_v1/types/tool.py @@ -0,0 +1,172 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import openapi +from google.protobuf import struct_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", + manifest={ + "Tool", + "FunctionDeclaration", + "FunctionCall", + "FunctionResponse", + }, +) + + +class Tool(proto.Message): + r"""Tool details that the model may use to generate response. 
+ + A ``Tool`` is a piece of code that enables the system to interact + with external systems to perform an action, or set of actions, + outside of knowledge and scope of the model. + + Attributes: + function_declarations (MutableSequence[google.cloud.aiplatform_v1.types.FunctionDeclaration]): + Optional. One or more function declarations to be passed to + the model along with the current user query. Model may + decide to call a subset of these functions by populating + [FunctionCall][content.part.function_call] in the response. + User should provide a + [FunctionResponse][content.part.function_response] for each + function call in the next turn. Based on the function + responses, Model will generate the final response back to + the user. Maximum 64 function declarations can be provided. + """ + + function_declarations: MutableSequence["FunctionDeclaration"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="FunctionDeclaration", + ) + + +class FunctionDeclaration(proto.Message): + r"""Structured representation of a function declaration as defined by + the `OpenAPI 3.0 + specification `__. Included in + this declaration are the function name and parameters. This + FunctionDeclaration is a representation of a block of code that can + be used as a ``Tool`` by the model and executed by the client. + + Attributes: + name (str): + Required. The name of the function to call. + Must start with a letter or an underscore. + Must be a-z, A-Z, 0-9, or contain underscores + and dashes, with a maximum length of 64. + description (str): + Optional. Description and purpose of the + function. Model uses it to decide how and + whether to call the function. + parameters (google.cloud.aiplatform_v1.types.Schema): + Optional. Describes the parameters to this + function in JSON Schema Object format. Reflects + the Open API 3.03 Parameter Object. string Key: + the name of the parameter. Parameter names are + case sensitive. 
Schema Value: the Schema + defining the type used for the parameter. For + function with no parameters, this can be left + unset. Example with 1 required and 1 optional + parameter: type: OBJECT properties: + + param1: + + type: STRING + param2: + + type: INTEGER + required: + + - param1 + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + description: str = proto.Field( + proto.STRING, + number=2, + ) + parameters: openapi.Schema = proto.Field( + proto.MESSAGE, + number=3, + message=openapi.Schema, + ) + + +class FunctionCall(proto.Message): + r"""A predicted [FunctionCall] returned from the model that contains a + string representing the [FunctionDeclaration.name] and a structured + JSON object containing the parameters and their values. + + Attributes: + name (str): + Required. The name of the function to call. Matches + [FunctionDeclaration.name]. + args (google.protobuf.struct_pb2.Struct): + Optional. Required. The function parameters and values in + JSON object format. See [FunctionDeclaration.parameters] for + parameter details. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + args: struct_pb2.Struct = proto.Field( + proto.MESSAGE, + number=2, + message=struct_pb2.Struct, + ) + + +class FunctionResponse(proto.Message): + r"""The result output from a [FunctionCall] that contains a string + representing the [FunctionDeclaration.name] and a structured JSON + object containing any output from the function is used as context to + the model. This should contain the result of a [FunctionCall] made + based on model prediction. + + Attributes: + name (str): + Required. The name of the function to call. Matches + [FunctionDeclaration.name] and [FunctionCall.name]. + response (google.protobuf.struct_pb2.Struct): + Required. The function response in JSON + object format. 
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + response: struct_pb2.Struct = proto.Field( + proto.MESSAGE, + number=2, + message=struct_pb2.Struct, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/__init__.py b/google/cloud/aiplatform_v1beta1/__init__.py index e5fb3a1f53..f015c3a7c1 100644 --- a/google/cloud/aiplatform_v1beta1/__init__.py +++ b/google/cloud/aiplatform_v1beta1/__init__.py @@ -85,6 +85,18 @@ from .types.artifact import Artifact from .types.batch_prediction_job import BatchPredictionJob from .types.completion_stats import CompletionStats +from .types.content import Blob +from .types.content import Candidate +from .types.content import Citation +from .types.content import CitationMetadata +from .types.content import Content +from .types.content import FileData +from .types.content import GenerationConfig +from .types.content import Part +from .types.content import SafetyRating +from .types.content import SafetySetting +from .types.content import VideoMetadata +from .types.content import HarmCategory from .types.context import Context from .types.custom_job import ContainerSpec from .types.custom_job import CustomJob @@ -537,6 +549,8 @@ from .types.nas_job import NasJobSpec from .types.nas_job import NasTrial from .types.nas_job import NasTrialDetail +from .types.openapi import Schema +from .types.openapi import Type from .types.operation import DeleteOperationMetadata from .types.operation import GenericOperationMetadata from .types.persistent_resource import PersistentResource @@ -582,6 +596,8 @@ from .types.prediction_service import DirectRawPredictResponse from .types.prediction_service import ExplainRequest from .types.prediction_service import ExplainResponse +from .types.prediction_service import GenerateContentRequest +from .types.prediction_service import GenerateContentResponse from .types.prediction_service import PredictRequest from .types.prediction_service import 
PredictResponse from .types.prediction_service import RawPredictRequest @@ -672,6 +688,10 @@ from .types.tensorboard_service import WriteTensorboardRunDataRequest from .types.tensorboard_service import WriteTensorboardRunDataResponse from .types.tensorboard_time_series import TensorboardTimeSeries +from .types.tool import FunctionCall +from .types.tool import FunctionDeclaration +from .types.tool import FunctionResponse +from .types.tool import Tool from .types.training_pipeline import FilterSplit from .types.training_pipeline import FractionSplit from .types.training_pipeline import InputDataConfig @@ -776,6 +796,7 @@ "BatchReadTensorboardTimeSeriesDataResponse", "BigQueryDestination", "BigQuerySource", + "Blob", "BlurBaselineConfig", "BoolArray", "CancelBatchPredictionJobRequest", @@ -785,15 +806,19 @@ "CancelNasJobRequest", "CancelPipelineJobRequest", "CancelTrainingPipelineRequest", + "Candidate", "CheckTrialEarlyStoppingStateMetatdata", "CheckTrialEarlyStoppingStateRequest", "CheckTrialEarlyStoppingStateResponse", + "Citation", + "CitationMetadata", "CompleteTrialRequest", "CompletionStats", "ComputeTokensRequest", "ComputeTokensResponse", "ContainerRegistryDestination", "ContainerSpec", + "Content", "Context", "CopyModelOperationMetadata", "CopyModelRequest", @@ -980,12 +1005,19 @@ "FeaturestoreServiceClient", "FetchFeatureValuesRequest", "FetchFeatureValuesResponse", + "FileData", "FilterSplit", "FindNeighborsRequest", "FindNeighborsResponse", "FractionSplit", + "FunctionCall", + "FunctionDeclaration", + "FunctionResponse", "GcsDestination", "GcsSource", + "GenerateContentRequest", + "GenerateContentResponse", + "GenerationConfig", "GenericOperationMetadata", "GetAnnotationSpecRequest", "GetArtifactRequest", @@ -1028,6 +1060,7 @@ "GetTensorboardTimeSeriesRequest", "GetTrainingPipelineRequest", "GetTrialRequest", + "HarmCategory", "HyperparameterTuningJob", "IdMatcher", "ImportDataConfig", @@ -1189,6 +1222,7 @@ "NearestNeighbors", "Neighbor", "NfsMount", + 
"Part", "PauseModelDeploymentMonitoringJobRequest", "PauseScheduleRequest", "PersistentDiskSpec", @@ -1256,6 +1290,8 @@ "RestoreDatasetVersionRequest", "ResumeModelDeploymentMonitoringJobRequest", "ResumeScheduleRequest", + "SafetyRating", + "SafetySetting", "SampleConfig", "SampledShapleyAttribution", "SamplingStrategy", @@ -1264,6 +1300,7 @@ "Schedule", "ScheduleServiceClient", "Scheduling", + "Schema", "SearchDataItemsRequest", "SearchDataItemsResponse", "SearchFeaturesRequest", @@ -1309,10 +1346,12 @@ "TimeSeriesDataPoint", "TimestampSplit", "TokensInfo", + "Tool", "TrainingConfig", "TrainingPipeline", "Trial", "TrialContext", + "Type", "UndeployIndexOperationMetadata", "UndeployIndexRequest", "UndeployIndexResponse", @@ -1363,6 +1402,7 @@ "UpsertDatapointsResponse", "UserActionReference", "Value", + "VideoMetadata", "VizierServiceClient", "WorkerPoolSpec", "WriteFeatureValuesPayload", diff --git a/google/cloud/aiplatform_v1beta1/gapic_metadata.json b/google/cloud/aiplatform_v1beta1/gapic_metadata.json index 1dca7b669f..fc47229625 100644 --- a/google/cloud/aiplatform_v1beta1/gapic_metadata.json +++ b/google/cloud/aiplatform_v1beta1/gapic_metadata.json @@ -2341,6 +2341,11 @@ "server_streaming_predict" ] }, + "StreamGenerateContent": { + "methods": [ + "stream_generate_content" + ] + }, "StreamingPredict": { "methods": [ "streaming_predict" @@ -2391,6 +2396,11 @@ "server_streaming_predict" ] }, + "StreamGenerateContent": { + "methods": [ + "stream_generate_content" + ] + }, "StreamingPredict": { "methods": [ "streaming_predict" diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py index 55a32d1c1b..c91bedd48f 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py @@ -230,40 +230,40 @@ def parse_dataset_path(path: str) -> Dict[str, str]: @staticmethod def 
dataset_path( project: str, - location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( + return "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", - path, - ) + m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod def dataset_path( project: str, + location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( + return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py index fa0ad636d5..c04887d796 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py @@ -46,6 +46,7 @@ OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore from google.api import httpbody_pb2 # type: ignore +from google.cloud.aiplatform_v1beta1.types import content from google.cloud.aiplatform_v1beta1.types import explanation from google.cloud.aiplatform_v1beta1.types import prediction_service from google.cloud.aiplatform_v1beta1.types 
import types @@ -1184,9 +1185,14 @@ async def sample_count_tokens(): instances = aiplatform_v1beta1.Value() instances.null_value = "NULL_VALUE" + contents = aiplatform_v1beta1.Content() + contents.parts.text = "text_value" + request = aiplatform_v1beta1.CountTokensRequest( endpoint="endpoint_value", + model="model_value", instances=instances, + contents=contents, ) # Make the request @@ -1272,6 +1278,130 @@ async def sample_count_tokens(): # Done; return the response. return response + def stream_generate_content( + self, + request: Optional[ + Union[prediction_service.GenerateContentRequest, dict] + ] = None, + *, + model: Optional[str] = None, + contents: Optional[MutableSequence[content.Content]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[AsyncIterable[prediction_service.GenerateContentResponse]]: + r"""Generate content with multimodal inputs with + streaming support. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_stream_generate_content(): + # Create a client + client = aiplatform_v1beta1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + contents = aiplatform_v1beta1.Content() + contents.parts.text = "text_value" + + request = aiplatform_v1beta1.GenerateContentRequest( + model="model_value", + contents=contents, + ) + + # Make the request + stream = await client.stream_generate_content(request=request) + + # Handle the response + async for response in stream: + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.GenerateContentRequest, dict]]): + The request object. Request message for [PredictionService.GenerateContent]. + model (:class:`str`): + Required. The name of the publisher model requested to + serve the prediction. Format: + ``projects/{project}/locations/{location}/publishers/*/models/*`` + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + contents (:class:`MutableSequence[google.cloud.aiplatform_v1beta1.types.Content]`): + Required. The content of the current + conversation with the model. + For single-turn queries, this is a + single instance. For multi-turn queries, + this is a repeated field that contains + conversation history + latest request. + + This corresponds to the ``contents`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + AsyncIterable[google.cloud.aiplatform_v1beta1.types.GenerateContentResponse]: + Response message for + [PredictionService.GenerateContent]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, contents]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = prediction_service.GenerateContentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if contents: + request.contents.extend(contents) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.stream_generate_content, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + async def list_operations( self, request: Optional[operations_pb2.ListOperationsRequest] = None, diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py index 07539299d6..3f57e4fa81 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py @@ -49,6 +49,7 @@ OptionalRetry = Union[retries.Retry, object] # type: ignore from google.api import httpbody_pb2 # type: ignore +from google.cloud.aiplatform_v1beta1.types import content from google.cloud.aiplatform_v1beta1.types import explanation from google.cloud.aiplatform_v1beta1.types import prediction_service from google.cloud.aiplatform_v1beta1.types import types @@ -1423,9 +1424,14 @@ def sample_count_tokens(): instances = aiplatform_v1beta1.Value() instances.null_value = "NULL_VALUE" + contents = aiplatform_v1beta1.Content() + contents.parts.text = "text_value" + request = aiplatform_v1beta1.CountTokensRequest( endpoint="endpoint_value", + model="model_value", instances=instances, + contents=contents, ) # Make the request @@ -1511,6 +1517,130 @@ def sample_count_tokens(): # Done; return the response. return response + def stream_generate_content( + self, + request: Optional[ + Union[prediction_service.GenerateContentRequest, dict] + ] = None, + *, + model: Optional[str] = None, + contents: Optional[MutableSequence[content.Content]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[prediction_service.GenerateContentResponse]: + r"""Generate content with multimodal inputs with + streaming support. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_stream_generate_content(): + # Create a client + client = aiplatform_v1beta1.PredictionServiceClient() + + # Initialize request argument(s) + contents = aiplatform_v1beta1.Content() + contents.parts.text = "text_value" + + request = aiplatform_v1beta1.GenerateContentRequest( + model="model_value", + contents=contents, + ) + + # Make the request + stream = client.stream_generate_content(request=request) + + # Handle the response + for response in stream: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GenerateContentRequest, dict]): + The request object. Request message for [PredictionService.GenerateContent]. + model (str): + Required. The name of the publisher model requested to + serve the prediction. Format: + ``projects/{project}/locations/{location}/publishers/*/models/*`` + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + contents (MutableSequence[google.cloud.aiplatform_v1beta1.types.Content]): + Required. The content of the current + conversation with the model. + For single-turn queries, this is a + single instance. For multi-turn queries, + this is a repeated field that contains + conversation history + latest request. + + This corresponds to the ``contents`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.cloud.aiplatform_v1beta1.types.GenerateContentResponse]: + Response message for + [PredictionService.GenerateContent]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, contents]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a prediction_service.GenerateContentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, prediction_service.GenerateContentRequest): + request = prediction_service.GenerateContentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if contents is not None: + request.contents = contents + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.stream_generate_content] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + def __enter__(self) -> "PredictionServiceClient": return self diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py index 91a8117158..221c62ee0e 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py @@ -173,6 +173,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.stream_generate_content: gapic_v1.method.wrap_method( + self.stream_generate_content, + default_timeout=None, + client_info=client_info, + ), } def close(self): @@ -289,6 +294,18 @@ def count_tokens( ]: raise NotImplementedError() + @property + def stream_generate_content( + self, + ) -> Callable[ + [prediction_service.GenerateContentRequest], + Union[ + prediction_service.GenerateContentResponse, + Awaitable[prediction_service.GenerateContentResponse], + ], + ]: + raise NotImplementedError() + @property def list_operations( self, diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py index 6c54d99d25..dcb90d82ae 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py @@ -513,6 +513,36 @@ def count_tokens( ) return self._stubs["count_tokens"] + @property + def stream_generate_content( + self, + ) -> Callable[ + [prediction_service.GenerateContentRequest], + prediction_service.GenerateContentResponse, + ]: + r"""Return a callable for the stream generate content method over gRPC. + + Generate content with multimodal inputs with + streaming support. 
+ + Returns: + Callable[[~.GenerateContentRequest], + ~.GenerateContentResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "stream_generate_content" not in self._stubs: + self._stubs["stream_generate_content"] = self.grpc_channel.unary_stream( + "/google.cloud.aiplatform.v1beta1.PredictionService/StreamGenerateContent", + request_serializer=prediction_service.GenerateContentRequest.serialize, + response_deserializer=prediction_service.GenerateContentResponse.deserialize, + ) + return self._stubs["stream_generate_content"] + def close(self): self.grpc_channel.close() diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py index e49c3dea5d..95cb4b0a12 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py @@ -521,6 +521,36 @@ def count_tokens( ) return self._stubs["count_tokens"] + @property + def stream_generate_content( + self, + ) -> Callable[ + [prediction_service.GenerateContentRequest], + Awaitable[prediction_service.GenerateContentResponse], + ]: + r"""Return a callable for the stream generate content method over gRPC. + + Generate content with multimodal inputs with + streaming support. + + Returns: + Callable[[~.GenerateContentRequest], + Awaitable[~.GenerateContentResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "stream_generate_content" not in self._stubs: + self._stubs["stream_generate_content"] = self.grpc_channel.unary_stream( + "/google.cloud.aiplatform.v1beta1.PredictionService/StreamGenerateContent", + request_serializer=prediction_service.GenerateContentRequest.serialize, + response_deserializer=prediction_service.GenerateContentResponse.deserialize, + ) + return self._stubs["stream_generate_content"] + def close(self): return self.grpc_channel.close() diff --git a/google/cloud/aiplatform_v1beta1/types/__init__.py b/google/cloud/aiplatform_v1beta1/types/__init__.py index daf73c1db1..2051bab280 100644 --- a/google/cloud/aiplatform_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform_v1beta1/types/__init__.py @@ -28,6 +28,20 @@ from .completion_stats import ( CompletionStats, ) +from .content import ( + Blob, + Candidate, + Citation, + CitationMetadata, + Content, + FileData, + GenerationConfig, + Part, + SafetyRating, + SafetySetting, + VideoMetadata, + HarmCategory, +) from .context import ( Context, ) @@ -583,6 +597,10 @@ NasTrial, NasTrialDetail, ) +from .openapi import ( + Schema, + Type, +) from .operation import ( DeleteOperationMetadata, GenericOperationMetadata, @@ -637,6 +655,8 @@ DirectRawPredictResponse, ExplainRequest, ExplainResponse, + GenerateContentRequest, + GenerateContentResponse, PredictRequest, PredictResponse, RawPredictRequest, @@ -756,6 +776,12 @@ from .tensorboard_time_series import ( TensorboardTimeSeries, ) +from .tool import ( + FunctionCall, + FunctionDeclaration, + FunctionResponse, + Tool, +) from .training_pipeline import ( FilterSplit, FractionSplit, @@ -813,6 +839,18 @@ "Artifact", "BatchPredictionJob", "CompletionStats", + "Blob", + "Candidate", + "Citation", + "CitationMetadata", + "Content", + "FileData", + "GenerationConfig", + "Part", + "SafetyRating", + "SafetySetting", + "VideoMetadata", + "HarmCategory", "Context", "ContainerSpec", "CustomJob", @@ -1249,6 +1287,8 @@ "NasJobSpec", "NasTrial", "NasTrialDetail", + 
"Schema", + "Type", "DeleteOperationMetadata", "GenericOperationMetadata", "PersistentResource", @@ -1294,6 +1334,8 @@ "DirectRawPredictResponse", "ExplainRequest", "ExplainResponse", + "GenerateContentRequest", + "GenerateContentResponse", "PredictRequest", "PredictResponse", "RawPredictRequest", @@ -1384,6 +1426,10 @@ "WriteTensorboardRunDataRequest", "WriteTensorboardRunDataResponse", "TensorboardTimeSeries", + "FunctionCall", + "FunctionDeclaration", + "FunctionResponse", + "Tool", "FilterSplit", "FractionSplit", "InputDataConfig", diff --git a/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py b/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py index 3cd57f6841..db643f5560 100644 --- a/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py +++ b/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py @@ -399,8 +399,8 @@ class InstanceConfig(proto.Message): [excluded_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.excluded_fields] must be empty. - The input must be JSONL with objects at each line, CSV, - BigQuery or TfRecord. + The input must be JSONL with objects at each line, BigQuery + or TfRecord. excluded_fields (MutableSequence[str]): Fields that will be excluded in the prediction instance that is sent to the Model. @@ -413,8 +413,8 @@ class InstanceConfig(proto.Message): [included_fields][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InstanceConfig.included_fields] must be empty. - The input must be JSONL with objects at each line, CSV, - BigQuery or TfRecord. + The input must be JSONL with objects at each line, BigQuery + or TfRecord. 
""" instance_type: str = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/content.py b/google/cloud/aiplatform_v1beta1/types/content.py new file mode 100644 index 0000000000..06851c1c29 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/content.py @@ -0,0 +1,571 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import tool +from google.protobuf import duration_pb2 # type: ignore +from google.type import date_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "HarmCategory", + "Content", + "Part", + "Blob", + "FileData", + "VideoMetadata", + "GenerationConfig", + "SafetySetting", + "SafetyRating", + "CitationMetadata", + "Citation", + "Candidate", + }, +) + + +class HarmCategory(proto.Enum): + r"""Harm categories that will block the content. + + Values: + HARM_CATEGORY_UNSPECIFIED (0): + The harm category is unspecified. + HARM_CATEGORY_HATE_SPEECH (1): + The harm category is hate speech. + HARM_CATEGORY_DANGEROUS_CONTENT (2): + The harm category is dangerous content. + HARM_CATEGORY_HARASSMENT (3): + The harm category is harassment. + HARM_CATEGORY_SEXUALLY_EXPLICIT (4): + The harm category is sexually explicit + content. 
+ """ + HARM_CATEGORY_UNSPECIFIED = 0 + HARM_CATEGORY_HATE_SPEECH = 1 + HARM_CATEGORY_DANGEROUS_CONTENT = 2 + HARM_CATEGORY_HARASSMENT = 3 + HARM_CATEGORY_SEXUALLY_EXPLICIT = 4 + + +class Content(proto.Message): + r"""The base structured datatype containing multi-part content of a + message. + + A ``Content`` includes a ``role`` field designating the producer of + the ``Content`` and a ``parts`` field containing multi-part data + that contains the content of the message turn. + + Attributes: + role (str): + Optional. The producer of the content. Must + be either 'user' or 'model'. + Useful to set for multi-turn conversations, + otherwise can be left blank or unset. + parts (MutableSequence[google.cloud.aiplatform_v1beta1.types.Part]): + Required. Ordered ``Parts`` that constitute a single + message. Parts may have different IANA MIME types. + """ + + role: str = proto.Field( + proto.STRING, + number=1, + ) + parts: MutableSequence["Part"] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="Part", + ) + + +class Part(proto.Message): + r"""A datatype containing media that is part of a multi-part ``Content`` + message. + + A ``Part`` consists of data which has an associated datatype. A + ``Part`` can only contain one of the accepted types in + ``Part.data``. + + A ``Part`` must have a fixed IANA MIME type identifying the type and + subtype of the media if ``inline_data`` or ``file_data`` field is + filled with raw bytes. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + text (str): + Optional. Text part (can be code). + + This field is a member of `oneof`_ ``data``. + inline_data (google.cloud.aiplatform_v1beta1.types.Blob): + Optional. Inlined bytes data. 
+ + This field is a member of `oneof`_ ``data``. + file_data (google.cloud.aiplatform_v1beta1.types.FileData): + Optional. URI based data. + + This field is a member of `oneof`_ ``data``. + function_call (google.cloud.aiplatform_v1beta1.types.FunctionCall): + Optional. A predicted [FunctionCall] returned from the model + that contains a string representing the + [FunctionDeclaration.name] with the parameters and their + values. + + This field is a member of `oneof`_ ``data``. + function_response (google.cloud.aiplatform_v1beta1.types.FunctionResponse): + Optional. The result output of a [FunctionCall] that + contains a string representing the + [FunctionDeclaration.name] and a structured JSON object + containing any output from the function call. It is used as + context to the model. + + This field is a member of `oneof`_ ``data``. + video_metadata (google.cloud.aiplatform_v1beta1.types.VideoMetadata): + Optional. Video metadata. The metadata should only be + specified while the video data is presented in inline_data + or file_data. + + This field is a member of `oneof`_ ``metadata``. + """ + + text: str = proto.Field( + proto.STRING, + number=1, + oneof="data", + ) + inline_data: "Blob" = proto.Field( + proto.MESSAGE, + number=2, + oneof="data", + message="Blob", + ) + file_data: "FileData" = proto.Field( + proto.MESSAGE, + number=3, + oneof="data", + message="FileData", + ) + function_call: tool.FunctionCall = proto.Field( + proto.MESSAGE, + number=5, + oneof="data", + message=tool.FunctionCall, + ) + function_response: tool.FunctionResponse = proto.Field( + proto.MESSAGE, + number=6, + oneof="data", + message=tool.FunctionResponse, + ) + video_metadata: "VideoMetadata" = proto.Field( + proto.MESSAGE, + number=4, + oneof="metadata", + message="VideoMetadata", + ) + + +class Blob(proto.Message): + r"""Raw media bytes. + + Text should not be sent as raw bytes, use the 'text' field. + + Attributes: + mime_type (str): + Required. 
The IANA standard MIME type of the + source data. + data (bytes): + Required. Raw bytes for media formats. + """ + + mime_type: str = proto.Field( + proto.STRING, + number=1, + ) + data: bytes = proto.Field( + proto.BYTES, + number=2, + ) + + +class FileData(proto.Message): + r"""URI based data. + + Attributes: + mime_type (str): + Required. The IANA standard MIME type of the + source data. + file_uri (str): + Required. URI. + """ + + mime_type: str = proto.Field( + proto.STRING, + number=1, + ) + file_uri: str = proto.Field( + proto.STRING, + number=2, + ) + + +class VideoMetadata(proto.Message): + r"""Metadata describes the input video content. + + Attributes: + start_offset (google.protobuf.duration_pb2.Duration): + Optional. The start offset of the video. + end_offset (google.protobuf.duration_pb2.Duration): + Optional. The end offset of the video. + """ + + start_offset: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + end_offset: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=2, + message=duration_pb2.Duration, + ) + + +class GenerationConfig(proto.Message): + r"""Generation config. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + temperature (float): + Optional. Controls the randomness of + predictions. + + This field is a member of `oneof`_ ``_temperature``. + top_p (float): + Optional. If specified, nucleus sampling will + be used. + + This field is a member of `oneof`_ ``_top_p``. + top_k (float): + Optional. If specified, top-k sampling will + be used. + + This field is a member of `oneof`_ ``_top_k``. + candidate_count (int): + Optional. Number of candidates to generate. + + This field is a member of `oneof`_ ``_candidate_count``. + max_output_tokens (int): + Optional. The maximum number of output tokens + to generate per message. + + This field is a member of `oneof`_ ``_max_output_tokens``. 
+ stop_sequences (MutableSequence[str]): + Optional. Stop sequences. + """ + + temperature: float = proto.Field( + proto.FLOAT, + number=1, + optional=True, + ) + top_p: float = proto.Field( + proto.FLOAT, + number=2, + optional=True, + ) + top_k: float = proto.Field( + proto.FLOAT, + number=3, + optional=True, + ) + candidate_count: int = proto.Field( + proto.INT32, + number=4, + optional=True, + ) + max_output_tokens: int = proto.Field( + proto.INT32, + number=5, + optional=True, + ) + stop_sequences: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=6, + ) + + +class SafetySetting(proto.Message): + r"""Safety settings. + + Attributes: + category (google.cloud.aiplatform_v1beta1.types.HarmCategory): + Required. Harm category. + threshold (google.cloud.aiplatform_v1beta1.types.SafetySetting.HarmBlockThreshold): + Required. The harm block threshold. + """ + + class HarmBlockThreshold(proto.Enum): + r"""Probability based thresholds levels for blocking. + + Values: + HARM_BLOCK_THRESHOLD_UNSPECIFIED (0): + Unspecified harm block threshold. + BLOCK_LOW_AND_ABOVE (1): + Block low threshold and above (i.e. block + more). + BLOCK_MEDIUM_AND_ABOVE (2): + Block medium threshold and above. + BLOCK_ONLY_HIGH (3): + Block only high threshold (i.e. block less). + BLOCK_NONE (4): + Block none. + """ + HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0 + BLOCK_LOW_AND_ABOVE = 1 + BLOCK_MEDIUM_AND_ABOVE = 2 + BLOCK_ONLY_HIGH = 3 + BLOCK_NONE = 4 + + category: "HarmCategory" = proto.Field( + proto.ENUM, + number=1, + enum="HarmCategory", + ) + threshold: HarmBlockThreshold = proto.Field( + proto.ENUM, + number=2, + enum=HarmBlockThreshold, + ) + + +class SafetyRating(proto.Message): + r"""Safety rating corresponding to the generated content. + + Attributes: + category (google.cloud.aiplatform_v1beta1.types.HarmCategory): + Output only. Harm category. + probability (google.cloud.aiplatform_v1beta1.types.SafetyRating.HarmProbability): + Output only. 
Harm probability levels in the + content. + blocked (bool): + Output only. Indicates whether the content + was filtered out because of this rating. + """ + + class HarmProbability(proto.Enum): + r"""Harm probability levels in the content. + + Values: + HARM_PROBABILITY_UNSPECIFIED (0): + Harm probability unspecified. + NEGLIGIBLE (1): + Negligible level of harm. + LOW (2): + Low level of harm. + MEDIUM (3): + Medium level of harm. + HIGH (4): + High level of harm. + """ + HARM_PROBABILITY_UNSPECIFIED = 0 + NEGLIGIBLE = 1 + LOW = 2 + MEDIUM = 3 + HIGH = 4 + + category: "HarmCategory" = proto.Field( + proto.ENUM, + number=1, + enum="HarmCategory", + ) + probability: HarmProbability = proto.Field( + proto.ENUM, + number=2, + enum=HarmProbability, + ) + blocked: bool = proto.Field( + proto.BOOL, + number=3, + ) + + +class CitationMetadata(proto.Message): + r"""A collection of source attributions for a piece of content. + + Attributes: + citations (MutableSequence[google.cloud.aiplatform_v1beta1.types.Citation]): + Output only. List of citations. + """ + + citations: MutableSequence["Citation"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="Citation", + ) + + +class Citation(proto.Message): + r"""Source attributions for content. + + Attributes: + start_index (int): + Output only. Start index into the content. + end_index (int): + Output only. End index into the content. + uri (str): + Output only. Url reference of the + attribution. + title (str): + Output only. Title of the attribution. + license_ (str): + Output only. License of the attribution. + publication_date (google.type.date_pb2.Date): + Output only. Publication date of the + attribution. 
+ """ + + start_index: int = proto.Field( + proto.INT32, + number=1, + ) + end_index: int = proto.Field( + proto.INT32, + number=2, + ) + uri: str = proto.Field( + proto.STRING, + number=3, + ) + title: str = proto.Field( + proto.STRING, + number=4, + ) + license_: str = proto.Field( + proto.STRING, + number=5, + ) + publication_date: date_pb2.Date = proto.Field( + proto.MESSAGE, + number=6, + message=date_pb2.Date, + ) + + +class Candidate(proto.Message): + r"""A response candidate generated from the model. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + index (int): + Output only. Index of the candidate. + content (google.cloud.aiplatform_v1beta1.types.Content): + Output only. Content parts of the candidate. + finish_reason (google.cloud.aiplatform_v1beta1.types.Candidate.FinishReason): + Output only. The reason why the model stopped + generating tokens. If empty, the model has not + stopped generating the tokens. + safety_ratings (MutableSequence[google.cloud.aiplatform_v1beta1.types.SafetyRating]): + Output only. List of ratings for the safety + of a response candidate. + There is at most one rating per category. + finish_message (str): + Output only. Describes the reason the mode stopped + generating tokens in more detail. This is only filled when + ``finish_reason`` is set. + + This field is a member of `oneof`_ ``_finish_message``. + citation_metadata (google.cloud.aiplatform_v1beta1.types.CitationMetadata): + Output only. Source attribution of the + generated content. + """ + + class FinishReason(proto.Enum): + r"""The reason why the model stopped generating tokens. + If empty, the model has not stopped generating the tokens. + + Values: + FINISH_REASON_UNSPECIFIED (0): + The finish reason is unspecified. + STOP (1): + Natural stop point of the model or provided + stop sequence. + MAX_TOKENS (2): + The maximum number of tokens as specified in + the request was reached. 
+ SAFETY (3): + The token generation was stopped as the + response was flagged for safety reasons. NOTE: + When streaming the Candidate.content will be + empty if content filters blocked the output. + RECITATION (4): + The token generation was stopped as the + response was flagged for unauthorized citations. + OTHER (5): + All other reasons that stopped the token + generation + """ + FINISH_REASON_UNSPECIFIED = 0 + STOP = 1 + MAX_TOKENS = 2 + SAFETY = 3 + RECITATION = 4 + OTHER = 5 + + index: int = proto.Field( + proto.INT32, + number=1, + ) + content: "Content" = proto.Field( + proto.MESSAGE, + number=2, + message="Content", + ) + finish_reason: FinishReason = proto.Field( + proto.ENUM, + number=3, + enum=FinishReason, + ) + safety_ratings: MutableSequence["SafetyRating"] = proto.RepeatedField( + proto.MESSAGE, + number=4, + message="SafetyRating", + ) + finish_message: str = proto.Field( + proto.STRING, + number=5, + optional=True, + ) + citation_metadata: "CitationMetadata" = proto.Field( + proto.MESSAGE, + number=6, + message="CitationMetadata", + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/dataset_service.py b/google/cloud/aiplatform_v1beta1/types/dataset_service.py index f32eeedc64..b3a08fec74 100644 --- a/google/cloud/aiplatform_v1beta1/types/dataset_service.py +++ b/google/cloud/aiplatform_v1beta1/types/dataset_service.py @@ -349,8 +349,10 @@ class ExportDataResponse(proto.Message): Attributes: exported_files (MutableSequence[str]): - All of the files that are exported in this - export operation. + All of the files that are exported in this export operation. + For custom code training export, only three (training, + validation and test) GCS paths in wildcard format are + populated (e.g., gs://.../training-*). 
""" exported_files: MutableSequence[str] = proto.RepeatedField( diff --git a/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py b/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py index 2cb6219271..fc1a7fabe8 100644 --- a/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py +++ b/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py @@ -38,6 +38,9 @@ class DeployedIndexRef(proto.Message): deployed_index_id (str): Immutable. The ID of the DeployedIndex in the above IndexEndpoint. + display_name (str): + Output only. The display name of the + DeployedIndex. """ index_endpoint: str = proto.Field( @@ -48,6 +51,10 @@ class DeployedIndexRef(proto.Message): proto.STRING, number=2, ) + display_name: str = proto.Field( + proto.STRING, + number=3, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/feature_group.py b/google/cloud/aiplatform_v1beta1/types/feature_group.py index c60fcb8668..6501667735 100644 --- a/google/cloud/aiplatform_v1beta1/types/feature_group.py +++ b/google/cloud/aiplatform_v1beta1/types/feature_group.py @@ -45,7 +45,7 @@ class FeatureGroup(proto.Message): This field is a member of `oneof`_ ``source``. name (str): - Output only. Name of the FeatureGroup. Format: + Identifier. Name of the FeatureGroup. Format: ``projects/{project}/locations/{location}/featureGroups/{featureGroup}`` create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this FeatureGroup diff --git a/google/cloud/aiplatform_v1beta1/types/feature_online_store.py b/google/cloud/aiplatform_v1beta1/types/feature_online_store.py index 0d84e32b21..aca3395164 100644 --- a/google/cloud/aiplatform_v1beta1/types/feature_online_store.py +++ b/google/cloud/aiplatform_v1beta1/types/feature_online_store.py @@ -62,7 +62,7 @@ class FeatureOnlineStore(proto.Message): This field is a member of `oneof`_ ``storage_type``. name (str): - Output only. Name of the FeatureOnlineStore. 
Format: + Identifier. Name of the FeatureOnlineStore. Format: ``projects/{project}/locations/{location}/featureOnlineStores/{featureOnlineStore}`` create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this diff --git a/google/cloud/aiplatform_v1beta1/types/feature_view.py b/google/cloud/aiplatform_v1beta1/types/feature_view.py index d91bd20c26..0a65fb2238 100644 --- a/google/cloud/aiplatform_v1beta1/types/feature_view.py +++ b/google/cloud/aiplatform_v1beta1/types/feature_view.py @@ -55,7 +55,7 @@ class FeatureView(proto.Message): This field is a member of `oneof`_ ``source``. name (str): - Output only. Name of the FeatureView. Format: + Identifier. Name of the FeatureView. Format: ``projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}`` create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this FeatureView @@ -119,7 +119,7 @@ class BigQuerySource(proto.Message): ) class SyncConfig(proto.Message): - r""" + r"""Configuration for Sync. Only one option is set. Attributes: cron (str): diff --git a/google/cloud/aiplatform_v1beta1/types/feature_view_sync.py b/google/cloud/aiplatform_v1beta1/types/feature_view_sync.py index 5c0c3ed75f..071926b0ad 100644 --- a/google/cloud/aiplatform_v1beta1/types/feature_view_sync.py +++ b/google/cloud/aiplatform_v1beta1/types/feature_view_sync.py @@ -38,7 +38,7 @@ class FeatureViewSync(proto.Message): Attributes: name (str): - Output only. Name of the FeatureViewSync. Format: + Identifier. Name of the FeatureViewSync. Format: ``projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}/featureViewSyncs/{feature_view_sync}`` create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. 
Time when this FeatureViewSync diff --git a/google/cloud/aiplatform_v1beta1/types/featurestore_service.py b/google/cloud/aiplatform_v1beta1/types/featurestore_service.py index ad180faef7..74416e977b 100644 --- a/google/cloud/aiplatform_v1beta1/types/featurestore_service.py +++ b/google/cloud/aiplatform_v1beta1/types/featurestore_service.py @@ -1274,7 +1274,7 @@ class ListFeaturesRequest(proto.Message): When paginating, all other parameters provided to [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures] - or or + or [FeatureRegistryService.ListFeatures][google.cloud.aiplatform.v1beta1.FeatureRegistryService.ListFeatures] must match the call that provided the page token. order_by (str): diff --git a/google/cloud/aiplatform_v1beta1/types/model.py b/google/cloud/aiplatform_v1beta1/types/model.py index c91950d19b..83864fe33a 100644 --- a/google/cloud/aiplatform_v1beta1/types/model.py +++ b/google/cloud/aiplatform_v1beta1/types/model.py @@ -998,6 +998,9 @@ class ModelSourceType(proto.Enum): Garden. GENIE (5): The Model is saved or tuned from Genie. + CUSTOM_TEXT_EMBEDDING (6): + The Model is uploaded by text embedding + finetuning pipeline. """ MODEL_SOURCE_TYPE_UNSPECIFIED = 0 AUTOML = 1 @@ -1005,6 +1008,7 @@ class ModelSourceType(proto.Enum): BQML = 3 MODEL_GARDEN = 4 GENIE = 5 + CUSTOM_TEXT_EMBEDDING = 6 source_type: ModelSourceType = proto.Field( proto.ENUM, diff --git a/google/cloud/aiplatform_v1beta1/types/model_service.py b/google/cloud/aiplatform_v1beta1/types/model_service.py index f7e1f717b8..5ff8ce2105 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_service.py +++ b/google/cloud/aiplatform_v1beta1/types/model_service.py @@ -97,11 +97,12 @@ class UploadModelRequest(proto.Message): Optional. The user-provided custom service account to use to do the model upload. If empty, `Vertex AI Service Agent `__ - will be used. 
Users uploading the Model must have the - ``iam.serviceAccounts.actAs`` permission on this service - account. Also, this account must belong to the project - specified in the ``parent`` field and have all necessary - read permissions. + will be used to access resources needed to upload the model. + This account must belong to the target project where the + model is uploaded to, i.e., the project specified in the + ``parent`` field of this request and have necessary read + permissions (to Google Cloud Storage, Artifact Registry, + etc.). """ parent: str = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/openapi.py b/google/cloud/aiplatform_v1beta1/types/openapi.py new file mode 100644 index 0000000000..4962766cd1 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/openapi.py @@ -0,0 +1,142 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "Type", + "Schema", + }, +) + + +class Type(proto.Enum): + r"""Type contains the list of OpenAPI data types as defined by + https://swagger.io/docs/specification/data-models/data-types/ + + Values: + TYPE_UNSPECIFIED (0): + Not specified, should not be used. 
+ STRING (1): + OpenAPI string type + NUMBER (2): + OpenAPI number type + INTEGER (3): + OpenAPI integer type + BOOLEAN (4): + OpenAPI boolean type + ARRAY (5): + OpenAPI array type + OBJECT (6): + OpenAPI object type + """ + TYPE_UNSPECIFIED = 0 + STRING = 1 + NUMBER = 2 + INTEGER = 3 + BOOLEAN = 4 + ARRAY = 5 + OBJECT = 6 + + +class Schema(proto.Message): + r"""Schema is used to define the format of input/output data. Represents + a select subset of an `OpenAPI 3.0 schema + object `__. More fields + may be added in the future as needed. + + Attributes: + type_ (google.cloud.aiplatform_v1beta1.types.Type): + Optional. The type of the data. + format_ (str): + Optional. The format of the data. + Supported formats: + + for NUMBER type: float, double + for INTEGER type: int32, int64 + description (str): + Optional. The description of the data. + nullable (bool): + Optional. Indicates if the value may be null. + items (google.cloud.aiplatform_v1beta1.types.Schema): + Optional. Schema of the elements of + Type.ARRAY. + enum (MutableSequence[str]): + Optional. Possible values of the element of Type.STRING with + enum format. For example we can define an Enum Direction as + : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", + "WEST"]} + properties (MutableMapping[str, google.cloud.aiplatform_v1beta1.types.Schema]): + Optional. Properties of Type.OBJECT. + required (MutableSequence[str]): + Optional. Required properties of Type.OBJECT. + example (google.protobuf.struct_pb2.Value): + Optional. Example of the object. Will only + populated when the object is the root. 
+ """ + + type_: "Type" = proto.Field( + proto.ENUM, + number=1, + enum="Type", + ) + format_: str = proto.Field( + proto.STRING, + number=7, + ) + description: str = proto.Field( + proto.STRING, + number=8, + ) + nullable: bool = proto.Field( + proto.BOOL, + number=6, + ) + items: "Schema" = proto.Field( + proto.MESSAGE, + number=2, + message="Schema", + ) + enum: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=9, + ) + properties: MutableMapping[str, "Schema"] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=3, + message="Schema", + ) + required: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=5, + ) + example: struct_pb2.Value = proto.Field( + proto.MESSAGE, + number=4, + message=struct_pb2.Value, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/prediction_service.py b/google/cloud/aiplatform_v1beta1/types/prediction_service.py index b01d96d06f..b1a3c63395 100644 --- a/google/cloud/aiplatform_v1beta1/types/prediction_service.py +++ b/google/cloud/aiplatform_v1beta1/types/prediction_service.py @@ -20,7 +20,9 @@ import proto # type: ignore from google.api import httpbody_pb2 # type: ignore +from google.cloud.aiplatform_v1beta1.types import content from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import tool from google.cloud.aiplatform_v1beta1.types import types from google.protobuf import struct_pb2 # type: ignore @@ -43,6 +45,8 @@ "ExplainResponse", "CountTokensRequest", "CountTokensResponse", + "GenerateContentRequest", + "GenerateContentResponse", }, ) @@ -581,21 +585,36 @@ class CountTokensRequest(proto.Message): Required. The name of the Endpoint requested to perform token counting. Format: ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + model (str): + Required. The name of the publisher model requested to serve + the prediction. 
Format: + ``projects/{project}/locations/{location}/publishers/*/models/*`` instances (MutableSequence[google.protobuf.struct_pb2.Value]): Required. The instances that are the input to token counting call. Schema is identical to the prediction schema of the underlying model. + contents (MutableSequence[google.cloud.aiplatform_v1beta1.types.Content]): + Required. Input content. """ endpoint: str = proto.Field( proto.STRING, number=1, ) + model: str = proto.Field( + proto.STRING, + number=3, + ) instances: MutableSequence[struct_pb2.Value] = proto.RepeatedField( proto.MESSAGE, number=2, message=struct_pb2.Value, ) + contents: MutableSequence[content.Content] = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=content.Content, + ) class CountTokensResponse(proto.Message): @@ -621,4 +640,163 @@ class CountTokensResponse(proto.Message): ) +class GenerateContentRequest(proto.Message): + r"""Request message for [PredictionService.GenerateContent]. + + Attributes: + model (str): + Required. The name of the publisher model requested to serve + the prediction. Format: + ``projects/{project}/locations/{location}/publishers/*/models/*`` + contents (MutableSequence[google.cloud.aiplatform_v1beta1.types.Content]): + Required. The content of the current + conversation with the model. + For single-turn queries, this is a single + instance. For multi-turn queries, this is a + repeated field that contains conversation + history + latest request. + tools (MutableSequence[google.cloud.aiplatform_v1beta1.types.Tool]): + Optional. A list of ``Tools`` the model may use to generate + the next response. + + A ``Tool`` is a piece of code that enables the system to + interact with external systems to perform an action, or set + of actions, outside of knowledge and scope of the model. The + only supported tool is currently ``Function`` + safety_settings (MutableSequence[google.cloud.aiplatform_v1beta1.types.SafetySetting]): + Optional. 
Per request settings for blocking + unsafe content. Enforced on + GenerateContentResponse.candidates. + generation_config (google.cloud.aiplatform_v1beta1.types.GenerationConfig): + Optional. Generation config. + """ + + model: str = proto.Field( + proto.STRING, + number=5, + ) + contents: MutableSequence[content.Content] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=content.Content, + ) + tools: MutableSequence[tool.Tool] = proto.RepeatedField( + proto.MESSAGE, + number=6, + message=tool.Tool, + ) + safety_settings: MutableSequence[content.SafetySetting] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=content.SafetySetting, + ) + generation_config: content.GenerationConfig = proto.Field( + proto.MESSAGE, + number=4, + message=content.GenerationConfig, + ) + + +class GenerateContentResponse(proto.Message): + r"""Response message for [PredictionService.GenerateContent]. + + Attributes: + candidates (MutableSequence[google.cloud.aiplatform_v1beta1.types.Candidate]): + Output only. Generated candidates. + prompt_feedback (google.cloud.aiplatform_v1beta1.types.GenerateContentResponse.PromptFeedback): + Output only. Content filter results for a + prompt sent in the request. Note: Sent only in + the first stream chunk. Only happens when no + candidates were generated due to content + violations. + usage_metadata (google.cloud.aiplatform_v1beta1.types.GenerateContentResponse.UsageMetadata): + Usage metadata about the response(s). + """ + + class PromptFeedback(proto.Message): + r"""Content filter results for a prompt sent in the request. + + Attributes: + block_reason (google.cloud.aiplatform_v1beta1.types.GenerateContentResponse.PromptFeedback.BlockedReason): + Output only. Blocked reason. + safety_ratings (MutableSequence[google.cloud.aiplatform_v1beta1.types.SafetyRating]): + Output only. Safety ratings. + block_reason_message (str): + Output only. A readable block reason message. 
+ """ + + class BlockedReason(proto.Enum): + r"""Blocked reason enumeration. + + Values: + BLOCKED_REASON_UNSPECIFIED (0): + Unspecified blocked reason. + SAFETY (1): + Candidates blocked due to safety. + OTHER (2): + Candidates blocked due to other reason. + """ + BLOCKED_REASON_UNSPECIFIED = 0 + SAFETY = 1 + OTHER = 2 + + block_reason: "GenerateContentResponse.PromptFeedback.BlockedReason" = ( + proto.Field( + proto.ENUM, + number=1, + enum="GenerateContentResponse.PromptFeedback.BlockedReason", + ) + ) + safety_ratings: MutableSequence[content.SafetyRating] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=content.SafetyRating, + ) + block_reason_message: str = proto.Field( + proto.STRING, + number=3, + ) + + class UsageMetadata(proto.Message): + r"""Usage metadata about response(s). + + Attributes: + prompt_token_count (int): + Number of tokens in the request. + candidates_token_count (int): + Number of tokens in the response(s). + total_token_count (int): + + """ + + prompt_token_count: int = proto.Field( + proto.INT32, + number=1, + ) + candidates_token_count: int = proto.Field( + proto.INT32, + number=2, + ) + total_token_count: int = proto.Field( + proto.INT32, + number=3, + ) + + candidates: MutableSequence[content.Candidate] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=content.Candidate, + ) + prompt_feedback: PromptFeedback = proto.Field( + proto.MESSAGE, + number=3, + message=PromptFeedback, + ) + usage_metadata: UsageMetadata = proto.Field( + proto.MESSAGE, + number=4, + message=UsageMetadata, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/publisher_model.py b/google/cloud/aiplatform_v1beta1/types/publisher_model.py index a7d50bd8b9..58f903aeb7 100644 --- a/google/cloud/aiplatform_v1beta1/types/publisher_model.py +++ b/google/cloud/aiplatform_v1beta1/types/publisher_model.py @@ -60,6 +60,9 @@ class PublisherModel(proto.Message): launch_stage 
(google.cloud.aiplatform_v1beta1.types.PublisherModel.LaunchStage): Optional. Indicates the launch stage of the model. + version_state (google.cloud.aiplatform_v1beta1.types.PublisherModel.VersionState): + Optional. Indicates the state of the model + version. publisher_model_template (str): Optional. Output only. Immutable. Used to indicate this model has a publisher model and @@ -129,6 +132,21 @@ class LaunchStage(proto.Enum): PUBLIC_PREVIEW = 3 GA = 4 + class VersionState(proto.Enum): + r"""An enum representing the state of the PublicModelVersion. + + Values: + VERSION_STATE_UNSPECIFIED (0): + The version state is unspecified. + VERSION_STATE_STABLE (1): + Used to indicate the version is stable. + VERSION_STATE_UNSTABLE (2): + Used to indicate the version is unstable. + """ + VERSION_STATE_UNSPECIFIED = 0 + VERSION_STATE_STABLE = 1 + VERSION_STATE_UNSTABLE = 2 + class ResourceReference(proto.Message): r"""Reference to a resource. @@ -148,6 +166,14 @@ class ResourceReference(proto.Message): The resource name of the Google Cloud resource. + This field is a member of `oneof`_ ``reference``. + use_case (str): + Use case (CUJ) of the resource. + + This field is a member of `oneof`_ ``reference``. + description (str): + Description of the resource. + This field is a member of `oneof`_ ``reference``. """ @@ -161,6 +187,16 @@ class ResourceReference(proto.Message): number=2, oneof="reference", ) + use_case: str = proto.Field( + proto.STRING, + number=3, + oneof="reference", + ) + description: str = proto.Field( + proto.STRING, + number=4, + oneof="reference", + ) class Parent(proto.Message): r"""The information about the parent of a model. @@ -332,6 +368,9 @@ class Deploy(proto.Message): title (str): Required. The title of the regional resource reference. + public_artifact_uri (str): + Optional. The signed URI for ephemeral Cloud + Storage access to model artifact. 
""" dedicated_resources: machine_resources.DedicatedResources = proto.Field( @@ -373,6 +412,10 @@ class Deploy(proto.Message): proto.STRING, number=8, ) + public_artifact_uri: str = proto.Field( + proto.STRING, + number=9, + ) view_rest_api: "PublisherModel.CallToAction.ViewRestApi" = proto.Field( proto.MESSAGE, @@ -465,6 +508,11 @@ class Deploy(proto.Message): number=29, enum=LaunchStage, ) + version_state: VersionState = proto.Field( + proto.ENUM, + number=37, + enum=VersionState, + ) publisher_model_template: str = proto.Field( proto.STRING, number=30, diff --git a/google/cloud/aiplatform_v1beta1/types/tool.py b/google/cloud/aiplatform_v1beta1/types/tool.py new file mode 100644 index 0000000000..7a141cf90f --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/tool.py @@ -0,0 +1,172 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import openapi +from google.protobuf import struct_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "Tool", + "FunctionDeclaration", + "FunctionCall", + "FunctionResponse", + }, +) + + +class Tool(proto.Message): + r"""Tool details that the model may use to generate response. 
+ + A ``Tool`` is a piece of code that enables the system to interact + with external systems to perform an action, or set of actions, + outside of knowledge and scope of the model. + + Attributes: + function_declarations (MutableSequence[google.cloud.aiplatform_v1beta1.types.FunctionDeclaration]): + Optional. One or more function declarations to be passed to + the model along with the current user query. Model may + decide to call a subset of these functions by populating + [FunctionCall][content.part.function_call] in the response. + User should provide a + [FunctionResponse][content.part.function_response] for each + function call in the next turn. Based on the function + responses, Model will generate the final response back to + the user. Maximum 64 function declarations can be provided. + """ + + function_declarations: MutableSequence["FunctionDeclaration"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="FunctionDeclaration", + ) + + +class FunctionDeclaration(proto.Message): + r"""Structured representation of a function declaration as defined by + the `OpenAPI 3.0 + specification `__. Included in + this declaration are the function name and parameters. This + FunctionDeclaration is a representation of a block of code that can + be used as a ``Tool`` by the model and executed by the client. + + Attributes: + name (str): + Required. The name of the function to call. + Must start with a letter or an underscore. + Must be a-z, A-Z, 0-9, or contain underscores + and dashes, with a maximum length of 64. + description (str): + Optional. Description and purpose of the + function. Model uses it to decide how and + whether to call the function. + parameters (google.cloud.aiplatform_v1beta1.types.Schema): + Optional. Describes the parameters to this + function in JSON Schema Object format. Reflects + the Open API 3.03 Parameter Object. string Key: + the name of the parameter. Parameter names are + case sensitive. 
Schema Value: the Schema + defining the type used for the parameter. For + function with no parameters, this can be left + unset. Example with 1 required and 1 optional + parameter: type: OBJECT properties: + + param1: + + type: STRING + param2: + + type: INTEGER + required: + + - param1 + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + description: str = proto.Field( + proto.STRING, + number=2, + ) + parameters: openapi.Schema = proto.Field( + proto.MESSAGE, + number=3, + message=openapi.Schema, + ) + + +class FunctionCall(proto.Message): + r"""A predicted [FunctionCall] returned from the model that contains a + string representing the [FunctionDeclaration.name] and a structured + JSON object containing the parameters and their values. + + Attributes: + name (str): + Required. The name of the function to call. Matches + [FunctionDeclaration.name]. + args (google.protobuf.struct_pb2.Struct): + Optional. Required. The function parameters and values in + JSON object format. See [FunctionDeclaration.parameters] for + parameter details. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + args: struct_pb2.Struct = proto.Field( + proto.MESSAGE, + number=2, + message=struct_pb2.Struct, + ) + + +class FunctionResponse(proto.Message): + r"""The result output from a [FunctionCall] that contains a string + representing the [FunctionDeclaration.name] and a structured JSON + object containing any output from the function is used as context to + the model. This should contain the result of a [FunctionCall] made + based on model prediction. + + Attributes: + name (str): + Required. The name of the function to call. Matches + [FunctionDeclaration.name] and [FunctionCall.name]. + response (google.protobuf.struct_pb2.Struct): + Required. The function response in JSON + object format. 
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + response: struct_pb2.Struct = proto.Field( + proto.MESSAGE, + number=2, + message=struct_pb2.Struct, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_create_deployment_resource_pool_async.py b/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_create_deployment_resource_pool_async.py new file mode 100644 index 0000000000..cdb6077c6e --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_create_deployment_resource_pool_async.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateDeploymentResourcePool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DeploymentResourcePoolService_CreateDeploymentResourcePool_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_create_deployment_resource_pool(): + # Create a client + client = aiplatform_v1.DeploymentResourcePoolServiceAsyncClient() + + # Initialize request argument(s) + deployment_resource_pool = aiplatform_v1.DeploymentResourcePool() + deployment_resource_pool.dedicated_resources.min_replica_count = 1803 + + request = aiplatform_v1.CreateDeploymentResourcePoolRequest( + parent="parent_value", + deployment_resource_pool=deployment_resource_pool, + deployment_resource_pool_id="deployment_resource_pool_id_value", + ) + + # Make the request + operation = client.create_deployment_resource_pool(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_DeploymentResourcePoolService_CreateDeploymentResourcePool_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_create_deployment_resource_pool_sync.py b/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_create_deployment_resource_pool_sync.py new file mode 100644 index 0000000000..c3bad4d477 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_create_deployment_resource_pool_sync.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateDeploymentResourcePool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DeploymentResourcePoolService_CreateDeploymentResourcePool_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_create_deployment_resource_pool(): + # Create a client + client = aiplatform_v1.DeploymentResourcePoolServiceClient() + + # Initialize request argument(s) + deployment_resource_pool = aiplatform_v1.DeploymentResourcePool() + deployment_resource_pool.dedicated_resources.min_replica_count = 1803 + + request = aiplatform_v1.CreateDeploymentResourcePoolRequest( + parent="parent_value", + deployment_resource_pool=deployment_resource_pool, + deployment_resource_pool_id="deployment_resource_pool_id_value", + ) + + # Make the request + operation = client.create_deployment_resource_pool(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_DeploymentResourcePoolService_CreateDeploymentResourcePool_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_delete_deployment_resource_pool_async.py b/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_delete_deployment_resource_pool_async.py new file mode 100644 index 0000000000..f1dac5ff6c --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_delete_deployment_resource_pool_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteDeploymentResourcePool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DeploymentResourcePoolService_DeleteDeploymentResourcePool_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_delete_deployment_resource_pool(): + # Create a client + client = aiplatform_v1.DeploymentResourcePoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteDeploymentResourcePoolRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_deployment_resource_pool(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_DeploymentResourcePoolService_DeleteDeploymentResourcePool_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_delete_deployment_resource_pool_sync.py b/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_delete_deployment_resource_pool_sync.py new file mode 100644 index 0000000000..86eea95454 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_delete_deployment_resource_pool_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeleteDeploymentResourcePool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DeploymentResourcePoolService_DeleteDeploymentResourcePool_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_delete_deployment_resource_pool(): + # Create a client + client = aiplatform_v1.DeploymentResourcePoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteDeploymentResourcePoolRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_deployment_resource_pool(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_DeploymentResourcePoolService_DeleteDeploymentResourcePool_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_get_deployment_resource_pool_async.py b/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_get_deployment_resource_pool_async.py new file mode 100644 index 0000000000..0d30d6a3fd --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_get_deployment_resource_pool_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetDeploymentResourcePool +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DeploymentResourcePoolService_GetDeploymentResourcePool_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_get_deployment_resource_pool(): + # Create a client + client = aiplatform_v1.DeploymentResourcePoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetDeploymentResourcePoolRequest( + name="name_value", + ) + + # Make the request + response = await client.get_deployment_resource_pool(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_DeploymentResourcePoolService_GetDeploymentResourcePool_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_get_deployment_resource_pool_sync.py b/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_get_deployment_resource_pool_sync.py new file mode 100644 index 0000000000..e317d14fb2 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_get_deployment_resource_pool_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetDeploymentResourcePool +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DeploymentResourcePoolService_GetDeploymentResourcePool_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_get_deployment_resource_pool(): + # Create a client + client = aiplatform_v1.DeploymentResourcePoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetDeploymentResourcePoolRequest( + name="name_value", + ) + + # Make the request + response = client.get_deployment_resource_pool(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_DeploymentResourcePoolService_GetDeploymentResourcePool_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_list_deployment_resource_pools_async.py b/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_list_deployment_resource_pools_async.py new file mode 100644 index 0000000000..f0b2466f03 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_list_deployment_resource_pools_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListDeploymentResourcePools +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DeploymentResourcePoolService_ListDeploymentResourcePools_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_list_deployment_resource_pools(): + # Create a client + client = aiplatform_v1.DeploymentResourcePoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListDeploymentResourcePoolsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_deployment_resource_pools(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_DeploymentResourcePoolService_ListDeploymentResourcePools_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_list_deployment_resource_pools_sync.py b/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_list_deployment_resource_pools_sync.py new file mode 100644 index 0000000000..65b83c886a --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_list_deployment_resource_pools_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListDeploymentResourcePools +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DeploymentResourcePoolService_ListDeploymentResourcePools_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_list_deployment_resource_pools(): + # Create a client + client = aiplatform_v1.DeploymentResourcePoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListDeploymentResourcePoolsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_deployment_resource_pools(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_DeploymentResourcePoolService_ListDeploymentResourcePools_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_query_deployed_models_async.py b/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_query_deployed_models_async.py new file mode 100644 index 0000000000..ac307d0777 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_query_deployed_models_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryDeployedModels +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DeploymentResourcePoolService_QueryDeployedModels_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_query_deployed_models(): + # Create a client + client = aiplatform_v1.DeploymentResourcePoolServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.QueryDeployedModelsRequest( + deployment_resource_pool="deployment_resource_pool_value", + ) + + # Make the request + page_result = client.query_deployed_models(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_DeploymentResourcePoolService_QueryDeployedModels_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_query_deployed_models_sync.py b/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_query_deployed_models_sync.py new file mode 100644 index 0000000000..51918b472a --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_deployment_resource_pool_service_query_deployed_models_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryDeployedModels +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_DeploymentResourcePoolService_QueryDeployedModels_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_query_deployed_models(): + # Create a client + client = aiplatform_v1.DeploymentResourcePoolServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.QueryDeployedModelsRequest( + deployment_resource_pool="deployment_resource_pool_value", + ) + + # Make the request + page_result = client.query_deployed_models(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_DeploymentResourcePoolService_QueryDeployedModels_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_llm_utility_service_count_tokens_async.py b/samples/generated_samples/aiplatform_v1_generated_llm_utility_service_count_tokens_async.py index 5a26c1b5cd..cfd6883865 100644 --- a/samples/generated_samples/aiplatform_v1_generated_llm_utility_service_count_tokens_async.py +++ b/samples/generated_samples/aiplatform_v1_generated_llm_utility_service_count_tokens_async.py @@ -42,9 +42,14 @@ async def sample_count_tokens(): instances = aiplatform_v1.Value() instances.null_value = "NULL_VALUE" + contents = aiplatform_v1.Content() + contents.parts.text = "text_value" + request = aiplatform_v1.CountTokensRequest( endpoint="endpoint_value", + model="model_value", instances=instances, + 
contents=contents, ) # Make the request diff --git a/samples/generated_samples/aiplatform_v1_generated_llm_utility_service_count_tokens_sync.py b/samples/generated_samples/aiplatform_v1_generated_llm_utility_service_count_tokens_sync.py index 3b3b1c0e72..e7b1eeb136 100644 --- a/samples/generated_samples/aiplatform_v1_generated_llm_utility_service_count_tokens_sync.py +++ b/samples/generated_samples/aiplatform_v1_generated_llm_utility_service_count_tokens_sync.py @@ -42,9 +42,14 @@ def sample_count_tokens(): instances = aiplatform_v1.Value() instances.null_value = "NULL_VALUE" + contents = aiplatform_v1.Content() + contents.parts.text = "text_value" + request = aiplatform_v1.CountTokensRequest( endpoint="endpoint_value", + model="model_value", instances=instances, + contents=contents, ) # Make the request diff --git a/samples/generated_samples/aiplatform_v1_generated_prediction_service_stream_generate_content_async.py b/samples/generated_samples/aiplatform_v1_generated_prediction_service_stream_generate_content_async.py new file mode 100644 index 0000000000..31a5850934 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_prediction_service_stream_generate_content_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StreamGenerateContent +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PredictionService_StreamGenerateContent_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_stream_generate_content(): + # Create a client + client = aiplatform_v1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + contents = aiplatform_v1.Content() + contents.parts.text = "text_value" + + request = aiplatform_v1.GenerateContentRequest( + model="model_value", + contents=contents, + ) + + # Make the request + stream = await client.stream_generate_content(request=request) + + # Handle the response + async for response in stream: + print(response) + +# [END aiplatform_v1_generated_PredictionService_StreamGenerateContent_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_prediction_service_stream_generate_content_sync.py b/samples/generated_samples/aiplatform_v1_generated_prediction_service_stream_generate_content_sync.py new file mode 100644 index 0000000000..f1fd8c160b --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_prediction_service_stream_generate_content_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StreamGenerateContent +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PredictionService_StreamGenerateContent_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_stream_generate_content(): + # Create a client + client = aiplatform_v1.PredictionServiceClient() + + # Initialize request argument(s) + contents = aiplatform_v1.Content() + contents.parts.text = "text_value" + + request = aiplatform_v1.GenerateContentRequest( + model="model_value", + contents=contents, + ) + + # Make the request + stream = client.stream_generate_content(request=request) + + # Handle the response + for response in stream: + print(response) + +# [END aiplatform_v1_generated_PredictionService_StreamGenerateContent_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_count_tokens_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_count_tokens_async.py index 9b28517f87..78ddf30678 100644 --- a/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_count_tokens_async.py +++ b/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_count_tokens_async.py @@ -42,9 +42,14 @@ async def sample_count_tokens(): instances = aiplatform_v1beta1.Value() instances.null_value = "NULL_VALUE" + contents = aiplatform_v1beta1.Content() + contents.parts.text = "text_value" + request = aiplatform_v1beta1.CountTokensRequest( endpoint="endpoint_value", + model="model_value", instances=instances, + contents=contents, ) # Make the request diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_count_tokens_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_count_tokens_sync.py index 4a08a5dce8..81a8d93811 100644 --- a/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_count_tokens_sync.py +++ 
b/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_count_tokens_sync.py @@ -42,9 +42,14 @@ def sample_count_tokens(): instances = aiplatform_v1beta1.Value() instances.null_value = "NULL_VALUE" + contents = aiplatform_v1beta1.Content() + contents.parts.text = "text_value" + request = aiplatform_v1beta1.CountTokensRequest( endpoint="endpoint_value", + model="model_value", instances=instances, + contents=contents, ) # Make the request diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_stream_generate_content_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_stream_generate_content_async.py new file mode 100644 index 0000000000..2ca30792ec --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_stream_generate_content_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StreamGenerateContent +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PredictionService_StreamGenerateContent_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_stream_generate_content(): + # Create a client + client = aiplatform_v1beta1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + contents = aiplatform_v1beta1.Content() + contents.parts.text = "text_value" + + request = aiplatform_v1beta1.GenerateContentRequest( + model="model_value", + contents=contents, + ) + + # Make the request + stream = await client.stream_generate_content(request=request) + + # Handle the response + async for response in stream: + print(response) + +# [END aiplatform_v1beta1_generated_PredictionService_StreamGenerateContent_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_stream_generate_content_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_stream_generate_content_sync.py new file mode 100644 index 0000000000..7506f2c205 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_stream_generate_content_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StreamGenerateContent +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PredictionService_StreamGenerateContent_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_stream_generate_content(): + # Create a client + client = aiplatform_v1beta1.PredictionServiceClient() + + # Initialize request argument(s) + contents = aiplatform_v1beta1.Content() + contents.parts.text = "text_value" + + request = aiplatform_v1beta1.GenerateContentRequest( + model="model_value", + contents=contents, + ) + + # Make the request + stream = client.stream_generate_content(request=request) + + # Handle the response + for response in stream: + print(response) + +# [END aiplatform_v1beta1_generated_PredictionService_StreamGenerateContent_sync] diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json index 6b42d3621e..84bd0b0fff 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "1.37.0" + "version": "0.1.0" }, "snippets": [ { @@ -2946,33 +2946,33 @@ "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient", - "shortName": "EndpointServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.DeploymentResourcePoolServiceAsyncClient", + "shortName": "DeploymentResourcePoolServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.create_endpoint", + "fullName": "google.cloud.aiplatform_v1.DeploymentResourcePoolServiceAsyncClient.create_deployment_resource_pool", "method": { - "fullName": "google.cloud.aiplatform.v1.EndpointService.CreateEndpoint", + "fullName": 
"google.cloud.aiplatform.v1.DeploymentResourcePoolService.CreateDeploymentResourcePool", "service": { - "fullName": "google.cloud.aiplatform.v1.EndpointService", - "shortName": "EndpointService" + "fullName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService", + "shortName": "DeploymentResourcePoolService" }, - "shortName": "CreateEndpoint" + "shortName": "CreateDeploymentResourcePool" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateEndpointRequest" + "type": "google.cloud.aiplatform_v1.types.CreateDeploymentResourcePoolRequest" }, { "name": "parent", "type": "str" }, { - "name": "endpoint", - "type": "google.cloud.aiplatform_v1.types.Endpoint" + "name": "deployment_resource_pool", + "type": "google.cloud.aiplatform_v1.types.DeploymentResourcePool" }, { - "name": "endpoint_id", + "name": "deployment_resource_pool_id", "type": "str" }, { @@ -2989,21 +2989,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_endpoint" + "shortName": "create_deployment_resource_pool" }, - "description": "Sample for CreateEndpoint", - "file": "aiplatform_v1_generated_endpoint_service_create_endpoint_async.py", + "description": "Sample for CreateDeploymentResourcePool", + "file": "aiplatform_v1_generated_deployment_resource_pool_service_create_deployment_resource_pool_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_EndpointService_CreateEndpoint_async", + "regionTag": "aiplatform_v1_generated_DeploymentResourcePoolService_CreateDeploymentResourcePool_async", "segments": [ { - "end": 59, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 60, "start": 27, "type": "SHORT" }, @@ -3013,54 +3013,54 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 57, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + 
"end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_endpoint_service_create_endpoint_async.py" + "title": "aiplatform_v1_generated_deployment_resource_pool_service_create_deployment_resource_pool_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient", - "shortName": "EndpointServiceClient" + "fullName": "google.cloud.aiplatform_v1.DeploymentResourcePoolServiceClient", + "shortName": "DeploymentResourcePoolServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient.create_endpoint", + "fullName": "google.cloud.aiplatform_v1.DeploymentResourcePoolServiceClient.create_deployment_resource_pool", "method": { - "fullName": "google.cloud.aiplatform.v1.EndpointService.CreateEndpoint", + "fullName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService.CreateDeploymentResourcePool", "service": { - "fullName": "google.cloud.aiplatform.v1.EndpointService", - "shortName": "EndpointService" + "fullName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService", + "shortName": "DeploymentResourcePoolService" }, - "shortName": "CreateEndpoint" + "shortName": "CreateDeploymentResourcePool" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateEndpointRequest" + "type": "google.cloud.aiplatform_v1.types.CreateDeploymentResourcePoolRequest" }, { "name": "parent", "type": "str" }, { - "name": "endpoint", - "type": "google.cloud.aiplatform_v1.types.Endpoint" + "name": "deployment_resource_pool", + "type": "google.cloud.aiplatform_v1.types.DeploymentResourcePool" }, { - "name": "endpoint_id", + "name": "deployment_resource_pool_id", "type": "str" }, { @@ -3077,21 +3077,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "create_endpoint" + "shortName": "create_deployment_resource_pool" }, - "description": "Sample for CreateEndpoint", - "file": 
"aiplatform_v1_generated_endpoint_service_create_endpoint_sync.py", + "description": "Sample for CreateDeploymentResourcePool", + "file": "aiplatform_v1_generated_deployment_resource_pool_service_create_deployment_resource_pool_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_EndpointService_CreateEndpoint_sync", + "regionTag": "aiplatform_v1_generated_DeploymentResourcePoolService_CreateDeploymentResourcePool_sync", "segments": [ { - "end": 59, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 60, "start": 27, "type": "SHORT" }, @@ -3101,44 +3101,44 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 57, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_endpoint_service_create_endpoint_sync.py" + "title": "aiplatform_v1_generated_deployment_resource_pool_service_create_deployment_resource_pool_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient", - "shortName": "EndpointServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.DeploymentResourcePoolServiceAsyncClient", + "shortName": "DeploymentResourcePoolServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.delete_endpoint", + "fullName": "google.cloud.aiplatform_v1.DeploymentResourcePoolServiceAsyncClient.delete_deployment_resource_pool", "method": { - "fullName": "google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint", + "fullName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService.DeleteDeploymentResourcePool", "service": { - "fullName": "google.cloud.aiplatform.v1.EndpointService", - "shortName": "EndpointService" + "fullName": 
"google.cloud.aiplatform.v1.DeploymentResourcePoolService", + "shortName": "DeploymentResourcePoolService" }, - "shortName": "DeleteEndpoint" + "shortName": "DeleteDeploymentResourcePool" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteEndpointRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteDeploymentResourcePoolRequest" }, { "name": "name", @@ -3158,13 +3158,13 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_endpoint" + "shortName": "delete_deployment_resource_pool" }, - "description": "Sample for DeleteEndpoint", - "file": "aiplatform_v1_generated_endpoint_service_delete_endpoint_async.py", + "description": "Sample for DeleteDeploymentResourcePool", + "file": "aiplatform_v1_generated_deployment_resource_pool_service_delete_deployment_resource_pool_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_EndpointService_DeleteEndpoint_async", + "regionTag": "aiplatform_v1_generated_DeploymentResourcePoolService_DeleteDeploymentResourcePool_async", "segments": [ { "end": 55, @@ -3197,28 +3197,28 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_endpoint_service_delete_endpoint_async.py" + "title": "aiplatform_v1_generated_deployment_resource_pool_service_delete_deployment_resource_pool_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient", - "shortName": "EndpointServiceClient" + "fullName": "google.cloud.aiplatform_v1.DeploymentResourcePoolServiceClient", + "shortName": "DeploymentResourcePoolServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient.delete_endpoint", + "fullName": "google.cloud.aiplatform_v1.DeploymentResourcePoolServiceClient.delete_deployment_resource_pool", "method": { - "fullName": "google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint", + "fullName": 
"google.cloud.aiplatform.v1.DeploymentResourcePoolService.DeleteDeploymentResourcePool", "service": { - "fullName": "google.cloud.aiplatform.v1.EndpointService", - "shortName": "EndpointService" + "fullName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService", + "shortName": "DeploymentResourcePoolService" }, - "shortName": "DeleteEndpoint" + "shortName": "DeleteDeploymentResourcePool" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteEndpointRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteDeploymentResourcePoolRequest" }, { "name": "name", @@ -3238,13 +3238,13 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "delete_endpoint" + "shortName": "delete_deployment_resource_pool" }, - "description": "Sample for DeleteEndpoint", - "file": "aiplatform_v1_generated_endpoint_service_delete_endpoint_sync.py", + "description": "Sample for DeleteDeploymentResourcePool", + "file": "aiplatform_v1_generated_deployment_resource_pool_service_delete_deployment_resource_pool_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_EndpointService_DeleteEndpoint_sync", + "regionTag": "aiplatform_v1_generated_DeploymentResourcePoolService_DeleteDeploymentResourcePool_sync", "segments": [ { "end": 55, @@ -3277,130 +3277,34 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_endpoint_service_delete_endpoint_sync.py" + "title": "aiplatform_v1_generated_deployment_resource_pool_service_delete_deployment_resource_pool_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient", - "shortName": "EndpointServiceAsyncClient" - }, - "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.deploy_model", - "method": { - "fullName": "google.cloud.aiplatform.v1.EndpointService.DeployModel", - "service": { - "fullName": 
"google.cloud.aiplatform.v1.EndpointService", - "shortName": "EndpointService" - }, - "shortName": "DeployModel" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeployModelRequest" - }, - { - "name": "endpoint", - "type": "str" - }, - { - "name": "deployed_model", - "type": "google.cloud.aiplatform_v1.types.DeployedModel" - }, - { - "name": "traffic_split", - "type": "MutableMapping[str, int]" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "deploy_model" - }, - "description": "Sample for DeployModel", - "file": "aiplatform_v1_generated_endpoint_service_deploy_model_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_EndpointService_DeployModel_async", - "segments": [ - { - "end": 60, - "start": 27, - "type": "FULL" - }, - { - "end": 60, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 50, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 57, - "start": 51, - "type": "REQUEST_EXECUTION" - }, - { - "end": 61, - "start": 58, - "type": "RESPONSE_HANDLING" - } - ], - "title": "aiplatform_v1_generated_endpoint_service_deploy_model_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient", - "shortName": "EndpointServiceClient" + "fullName": "google.cloud.aiplatform_v1.DeploymentResourcePoolServiceAsyncClient", + "shortName": "DeploymentResourcePoolServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient.deploy_model", + "fullName": "google.cloud.aiplatform_v1.DeploymentResourcePoolServiceAsyncClient.get_deployment_resource_pool", "method": { - "fullName": 
"google.cloud.aiplatform.v1.EndpointService.DeployModel", + "fullName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService.GetDeploymentResourcePool", "service": { - "fullName": "google.cloud.aiplatform.v1.EndpointService", - "shortName": "EndpointService" + "fullName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService", + "shortName": "DeploymentResourcePoolService" }, - "shortName": "DeployModel" + "shortName": "GetDeploymentResourcePool" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeployModelRequest" + "type": "google.cloud.aiplatform_v1.types.GetDeploymentResourcePoolRequest" }, { - "name": "endpoint", + "name": "name", "type": "str" }, - { - "name": "deployed_model", - "type": "google.cloud.aiplatform_v1.types.DeployedModel" - }, - { - "name": "traffic_split", - "type": "MutableMapping[str, int]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -3414,22 +3318,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "deploy_model" + "resultType": "google.cloud.aiplatform_v1.types.DeploymentResourcePool", + "shortName": "get_deployment_resource_pool" }, - "description": "Sample for DeployModel", - "file": "aiplatform_v1_generated_endpoint_service_deploy_model_sync.py", + "description": "Sample for GetDeploymentResourcePool", + "file": "aiplatform_v1_generated_deployment_resource_pool_service_get_deployment_resource_pool_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_EndpointService_DeployModel_sync", + "regionTag": "aiplatform_v1_generated_DeploymentResourcePoolService_GetDeploymentResourcePool_async", "segments": [ { - "end": 60, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 51, "start": 27, "type": "SHORT" }, @@ -3439,44 +3343,43 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, 
- "start": 51, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_endpoint_service_deploy_model_sync.py" + "title": "aiplatform_v1_generated_deployment_resource_pool_service_get_deployment_resource_pool_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient", - "shortName": "EndpointServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.DeploymentResourcePoolServiceClient", + "shortName": "DeploymentResourcePoolServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.get_endpoint", + "fullName": "google.cloud.aiplatform_v1.DeploymentResourcePoolServiceClient.get_deployment_resource_pool", "method": { - "fullName": "google.cloud.aiplatform.v1.EndpointService.GetEndpoint", + "fullName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService.GetDeploymentResourcePool", "service": { - "fullName": "google.cloud.aiplatform.v1.EndpointService", - "shortName": "EndpointService" + "fullName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService", + "shortName": "DeploymentResourcePoolService" }, - "shortName": "GetEndpoint" + "shortName": "GetDeploymentResourcePool" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetEndpointRequest" + "type": "google.cloud.aiplatform_v1.types.GetDeploymentResourcePoolRequest" }, { "name": "name", @@ -3495,14 +3398,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Endpoint", - "shortName": "get_endpoint" + "resultType": "google.cloud.aiplatform_v1.types.DeploymentResourcePool", + "shortName": "get_deployment_resource_pool" }, - "description": "Sample for GetEndpoint", - "file": "aiplatform_v1_generated_endpoint_service_get_endpoint_async.py", + "description": "Sample for 
GetDeploymentResourcePool", + "file": "aiplatform_v1_generated_deployment_resource_pool_service_get_deployment_resource_pool_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_EndpointService_GetEndpoint_async", + "regionTag": "aiplatform_v1_generated_DeploymentResourcePoolService_GetDeploymentResourcePool_sync", "segments": [ { "end": 51, @@ -3535,31 +3438,32 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_endpoint_service_get_endpoint_async.py" + "title": "aiplatform_v1_generated_deployment_resource_pool_service_get_deployment_resource_pool_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient", - "shortName": "EndpointServiceClient" + "fullName": "google.cloud.aiplatform_v1.DeploymentResourcePoolServiceAsyncClient", + "shortName": "DeploymentResourcePoolServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient.get_endpoint", + "fullName": "google.cloud.aiplatform_v1.DeploymentResourcePoolServiceAsyncClient.list_deployment_resource_pools", "method": { - "fullName": "google.cloud.aiplatform.v1.EndpointService.GetEndpoint", + "fullName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService.ListDeploymentResourcePools", "service": { - "fullName": "google.cloud.aiplatform.v1.EndpointService", - "shortName": "EndpointService" + "fullName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService", + "shortName": "DeploymentResourcePoolService" }, - "shortName": "GetEndpoint" + "shortName": "ListDeploymentResourcePools" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetEndpointRequest" + "type": "google.cloud.aiplatform_v1.types.ListDeploymentResourcePoolsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -3575,22 +3479,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": 
"google.cloud.aiplatform_v1.types.Endpoint", - "shortName": "get_endpoint" + "resultType": "google.cloud.aiplatform_v1.services.deployment_resource_pool_service.pagers.ListDeploymentResourcePoolsAsyncPager", + "shortName": "list_deployment_resource_pools" }, - "description": "Sample for GetEndpoint", - "file": "aiplatform_v1_generated_endpoint_service_get_endpoint_sync.py", + "description": "Sample for ListDeploymentResourcePools", + "file": "aiplatform_v1_generated_deployment_resource_pool_service_list_deployment_resource_pools_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_EndpointService_GetEndpoint_sync", + "regionTag": "aiplatform_v1_generated_DeploymentResourcePoolService_ListDeploymentResourcePools_async", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -3610,34 +3514,33 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_endpoint_service_get_endpoint_sync.py" + "title": "aiplatform_v1_generated_deployment_resource_pool_service_list_deployment_resource_pools_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient", - "shortName": "EndpointServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.DeploymentResourcePoolServiceClient", + "shortName": "DeploymentResourcePoolServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.list_endpoints", + "fullName": "google.cloud.aiplatform_v1.DeploymentResourcePoolServiceClient.list_deployment_resource_pools", "method": { - "fullName": "google.cloud.aiplatform.v1.EndpointService.ListEndpoints", + "fullName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService.ListDeploymentResourcePools", "service": { - "fullName": 
"google.cloud.aiplatform.v1.EndpointService", - "shortName": "EndpointService" + "fullName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService", + "shortName": "DeploymentResourcePoolService" }, - "shortName": "ListEndpoints" + "shortName": "ListDeploymentResourcePools" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListEndpointsRequest" + "type": "google.cloud.aiplatform_v1.types.ListDeploymentResourcePoolsRequest" }, { "name": "parent", @@ -3656,14 +3559,932 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.endpoint_service.pagers.ListEndpointsAsyncPager", - "shortName": "list_endpoints" + "resultType": "google.cloud.aiplatform_v1.services.deployment_resource_pool_service.pagers.ListDeploymentResourcePoolsPager", + "shortName": "list_deployment_resource_pools" }, - "description": "Sample for ListEndpoints", - "file": "aiplatform_v1_generated_endpoint_service_list_endpoints_async.py", + "description": "Sample for ListDeploymentResourcePools", + "file": "aiplatform_v1_generated_deployment_resource_pool_service_list_deployment_resource_pools_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_EndpointService_ListEndpoints_async", + "regionTag": "aiplatform_v1_generated_DeploymentResourcePoolService_ListDeploymentResourcePools_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_deployment_resource_pool_service_list_deployment_resource_pools_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + 
"fullName": "google.cloud.aiplatform_v1.DeploymentResourcePoolServiceAsyncClient", + "shortName": "DeploymentResourcePoolServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.DeploymentResourcePoolServiceAsyncClient.query_deployed_models", + "method": { + "fullName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService.QueryDeployedModels", + "service": { + "fullName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService", + "shortName": "DeploymentResourcePoolService" + }, + "shortName": "QueryDeployedModels" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.QueryDeployedModelsRequest" + }, + { + "name": "deployment_resource_pool", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.deployment_resource_pool_service.pagers.QueryDeployedModelsAsyncPager", + "shortName": "query_deployed_models" + }, + "description": "Sample for QueryDeployedModels", + "file": "aiplatform_v1_generated_deployment_resource_pool_service_query_deployed_models_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_DeploymentResourcePoolService_QueryDeployedModels_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_deployment_resource_pool_service_query_deployed_models_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": 
"google.cloud.aiplatform_v1.DeploymentResourcePoolServiceClient", + "shortName": "DeploymentResourcePoolServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.DeploymentResourcePoolServiceClient.query_deployed_models", + "method": { + "fullName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService.QueryDeployedModels", + "service": { + "fullName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService", + "shortName": "DeploymentResourcePoolService" + }, + "shortName": "QueryDeployedModels" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.QueryDeployedModelsRequest" + }, + { + "name": "deployment_resource_pool", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.deployment_resource_pool_service.pagers.QueryDeployedModelsPager", + "shortName": "query_deployed_models" + }, + "description": "Sample for QueryDeployedModels", + "file": "aiplatform_v1_generated_deployment_resource_pool_service_query_deployed_models_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_DeploymentResourcePoolService_QueryDeployedModels_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_deployment_resource_pool_service_query_deployed_models_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.create_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.CreateEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "CreateEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateEndpointRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "endpoint", + "type": "google.cloud.aiplatform_v1.types.Endpoint" + }, + { + "name": "endpoint_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_endpoint" + }, + "description": "Sample for CreateEndpoint", + "file": "aiplatform_v1_generated_endpoint_service_create_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_EndpointService_CreateEndpoint_async", + "segments": [ + { + "end": 59, + "start": 27, + "type": "FULL" + }, + { + "end": 59, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 56, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 60, + "start": 57, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_endpoint_service_create_endpoint_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": 
"google.cloud.aiplatform_v1.EndpointServiceClient.create_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.CreateEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "CreateEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CreateEndpointRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "endpoint", + "type": "google.cloud.aiplatform_v1.types.Endpoint" + }, + { + "name": "endpoint_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_endpoint" + }, + "description": "Sample for CreateEndpoint", + "file": "aiplatform_v1_generated_endpoint_service_create_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_EndpointService_CreateEndpoint_sync", + "segments": [ + { + "end": 59, + "start": 27, + "type": "FULL" + }, + { + "end": 59, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 56, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 60, + "start": 57, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_endpoint_service_create_endpoint_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.delete_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint", 
+ "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "DeleteEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_endpoint" + }, + "description": "Sample for DeleteEndpoint", + "file": "aiplatform_v1_generated_endpoint_service_delete_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_EndpointService_DeleteEndpoint_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_endpoint_service_delete_endpoint_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient.delete_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "DeleteEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { 
+ "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_endpoint" + }, + "description": "Sample for DeleteEndpoint", + "file": "aiplatform_v1_generated_endpoint_service_delete_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_EndpointService_DeleteEndpoint_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_endpoint_service_delete_endpoint_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.deploy_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.DeployModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "DeployModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeployModelRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "deployed_model", + "type": "google.cloud.aiplatform_v1.types.DeployedModel" + }, + { + "name": "traffic_split", + "type": "MutableMapping[str, int]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": 
"metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "deploy_model" + }, + "description": "Sample for DeployModel", + "file": "aiplatform_v1_generated_endpoint_service_deploy_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_EndpointService_DeployModel_async", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_endpoint_service_deploy_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient.deploy_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.DeployModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "DeployModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeployModelRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "deployed_model", + "type": "google.cloud.aiplatform_v1.types.DeployedModel" + }, + { + "name": "traffic_split", + "type": "MutableMapping[str, int]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "deploy_model" + }, + "description": 
"Sample for DeployModel", + "file": "aiplatform_v1_generated_endpoint_service_deploy_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_EndpointService_DeployModel_sync", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_endpoint_service_deploy_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.get_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.GetEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "GetEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Endpoint", + "shortName": "get_endpoint" + }, + "description": "Sample for GetEndpoint", + "file": "aiplatform_v1_generated_endpoint_service_get_endpoint_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_EndpointService_GetEndpoint_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 
51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_endpoint_service_get_endpoint_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient", + "shortName": "EndpointServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceClient.get_endpoint", + "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.GetEndpoint", + "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "GetEndpoint" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetEndpointRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Endpoint", + "shortName": "get_endpoint" + }, + "description": "Sample for GetEndpoint", + "file": "aiplatform_v1_generated_endpoint_service_get_endpoint_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_EndpointService_GetEndpoint_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1_generated_endpoint_service_get_endpoint_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient", + "shortName": "EndpointServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.EndpointServiceAsyncClient.list_endpoints", + "method": { + "fullName": "google.cloud.aiplatform.v1.EndpointService.ListEndpoints", + "service": { + "fullName": "google.cloud.aiplatform.v1.EndpointService", + "shortName": "EndpointService" + }, + "shortName": "ListEndpoints" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListEndpointsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.endpoint_service.pagers.ListEndpointsAsyncPager", + "shortName": "list_endpoints" + }, + "description": "Sample for ListEndpoints", + "file": "aiplatform_v1_generated_endpoint_service_list_endpoints_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_EndpointService_ListEndpoints_async", "segments": [ { "end": 52, @@ -16745,168 +17566,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_delete_custom_job_async.py" - }, - { - "canonical": true, - "clientMethod": { - "client": { - "fullName": "google.cloud.aiplatform_v1.JobServiceClient", - "shortName": "JobServiceClient" - }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_custom_job", - "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.DeleteCustomJob", - "service": { - "fullName": "google.cloud.aiplatform.v1.JobService", - "shortName": "JobService" - }, - "shortName": "DeleteCustomJob" - }, - "parameters": [ - { - "name": 
"request", - "type": "google.cloud.aiplatform_v1.types.DeleteCustomJobRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, str]" - } - ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_custom_job" - }, - "description": "Sample for DeleteCustomJob", - "file": "aiplatform_v1_generated_job_service_delete_custom_job_sync.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_DeleteCustomJob_sync", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "aiplatform_v1_generated_job_service_delete_custom_job_sync.py" - }, - { - "canonical": true, - "clientMethod": { - "async": true, - "client": { - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" - }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_data_labeling_job", - "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob", - "service": { - "fullName": "google.cloud.aiplatform.v1.JobService", - "shortName": "JobService" - }, - "shortName": "DeleteDataLabelingJob" - }, - "parameters": [ - { - "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest" - }, - { - "name": "name", - "type": "str" - }, - { - "name": "retry", - "type": "google.api_core.retry.Retry" - }, - { - "name": "timeout", - "type": "float" - }, - { - "name": "metadata", - "type": "Sequence[Tuple[str, 
str]" - } - ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_data_labeling_job" - }, - "description": "Sample for DeleteDataLabelingJob", - "file": "aiplatform_v1_generated_job_service_delete_data_labeling_job_async.py", - "language": "PYTHON", - "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_DeleteDataLabelingJob_async", - "segments": [ - { - "end": 55, - "start": 27, - "type": "FULL" - }, - { - "end": 55, - "start": 27, - "type": "SHORT" - }, - { - "end": 40, - "start": 38, - "type": "CLIENT_INITIALIZATION" - }, - { - "end": 45, - "start": 41, - "type": "REQUEST_INITIALIZATION" - }, - { - "end": 52, - "start": 46, - "type": "REQUEST_EXECUTION" - }, - { - "end": 56, - "start": 53, - "type": "RESPONSE_HANDLING" - } - ], - "title": "aiplatform_v1_generated_job_service_delete_data_labeling_job_async.py" + "title": "aiplatform_v1_generated_job_service_delete_custom_job_async.py" }, { "canonical": true, @@ -16915,19 +17575,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_data_labeling_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_custom_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob", + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteCustomJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "DeleteDataLabelingJob" + "shortName": "DeleteCustomJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteCustomJobRequest" }, { "name": "name", @@ -16947,13 +17607,13 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "delete_data_labeling_job" + "shortName": "delete_custom_job" }, - "description": "Sample for 
DeleteDataLabelingJob", - "file": "aiplatform_v1_generated_job_service_delete_data_labeling_job_sync.py", + "description": "Sample for DeleteCustomJob", + "file": "aiplatform_v1_generated_job_service_delete_custom_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_DeleteDataLabelingJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_DeleteCustomJob_sync", "segments": [ { "end": 55, @@ -16986,7 +17646,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_delete_data_labeling_job_sync.py" + "title": "aiplatform_v1_generated_job_service_delete_custom_job_sync.py" }, { "canonical": true, @@ -16996,19 +17656,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_hyperparameter_tuning_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_data_labeling_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob", + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "DeleteHyperparameterTuningJob" + "shortName": "DeleteDataLabelingJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest" }, { "name": "name", @@ -17028,13 +17688,13 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_hyperparameter_tuning_job" + "shortName": "delete_data_labeling_job" }, - "description": "Sample for DeleteHyperparameterTuningJob", - "file": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_async.py", + "description": "Sample for DeleteDataLabelingJob", + "file": 
"aiplatform_v1_generated_job_service_delete_data_labeling_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_DeleteHyperparameterTuningJob_async", + "regionTag": "aiplatform_v1_generated_JobService_DeleteDataLabelingJob_async", "segments": [ { "end": 55, @@ -17067,7 +17727,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_async.py" + "title": "aiplatform_v1_generated_job_service_delete_data_labeling_job_async.py" }, { "canonical": true, @@ -17076,19 +17736,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_hyperparameter_tuning_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_data_labeling_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob", + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "DeleteHyperparameterTuningJob" + "shortName": "DeleteDataLabelingJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest" }, { "name": "name", @@ -17108,13 +17768,13 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "delete_hyperparameter_tuning_job" + "shortName": "delete_data_labeling_job" }, - "description": "Sample for DeleteHyperparameterTuningJob", - "file": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_sync.py", + "description": "Sample for DeleteDataLabelingJob", + "file": "aiplatform_v1_generated_job_service_delete_data_labeling_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1_generated_JobService_DeleteHyperparameterTuningJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_DeleteDataLabelingJob_sync", "segments": [ { "end": 55, @@ -17147,7 +17807,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_sync.py" + "title": "aiplatform_v1_generated_job_service_delete_data_labeling_job_sync.py" }, { "canonical": true, @@ -17157,19 +17817,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_hyperparameter_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.DeleteModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "DeleteModelDeploymentMonitoringJob" + "shortName": "DeleteHyperparameterTuningJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest" }, { "name": "name", @@ -17189,13 +17849,13 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_model_deployment_monitoring_job" + "shortName": "delete_hyperparameter_tuning_job" }, - "description": "Sample for DeleteModelDeploymentMonitoringJob", - "file": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_async.py", + "description": "Sample for DeleteHyperparameterTuningJob", + "file": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1_generated_JobService_DeleteModelDeploymentMonitoringJob_async", + "regionTag": "aiplatform_v1_generated_JobService_DeleteHyperparameterTuningJob_async", "segments": [ { "end": 55, @@ -17228,7 +17888,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_async.py" + "title": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_async.py" }, { "canonical": true, @@ -17237,19 +17897,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_hyperparameter_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.DeleteModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "DeleteModelDeploymentMonitoringJob" + "shortName": "DeleteHyperparameterTuningJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest" }, { "name": "name", @@ -17269,13 +17929,13 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "delete_model_deployment_monitoring_job" + "shortName": "delete_hyperparameter_tuning_job" }, - "description": "Sample for DeleteModelDeploymentMonitoringJob", - "file": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_sync.py", + "description": "Sample for DeleteHyperparameterTuningJob", + "file": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1_generated_JobService_DeleteModelDeploymentMonitoringJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_DeleteHyperparameterTuningJob_sync", "segments": [ { "end": 55, @@ -17308,7 +17968,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_sync.py" + "title": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_sync.py" }, { "canonical": true, @@ -17318,19 +17978,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_nas_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.DeleteNasJob", + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteModelDeploymentMonitoringJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "DeleteNasJob" + "shortName": "DeleteModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteNasJobRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteModelDeploymentMonitoringJobRequest" }, { "name": "name", @@ -17350,13 +18010,13 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_nas_job" + "shortName": "delete_model_deployment_monitoring_job" }, - "description": "Sample for DeleteNasJob", - "file": "aiplatform_v1_generated_job_service_delete_nas_job_async.py", + "description": "Sample for DeleteModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_DeleteNasJob_async", + "regionTag": 
"aiplatform_v1_generated_JobService_DeleteModelDeploymentMonitoringJob_async", "segments": [ { "end": 55, @@ -17389,7 +18049,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_delete_nas_job_async.py" + "title": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_async.py" }, { "canonical": true, @@ -17398,19 +18058,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_nas_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.DeleteNasJob", + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteModelDeploymentMonitoringJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "DeleteNasJob" + "shortName": "DeleteModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteNasJobRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteModelDeploymentMonitoringJobRequest" }, { "name": "name", @@ -17430,13 +18090,13 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "delete_nas_job" + "shortName": "delete_model_deployment_monitoring_job" }, - "description": "Sample for DeleteNasJob", - "file": "aiplatform_v1_generated_job_service_delete_nas_job_sync.py", + "description": "Sample for DeleteModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_DeleteNasJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_DeleteModelDeploymentMonitoringJob_sync", "segments": [ { "end": 55, @@ -17469,7 +18129,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1_generated_job_service_delete_nas_job_sync.py" + "title": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_sync.py" }, { "canonical": true, @@ -17479,19 +18139,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_batch_prediction_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_nas_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob", + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteNasJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetBatchPredictionJob" + "shortName": "DeleteNasJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteNasJobRequest" }, { "name": "name", @@ -17510,22 +18170,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.BatchPredictionJob", - "shortName": "get_batch_prediction_job" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_nas_job" }, - "description": "Sample for GetBatchPredictionJob", - "file": "aiplatform_v1_generated_job_service_get_batch_prediction_job_async.py", + "description": "Sample for DeleteNasJob", + "file": "aiplatform_v1_generated_job_service_delete_nas_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetBatchPredictionJob_async", + "regionTag": "aiplatform_v1_generated_JobService_DeleteNasJob_async", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -17540,17 +18200,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": 
"REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_batch_prediction_job_async.py" + "title": "aiplatform_v1_generated_job_service_delete_nas_job_async.py" }, { "canonical": true, @@ -17559,19 +18219,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_batch_prediction_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_nas_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob", + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteNasJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetBatchPredictionJob" + "shortName": "DeleteNasJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteNasJobRequest" }, { "name": "name", @@ -17590,22 +18250,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.BatchPredictionJob", - "shortName": "get_batch_prediction_job" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_nas_job" }, - "description": "Sample for GetBatchPredictionJob", - "file": "aiplatform_v1_generated_job_service_get_batch_prediction_job_sync.py", + "description": "Sample for DeleteNasJob", + "file": "aiplatform_v1_generated_job_service_delete_nas_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetBatchPredictionJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_DeleteNasJob_sync", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -17620,17 +18280,17 @@ "type": 
"REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_batch_prediction_job_sync.py" + "title": "aiplatform_v1_generated_job_service_delete_nas_job_sync.py" }, { "canonical": true, @@ -17640,19 +18300,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_custom_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_batch_prediction_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetCustomJob", + "fullName": "google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetCustomJob" + "shortName": "GetBatchPredictionJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetCustomJobRequest" + "type": "google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest" }, { "name": "name", @@ -17671,14 +18331,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.CustomJob", - "shortName": "get_custom_job" + "resultType": "google.cloud.aiplatform_v1.types.BatchPredictionJob", + "shortName": "get_batch_prediction_job" }, - "description": "Sample for GetCustomJob", - "file": "aiplatform_v1_generated_job_service_get_custom_job_async.py", + "description": "Sample for GetBatchPredictionJob", + "file": "aiplatform_v1_generated_job_service_get_batch_prediction_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetCustomJob_async", + "regionTag": "aiplatform_v1_generated_JobService_GetBatchPredictionJob_async", "segments": [ { "end": 51, @@ -17711,7 +18371,7 @@ "type": 
"RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_custom_job_async.py" + "title": "aiplatform_v1_generated_job_service_get_batch_prediction_job_async.py" }, { "canonical": true, @@ -17720,19 +18380,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_custom_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_batch_prediction_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetCustomJob", + "fullName": "google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetCustomJob" + "shortName": "GetBatchPredictionJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetCustomJobRequest" + "type": "google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest" }, { "name": "name", @@ -17751,14 +18411,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.CustomJob", - "shortName": "get_custom_job" + "resultType": "google.cloud.aiplatform_v1.types.BatchPredictionJob", + "shortName": "get_batch_prediction_job" }, - "description": "Sample for GetCustomJob", - "file": "aiplatform_v1_generated_job_service_get_custom_job_sync.py", + "description": "Sample for GetBatchPredictionJob", + "file": "aiplatform_v1_generated_job_service_get_batch_prediction_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetCustomJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_GetBatchPredictionJob_sync", "segments": [ { "end": 51, @@ -17791,7 +18451,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_custom_job_sync.py" + "title": "aiplatform_v1_generated_job_service_get_batch_prediction_job_sync.py" }, { "canonical": true, 
@@ -17801,19 +18461,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_data_labeling_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_custom_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetDataLabelingJob", + "fullName": "google.cloud.aiplatform.v1.JobService.GetCustomJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetDataLabelingJob" + "shortName": "GetCustomJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest" + "type": "google.cloud.aiplatform_v1.types.GetCustomJobRequest" }, { "name": "name", @@ -17832,14 +18492,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.DataLabelingJob", - "shortName": "get_data_labeling_job" + "resultType": "google.cloud.aiplatform_v1.types.CustomJob", + "shortName": "get_custom_job" }, - "description": "Sample for GetDataLabelingJob", - "file": "aiplatform_v1_generated_job_service_get_data_labeling_job_async.py", + "description": "Sample for GetCustomJob", + "file": "aiplatform_v1_generated_job_service_get_custom_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetDataLabelingJob_async", + "regionTag": "aiplatform_v1_generated_JobService_GetCustomJob_async", "segments": [ { "end": 51, @@ -17872,7 +18532,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_data_labeling_job_async.py" + "title": "aiplatform_v1_generated_job_service_get_custom_job_async.py" }, { "canonical": true, @@ -17881,19 +18541,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_data_labeling_job", + 
"fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_custom_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetDataLabelingJob", + "fullName": "google.cloud.aiplatform.v1.JobService.GetCustomJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetDataLabelingJob" + "shortName": "GetCustomJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest" + "type": "google.cloud.aiplatform_v1.types.GetCustomJobRequest" }, { "name": "name", @@ -17912,14 +18572,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.DataLabelingJob", - "shortName": "get_data_labeling_job" + "resultType": "google.cloud.aiplatform_v1.types.CustomJob", + "shortName": "get_custom_job" }, - "description": "Sample for GetDataLabelingJob", - "file": "aiplatform_v1_generated_job_service_get_data_labeling_job_sync.py", + "description": "Sample for GetCustomJob", + "file": "aiplatform_v1_generated_job_service_get_custom_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetDataLabelingJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_GetCustomJob_sync", "segments": [ { "end": 51, @@ -17952,7 +18612,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_data_labeling_job_sync.py" + "title": "aiplatform_v1_generated_job_service_get_custom_job_sync.py" }, { "canonical": true, @@ -17962,19 +18622,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_hyperparameter_tuning_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_data_labeling_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob", + "fullName": 
"google.cloud.aiplatform.v1.JobService.GetDataLabelingJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetHyperparameterTuningJob" + "shortName": "GetDataLabelingJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest" + "type": "google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest" }, { "name": "name", @@ -17993,14 +18653,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob", - "shortName": "get_hyperparameter_tuning_job" + "resultType": "google.cloud.aiplatform_v1.types.DataLabelingJob", + "shortName": "get_data_labeling_job" }, - "description": "Sample for GetHyperparameterTuningJob", - "file": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_async.py", + "description": "Sample for GetDataLabelingJob", + "file": "aiplatform_v1_generated_job_service_get_data_labeling_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetHyperparameterTuningJob_async", + "regionTag": "aiplatform_v1_generated_JobService_GetDataLabelingJob_async", "segments": [ { "end": 51, @@ -18033,7 +18693,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_async.py" + "title": "aiplatform_v1_generated_job_service_get_data_labeling_job_async.py" }, { "canonical": true, @@ -18042,19 +18702,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_hyperparameter_tuning_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_data_labeling_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob", + "fullName": "google.cloud.aiplatform.v1.JobService.GetDataLabelingJob", "service": { "fullName": 
"google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetHyperparameterTuningJob" + "shortName": "GetDataLabelingJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest" + "type": "google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest" }, { "name": "name", @@ -18073,14 +18733,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob", - "shortName": "get_hyperparameter_tuning_job" + "resultType": "google.cloud.aiplatform_v1.types.DataLabelingJob", + "shortName": "get_data_labeling_job" }, - "description": "Sample for GetHyperparameterTuningJob", - "file": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_sync.py", + "description": "Sample for GetDataLabelingJob", + "file": "aiplatform_v1_generated_job_service_get_data_labeling_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetHyperparameterTuningJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_GetDataLabelingJob_sync", "segments": [ { "end": 51, @@ -18113,7 +18773,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_sync.py" + "title": "aiplatform_v1_generated_job_service_get_data_labeling_job_sync.py" }, { "canonical": true, @@ -18123,19 +18783,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_hyperparameter_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", 
"shortName": "JobService" }, - "shortName": "GetModelDeploymentMonitoringJob" + "shortName": "GetHyperparameterTuningJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest" }, { "name": "name", @@ -18154,14 +18814,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob", - "shortName": "get_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob", + "shortName": "get_hyperparameter_tuning_job" }, - "description": "Sample for GetModelDeploymentMonitoringJob", - "file": "aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_async.py", + "description": "Sample for GetHyperparameterTuningJob", + "file": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetModelDeploymentMonitoringJob_async", + "regionTag": "aiplatform_v1_generated_JobService_GetHyperparameterTuningJob_async", "segments": [ { "end": 51, @@ -18194,7 +18854,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_async.py" + "title": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_async.py" }, { "canonical": true, @@ -18203,19 +18863,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_hyperparameter_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob", "service": { 
"fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetModelDeploymentMonitoringJob" + "shortName": "GetHyperparameterTuningJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest" }, { "name": "name", @@ -18234,14 +18894,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob", - "shortName": "get_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob", + "shortName": "get_hyperparameter_tuning_job" }, - "description": "Sample for GetModelDeploymentMonitoringJob", - "file": "aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_sync.py", + "description": "Sample for GetHyperparameterTuningJob", + "file": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetModelDeploymentMonitoringJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_GetHyperparameterTuningJob_sync", "segments": [ { "end": 51, @@ -18274,7 +18934,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_sync.py" + "title": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_sync.py" }, { "canonical": true, @@ -18284,19 +18944,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_nas_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetNasJob", + "fullName": 
"google.cloud.aiplatform.v1.JobService.GetModelDeploymentMonitoringJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetNasJob" + "shortName": "GetModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetNasJobRequest" + "type": "google.cloud.aiplatform_v1.types.GetModelDeploymentMonitoringJobRequest" }, { "name": "name", @@ -18315,14 +18975,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.NasJob", - "shortName": "get_nas_job" + "resultType": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob", + "shortName": "get_model_deployment_monitoring_job" }, - "description": "Sample for GetNasJob", - "file": "aiplatform_v1_generated_job_service_get_nas_job_async.py", + "description": "Sample for GetModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetNasJob_async", + "regionTag": "aiplatform_v1_generated_JobService_GetModelDeploymentMonitoringJob_async", "segments": [ { "end": 51, @@ -18355,7 +19015,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_nas_job_async.py" + "title": "aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_async.py" }, { "canonical": true, @@ -18364,19 +19024,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_nas_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetNasJob", + "fullName": "google.cloud.aiplatform.v1.JobService.GetModelDeploymentMonitoringJob", "service": { "fullName": 
"google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetNasJob" + "shortName": "GetModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetNasJobRequest" + "type": "google.cloud.aiplatform_v1.types.GetModelDeploymentMonitoringJobRequest" }, { "name": "name", @@ -18395,14 +19055,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.NasJob", - "shortName": "get_nas_job" + "resultType": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob", + "shortName": "get_model_deployment_monitoring_job" }, - "description": "Sample for GetNasJob", - "file": "aiplatform_v1_generated_job_service_get_nas_job_sync.py", + "description": "Sample for GetModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetNasJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_GetModelDeploymentMonitoringJob_sync", "segments": [ { "end": 51, @@ -18435,7 +19095,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_nas_job_sync.py" + "title": "aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_sync.py" }, { "canonical": true, @@ -18445,19 +19105,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_nas_trial_detail", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_nas_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetNasTrialDetail", + "fullName": "google.cloud.aiplatform.v1.JobService.GetNasJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetNasTrialDetail" + "shortName": "GetNasJob" }, 
"parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetNasTrialDetailRequest" + "type": "google.cloud.aiplatform_v1.types.GetNasJobRequest" }, { "name": "name", @@ -18476,14 +19136,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.NasTrialDetail", - "shortName": "get_nas_trial_detail" + "resultType": "google.cloud.aiplatform_v1.types.NasJob", + "shortName": "get_nas_job" }, - "description": "Sample for GetNasTrialDetail", - "file": "aiplatform_v1_generated_job_service_get_nas_trial_detail_async.py", + "description": "Sample for GetNasJob", + "file": "aiplatform_v1_generated_job_service_get_nas_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetNasTrialDetail_async", + "regionTag": "aiplatform_v1_generated_JobService_GetNasJob_async", "segments": [ { "end": 51, @@ -18516,7 +19176,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_nas_trial_detail_async.py" + "title": "aiplatform_v1_generated_job_service_get_nas_job_async.py" }, { "canonical": true, @@ -18525,19 +19185,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_nas_trial_detail", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_nas_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetNasTrialDetail", + "fullName": "google.cloud.aiplatform.v1.JobService.GetNasJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetNasTrialDetail" + "shortName": "GetNasJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetNasTrialDetailRequest" + "type": "google.cloud.aiplatform_v1.types.GetNasJobRequest" }, { "name": "name", @@ -18556,14 +19216,14 @@ "type": "Sequence[Tuple[str, str]" } ], - 
"resultType": "google.cloud.aiplatform_v1.types.NasTrialDetail", - "shortName": "get_nas_trial_detail" + "resultType": "google.cloud.aiplatform_v1.types.NasJob", + "shortName": "get_nas_job" }, - "description": "Sample for GetNasTrialDetail", - "file": "aiplatform_v1_generated_job_service_get_nas_trial_detail_sync.py", + "description": "Sample for GetNasJob", + "file": "aiplatform_v1_generated_job_service_get_nas_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetNasTrialDetail_sync", + "regionTag": "aiplatform_v1_generated_JobService_GetNasJob_sync", "segments": [ { "end": 51, @@ -18596,7 +19256,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_nas_trial_detail_sync.py" + "title": "aiplatform_v1_generated_job_service_get_nas_job_sync.py" }, { "canonical": true, @@ -18606,22 +19266,22 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_batch_prediction_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_nas_trial_detail", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.GetNasTrialDetail", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListBatchPredictionJobs" + "shortName": "GetNasTrialDetail" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest" + "type": "google.cloud.aiplatform_v1.types.GetNasTrialDetailRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -18637,22 +19297,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsAsyncPager", - "shortName": "list_batch_prediction_jobs" + 
"resultType": "google.cloud.aiplatform_v1.types.NasTrialDetail", + "shortName": "get_nas_trial_detail" }, - "description": "Sample for ListBatchPredictionJobs", - "file": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_async.py", + "description": "Sample for GetNasTrialDetail", + "file": "aiplatform_v1_generated_job_service_get_nas_trial_detail_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListBatchPredictionJobs_async", + "regionTag": "aiplatform_v1_generated_JobService_GetNasTrialDetail_async", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -18672,12 +19332,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_async.py" + "title": "aiplatform_v1_generated_job_service_get_nas_trial_detail_async.py" }, { "canonical": true, @@ -18686,22 +19346,22 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_batch_prediction_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_nas_trial_detail", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.GetNasTrialDetail", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListBatchPredictionJobs" + "shortName": "GetNasTrialDetail" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest" + "type": "google.cloud.aiplatform_v1.types.GetNasTrialDetailRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -18717,22 +19377,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": 
"google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsPager", - "shortName": "list_batch_prediction_jobs" + "resultType": "google.cloud.aiplatform_v1.types.NasTrialDetail", + "shortName": "get_nas_trial_detail" }, - "description": "Sample for ListBatchPredictionJobs", - "file": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_sync.py", + "description": "Sample for GetNasTrialDetail", + "file": "aiplatform_v1_generated_job_service_get_nas_trial_detail_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListBatchPredictionJobs_sync", + "regionTag": "aiplatform_v1_generated_JobService_GetNasTrialDetail_sync", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -18752,12 +19412,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_sync.py" + "title": "aiplatform_v1_generated_job_service_get_nas_trial_detail_sync.py" }, { "canonical": true, @@ -18767,19 +19427,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_custom_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_batch_prediction_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListCustomJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListCustomJobs" + "shortName": "ListBatchPredictionJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListCustomJobsRequest" + "type": "google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest" }, { "name": 
"parent", @@ -18798,14 +19458,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsAsyncPager", - "shortName": "list_custom_jobs" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsAsyncPager", + "shortName": "list_batch_prediction_jobs" }, - "description": "Sample for ListCustomJobs", - "file": "aiplatform_v1_generated_job_service_list_custom_jobs_async.py", + "description": "Sample for ListBatchPredictionJobs", + "file": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListCustomJobs_async", + "regionTag": "aiplatform_v1_generated_JobService_ListBatchPredictionJobs_async", "segments": [ { "end": 52, @@ -18838,7 +19498,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_custom_jobs_async.py" + "title": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_async.py" }, { "canonical": true, @@ -18847,19 +19507,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_custom_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_batch_prediction_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListCustomJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListCustomJobs" + "shortName": "ListBatchPredictionJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListCustomJobsRequest" + "type": "google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest" }, { "name": "parent", @@ -18878,14 +19538,14 @@ "type": "Sequence[Tuple[str, str]" } ], - 
"resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsPager", - "shortName": "list_custom_jobs" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsPager", + "shortName": "list_batch_prediction_jobs" }, - "description": "Sample for ListCustomJobs", - "file": "aiplatform_v1_generated_job_service_list_custom_jobs_sync.py", + "description": "Sample for ListBatchPredictionJobs", + "file": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListCustomJobs_sync", + "regionTag": "aiplatform_v1_generated_JobService_ListBatchPredictionJobs_sync", "segments": [ { "end": 52, @@ -18918,7 +19578,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_custom_jobs_sync.py" + "title": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_sync.py" }, { "canonical": true, @@ -18928,19 +19588,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_data_labeling_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_custom_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.ListCustomJobs", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListDataLabelingJobs" + "shortName": "ListCustomJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest" + "type": "google.cloud.aiplatform_v1.types.ListCustomJobsRequest" }, { "name": "parent", @@ -18959,14 +19619,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": 
"google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsAsyncPager", - "shortName": "list_data_labeling_jobs" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsAsyncPager", + "shortName": "list_custom_jobs" }, - "description": "Sample for ListDataLabelingJobs", - "file": "aiplatform_v1_generated_job_service_list_data_labeling_jobs_async.py", + "description": "Sample for ListCustomJobs", + "file": "aiplatform_v1_generated_job_service_list_custom_jobs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListDataLabelingJobs_async", + "regionTag": "aiplatform_v1_generated_JobService_ListCustomJobs_async", "segments": [ { "end": 52, @@ -18999,7 +19659,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_data_labeling_jobs_async.py" + "title": "aiplatform_v1_generated_job_service_list_custom_jobs_async.py" }, { "canonical": true, @@ -19008,19 +19668,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_data_labeling_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_custom_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.ListCustomJobs", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListDataLabelingJobs" + "shortName": "ListCustomJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest" + "type": "google.cloud.aiplatform_v1.types.ListCustomJobsRequest" }, { "name": "parent", @@ -19039,14 +19699,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsPager", - "shortName": 
"list_data_labeling_jobs" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsPager", + "shortName": "list_custom_jobs" }, - "description": "Sample for ListDataLabelingJobs", - "file": "aiplatform_v1_generated_job_service_list_data_labeling_jobs_sync.py", + "description": "Sample for ListCustomJobs", + "file": "aiplatform_v1_generated_job_service_list_custom_jobs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListDataLabelingJobs_sync", + "regionTag": "aiplatform_v1_generated_JobService_ListCustomJobs_sync", "segments": [ { "end": 52, @@ -19079,7 +19739,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_data_labeling_jobs_sync.py" + "title": "aiplatform_v1_generated_job_service_list_custom_jobs_sync.py" }, { "canonical": true, @@ -19089,19 +19749,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_hyperparameter_tuning_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_data_labeling_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListHyperparameterTuningJobs" + "shortName": "ListDataLabelingJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest" + "type": "google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest" }, { "name": "parent", @@ -19120,14 +19780,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsAsyncPager", - "shortName": "list_hyperparameter_tuning_jobs" + 
"resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsAsyncPager", + "shortName": "list_data_labeling_jobs" }, - "description": "Sample for ListHyperparameterTuningJobs", - "file": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_async.py", + "description": "Sample for ListDataLabelingJobs", + "file": "aiplatform_v1_generated_job_service_list_data_labeling_jobs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListHyperparameterTuningJobs_async", + "regionTag": "aiplatform_v1_generated_JobService_ListDataLabelingJobs_async", "segments": [ { "end": 52, @@ -19160,7 +19820,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_async.py" + "title": "aiplatform_v1_generated_job_service_list_data_labeling_jobs_async.py" }, { "canonical": true, @@ -19169,19 +19829,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_hyperparameter_tuning_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_data_labeling_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListHyperparameterTuningJobs" + "shortName": "ListDataLabelingJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest" + "type": "google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest" }, { "name": "parent", @@ -19200,14 +19860,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsPager", - "shortName": 
"list_hyperparameter_tuning_jobs" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsPager", + "shortName": "list_data_labeling_jobs" }, - "description": "Sample for ListHyperparameterTuningJobs", - "file": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py", + "description": "Sample for ListDataLabelingJobs", + "file": "aiplatform_v1_generated_job_service_list_data_labeling_jobs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListHyperparameterTuningJobs_sync", + "regionTag": "aiplatform_v1_generated_JobService_ListDataLabelingJobs_sync", "segments": [ { "end": 52, @@ -19240,7 +19900,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py" + "title": "aiplatform_v1_generated_job_service_list_data_labeling_jobs_sync.py" }, { "canonical": true, @@ -19250,19 +19910,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_model_deployment_monitoring_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_hyperparameter_tuning_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListModelDeploymentMonitoringJobs" + "shortName": "ListHyperparameterTuningJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsRequest" + "type": "google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest" }, { "name": "parent", @@ -19281,14 +19941,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": 
"google.cloud.aiplatform_v1.services.job_service.pagers.ListModelDeploymentMonitoringJobsAsyncPager", - "shortName": "list_model_deployment_monitoring_jobs" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsAsyncPager", + "shortName": "list_hyperparameter_tuning_jobs" }, - "description": "Sample for ListModelDeploymentMonitoringJobs", - "file": "aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_async.py", + "description": "Sample for ListHyperparameterTuningJobs", + "file": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListModelDeploymentMonitoringJobs_async", + "regionTag": "aiplatform_v1_generated_JobService_ListHyperparameterTuningJobs_async", "segments": [ { "end": 52, @@ -19321,7 +19981,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_async.py" + "title": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_async.py" }, { "canonical": true, @@ -19330,19 +19990,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_model_deployment_monitoring_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_hyperparameter_tuning_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListModelDeploymentMonitoringJobs" + "shortName": "ListHyperparameterTuningJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsRequest" + "type": 
"google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest" }, { "name": "parent", @@ -19361,14 +20021,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListModelDeploymentMonitoringJobsPager", - "shortName": "list_model_deployment_monitoring_jobs" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsPager", + "shortName": "list_hyperparameter_tuning_jobs" }, - "description": "Sample for ListModelDeploymentMonitoringJobs", - "file": "aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py", + "description": "Sample for ListHyperparameterTuningJobs", + "file": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListModelDeploymentMonitoringJobs_sync", + "regionTag": "aiplatform_v1_generated_JobService_ListHyperparameterTuningJobs_sync", "segments": [ { "end": 52, @@ -19401,7 +20061,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py" + "title": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py" }, { "canonical": true, @@ -19411,19 +20071,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_nas_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_model_deployment_monitoring_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListNasJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListNasJobs" + "shortName": "ListModelDeploymentMonitoringJobs" }, "parameters": [ { 
"name": "request", - "type": "google.cloud.aiplatform_v1.types.ListNasJobsRequest" + "type": "google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsRequest" }, { "name": "parent", @@ -19442,14 +20102,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListNasJobsAsyncPager", - "shortName": "list_nas_jobs" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListModelDeploymentMonitoringJobsAsyncPager", + "shortName": "list_model_deployment_monitoring_jobs" }, - "description": "Sample for ListNasJobs", - "file": "aiplatform_v1_generated_job_service_list_nas_jobs_async.py", + "description": "Sample for ListModelDeploymentMonitoringJobs", + "file": "aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListNasJobs_async", + "regionTag": "aiplatform_v1_generated_JobService_ListModelDeploymentMonitoringJobs_async", "segments": [ { "end": 52, @@ -19482,7 +20142,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_nas_jobs_async.py" + "title": "aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_async.py" }, { "canonical": true, @@ -19491,19 +20151,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_nas_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_model_deployment_monitoring_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListNasJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListNasJobs" + "shortName": "ListModelDeploymentMonitoringJobs" }, "parameters": [ { "name": 
"request", - "type": "google.cloud.aiplatform_v1.types.ListNasJobsRequest" + "type": "google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsRequest" }, { "name": "parent", @@ -19522,14 +20182,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListNasJobsPager", - "shortName": "list_nas_jobs" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListModelDeploymentMonitoringJobsPager", + "shortName": "list_model_deployment_monitoring_jobs" }, - "description": "Sample for ListNasJobs", - "file": "aiplatform_v1_generated_job_service_list_nas_jobs_sync.py", + "description": "Sample for ListModelDeploymentMonitoringJobs", + "file": "aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListNasJobs_sync", + "regionTag": "aiplatform_v1_generated_JobService_ListModelDeploymentMonitoringJobs_sync", "segments": [ { "end": 52, @@ -19562,7 +20222,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_nas_jobs_sync.py" + "title": "aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py" }, { "canonical": true, @@ -19572,19 +20232,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_nas_trial_details", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_nas_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListNasTrialDetails", + "fullName": "google.cloud.aiplatform.v1.JobService.ListNasJobs", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListNasTrialDetails" + "shortName": "ListNasJobs" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1.types.ListNasTrialDetailsRequest" + "type": "google.cloud.aiplatform_v1.types.ListNasJobsRequest" }, { "name": "parent", @@ -19603,14 +20263,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListNasTrialDetailsAsyncPager", - "shortName": "list_nas_trial_details" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListNasJobsAsyncPager", + "shortName": "list_nas_jobs" }, - "description": "Sample for ListNasTrialDetails", - "file": "aiplatform_v1_generated_job_service_list_nas_trial_details_async.py", + "description": "Sample for ListNasJobs", + "file": "aiplatform_v1_generated_job_service_list_nas_jobs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListNasTrialDetails_async", + "regionTag": "aiplatform_v1_generated_JobService_ListNasJobs_async", "segments": [ { "end": 52, @@ -19643,7 +20303,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_nas_trial_details_async.py" + "title": "aiplatform_v1_generated_job_service_list_nas_jobs_async.py" }, { "canonical": true, @@ -19652,19 +20312,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_nas_trial_details", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_nas_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListNasTrialDetails", + "fullName": "google.cloud.aiplatform.v1.JobService.ListNasJobs", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListNasTrialDetails" + "shortName": "ListNasJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListNasTrialDetailsRequest" + "type": "google.cloud.aiplatform_v1.types.ListNasJobsRequest" }, { "name": "parent", @@ 
-19683,14 +20343,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListNasTrialDetailsPager", - "shortName": "list_nas_trial_details" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListNasJobsPager", + "shortName": "list_nas_jobs" }, - "description": "Sample for ListNasTrialDetails", - "file": "aiplatform_v1_generated_job_service_list_nas_trial_details_sync.py", + "description": "Sample for ListNasJobs", + "file": "aiplatform_v1_generated_job_service_list_nas_jobs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListNasTrialDetails_sync", + "regionTag": "aiplatform_v1_generated_JobService_ListNasJobs_sync", "segments": [ { "end": 52, @@ -19723,7 +20383,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_nas_trial_details_sync.py" + "title": "aiplatform_v1_generated_job_service_list_nas_jobs_sync.py" }, { "canonical": true, @@ -19733,22 +20393,22 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.pause_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_nas_trial_details", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.PauseModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1.JobService.ListNasTrialDetails", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "PauseModelDeploymentMonitoringJob" + "shortName": "ListNasTrialDetails" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.PauseModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1.types.ListNasTrialDetailsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -19764,21 +20424,22 @@ 
"type": "Sequence[Tuple[str, str]" } ], - "shortName": "pause_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListNasTrialDetailsAsyncPager", + "shortName": "list_nas_trial_details" }, - "description": "Sample for PauseModelDeploymentMonitoringJob", - "file": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_async.py", + "description": "Sample for ListNasTrialDetails", + "file": "aiplatform_v1_generated_job_service_list_nas_trial_details_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_PauseModelDeploymentMonitoringJob_async", + "regionTag": "aiplatform_v1_generated_JobService_ListNasTrialDetails_async", "segments": [ { - "end": 49, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 52, "start": 27, "type": "SHORT" }, @@ -19793,15 +20454,17 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_async.py" + "title": "aiplatform_v1_generated_job_service_list_nas_trial_details_async.py" }, { "canonical": true, @@ -19810,22 +20473,22 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.pause_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_nas_trial_details", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.PauseModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1.JobService.ListNasTrialDetails", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "PauseModelDeploymentMonitoringJob" + "shortName": "ListNasTrialDetails" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1.types.PauseModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1.types.ListNasTrialDetailsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -19841,21 +20504,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "pause_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListNasTrialDetailsPager", + "shortName": "list_nas_trial_details" }, - "description": "Sample for PauseModelDeploymentMonitoringJob", - "file": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_sync.py", + "description": "Sample for ListNasTrialDetails", + "file": "aiplatform_v1_generated_job_service_list_nas_trial_details_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_PauseModelDeploymentMonitoringJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_ListNasTrialDetails_sync", "segments": [ { - "end": 49, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 52, "start": 27, "type": "SHORT" }, @@ -19870,15 +20534,17 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_sync.py" + "title": "aiplatform_v1_generated_job_service_list_nas_trial_details_sync.py" }, { "canonical": true, @@ -19888,19 +20554,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.resume_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.pause_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ResumeModelDeploymentMonitoringJob", + "fullName": 
"google.cloud.aiplatform.v1.JobService.PauseModelDeploymentMonitoringJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ResumeModelDeploymentMonitoringJob" + "shortName": "PauseModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ResumeModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1.types.PauseModelDeploymentMonitoringJobRequest" }, { "name": "name", @@ -19919,13 +20585,13 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "resume_model_deployment_monitoring_job" + "shortName": "pause_model_deployment_monitoring_job" }, - "description": "Sample for ResumeModelDeploymentMonitoringJob", - "file": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_async.py", + "description": "Sample for PauseModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ResumeModelDeploymentMonitoringJob_async", + "regionTag": "aiplatform_v1_generated_JobService_PauseModelDeploymentMonitoringJob_async", "segments": [ { "end": 49, @@ -19956,7 +20622,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_async.py" + "title": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_async.py" }, { "canonical": true, @@ -19965,19 +20631,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.resume_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.pause_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ResumeModelDeploymentMonitoringJob", + "fullName": 
"google.cloud.aiplatform.v1.JobService.PauseModelDeploymentMonitoringJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ResumeModelDeploymentMonitoringJob" + "shortName": "PauseModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ResumeModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1.types.PauseModelDeploymentMonitoringJobRequest" }, { "name": "name", @@ -19996,13 +20662,13 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "resume_model_deployment_monitoring_job" + "shortName": "pause_model_deployment_monitoring_job" }, - "description": "Sample for ResumeModelDeploymentMonitoringJob", - "file": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_sync.py", + "description": "Sample for PauseModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ResumeModelDeploymentMonitoringJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_PauseModelDeploymentMonitoringJob_sync", "segments": [ { "end": 49, @@ -20033,7 +20699,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_sync.py" + "title": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_sync.py" }, { "canonical": true, @@ -20043,26 +20709,22 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.search_model_deployment_monitoring_stats_anomalies", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.resume_model_deployment_monitoring_job", "method": { - "fullName": 
"google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies", + "fullName": "google.cloud.aiplatform.v1.JobService.ResumeModelDeploymentMonitoringJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "SearchModelDeploymentMonitoringStatsAnomalies" + "shortName": "ResumeModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest" - }, - { - "name": "model_deployment_monitoring_job", - "type": "str" + "type": "google.cloud.aiplatform_v1.types.ResumeModelDeploymentMonitoringJobRequest" }, { - "name": "deployed_model_id", + "name": "name", "type": "str" }, { @@ -20078,22 +20740,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager", - "shortName": "search_model_deployment_monitoring_stats_anomalies" + "shortName": "resume_model_deployment_monitoring_job" }, - "description": "Sample for SearchModelDeploymentMonitoringStatsAnomalies", - "file": "aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py", + "description": "Sample for ResumeModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_async", + "regionTag": "aiplatform_v1_generated_JobService_ResumeModelDeploymentMonitoringJob_async", "segments": [ { - "end": 53, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 53, + "end": 49, "start": 27, "type": "SHORT" }, @@ -20103,22 +20764,20 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 49, - "start": 47, + "start": 46, "type": 
"REQUEST_EXECUTION" }, { - "end": 54, - "start": 50, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py" + "title": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_async.py" }, { "canonical": true, @@ -20127,26 +20786,22 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.search_model_deployment_monitoring_stats_anomalies", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.resume_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies", + "fullName": "google.cloud.aiplatform.v1.JobService.ResumeModelDeploymentMonitoringJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "SearchModelDeploymentMonitoringStatsAnomalies" + "shortName": "ResumeModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest" - }, - { - "name": "model_deployment_monitoring_job", - "type": "str" + "type": "google.cloud.aiplatform_v1.types.ResumeModelDeploymentMonitoringJobRequest" }, { - "name": "deployed_model_id", + "name": "name", "type": "str" }, { @@ -20162,22 +20817,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager", - "shortName": "search_model_deployment_monitoring_stats_anomalies" + "shortName": "resume_model_deployment_monitoring_job" }, - "description": "Sample for SearchModelDeploymentMonitoringStatsAnomalies", - "file": "aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py", + "description": "Sample for 
ResumeModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_sync", + "regionTag": "aiplatform_v1_generated_JobService_ResumeModelDeploymentMonitoringJob_sync", "segments": [ { - "end": 53, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 53, + "end": 49, "start": 27, "type": "SHORT" }, @@ -20187,22 +20841,20 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 49, - "start": 47, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 54, - "start": 50, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py" + "title": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_sync.py" }, { "canonical": true, @@ -20212,27 +20864,27 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.update_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.search_model_deployment_monitoring_stats_anomalies", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.UpdateModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "UpdateModelDeploymentMonitoringJob" + "shortName": "SearchModelDeploymentMonitoringStatsAnomalies" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.UpdateModelDeploymentMonitoringJobRequest" + "type": 
"google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest" }, { "name": "model_deployment_monitoring_job", - "type": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob" + "type": "str" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "deployed_model_id", + "type": "str" }, { "name": "retry", @@ -20247,22 +20899,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "update_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager", + "shortName": "search_model_deployment_monitoring_stats_anomalies" }, - "description": "Sample for UpdateModelDeploymentMonitoringJob", - "file": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_async.py", + "description": "Sample for SearchModelDeploymentMonitoringStatsAnomalies", + "file": "aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_UpdateModelDeploymentMonitoringJob_async", + "regionTag": "aiplatform_v1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_async", "segments": [ { - "end": 59, + "end": 53, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 53, "start": 27, "type": "SHORT" }, @@ -20272,22 +20924,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 49, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 54, + "start": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_async.py" + "title": 
"aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py" }, { "canonical": true, @@ -20296,27 +20948,27 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.update_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.search_model_deployment_monitoring_stats_anomalies", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.UpdateModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "UpdateModelDeploymentMonitoringJob" + "shortName": "SearchModelDeploymentMonitoringStatsAnomalies" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.UpdateModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest" }, { "name": "model_deployment_monitoring_job", - "type": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob" + "type": "str" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "deployed_model_id", + "type": "str" }, { "name": "retry", @@ -20331,22 +20983,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "update_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager", + "shortName": "search_model_deployment_monitoring_stats_anomalies" }, - "description": "Sample for UpdateModelDeploymentMonitoringJob", - "file": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_sync.py", + "description": "Sample for SearchModelDeploymentMonitoringStatsAnomalies", + 
"file": "aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_UpdateModelDeploymentMonitoringJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_sync", "segments": [ { - "end": 59, + "end": 53, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 53, "start": 27, "type": "SHORT" }, @@ -20356,52 +21008,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 49, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 54, + "start": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_sync.py" + "title": "aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceAsyncClient", - "shortName": "LlmUtilityServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceAsyncClient.compute_tokens", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.update_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1.LlmUtilityService.ComputeTokens", + "fullName": "google.cloud.aiplatform.v1.JobService.UpdateModelDeploymentMonitoringJob", "service": { - "fullName": "google.cloud.aiplatform.v1.LlmUtilityService", - "shortName": "LlmUtilityService" + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" }, - "shortName": "ComputeTokens" + "shortName": "UpdateModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - 
"type": "google.cloud.aiplatform_v1.types.ComputeTokensRequest" + "type": "google.cloud.aiplatform_v1.types.UpdateModelDeploymentMonitoringJobRequest" }, { - "name": "endpoint", - "type": "str" + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob" }, { - "name": "instances", - "type": "MutableSequence[google.protobuf.struct_pb2.Value]" + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -20416,22 +21068,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.ComputeTokensResponse", - "shortName": "compute_tokens" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_model_deployment_monitoring_job" }, - "description": "Sample for ComputeTokens", - "file": "aiplatform_v1_generated_llm_utility_service_compute_tokens_async.py", + "description": "Sample for UpdateModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_LlmUtilityService_ComputeTokens_async", + "regionTag": "aiplatform_v1_generated_JobService_UpdateModelDeploymentMonitoringJob_async", "segments": [ { - "end": 55, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 59, "start": 27, "type": "SHORT" }, @@ -20446,46 +21098,46 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 56, "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_llm_utility_service_compute_tokens_async.py" + "title": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceClient", - "shortName": 
"LlmUtilityServiceClient" + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceClient.compute_tokens", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.update_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1.LlmUtilityService.ComputeTokens", + "fullName": "google.cloud.aiplatform.v1.JobService.UpdateModelDeploymentMonitoringJob", "service": { - "fullName": "google.cloud.aiplatform.v1.LlmUtilityService", - "shortName": "LlmUtilityService" + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" }, - "shortName": "ComputeTokens" + "shortName": "UpdateModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ComputeTokensRequest" + "type": "google.cloud.aiplatform_v1.types.UpdateModelDeploymentMonitoringJobRequest" }, { - "name": "endpoint", - "type": "str" + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob" }, { - "name": "instances", - "type": "MutableSequence[google.protobuf.struct_pb2.Value]" + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -20500,22 +21152,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.ComputeTokensResponse", - "shortName": "compute_tokens" + "resultType": "google.api_core.operation.Operation", + "shortName": "update_model_deployment_monitoring_job" }, - "description": "Sample for ComputeTokens", - "file": "aiplatform_v1_generated_llm_utility_service_compute_tokens_sync.py", + "description": "Sample for UpdateModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1_generated_LlmUtilityService_ComputeTokens_sync", + "regionTag": "aiplatform_v1_generated_JobService_UpdateModelDeploymentMonitoringJob_sync", "segments": [ { - "end": 55, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 59, "start": 27, "type": "SHORT" }, @@ -20530,17 +21182,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 56, "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_llm_utility_service_compute_tokens_sync.py" + "title": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_sync.py" }, { "canonical": true, @@ -20550,19 +21202,19 @@ "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceAsyncClient", "shortName": "LlmUtilityServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceAsyncClient.count_tokens", + "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceAsyncClient.compute_tokens", "method": { - "fullName": "google.cloud.aiplatform.v1.LlmUtilityService.CountTokens", + "fullName": "google.cloud.aiplatform.v1.LlmUtilityService.ComputeTokens", "service": { "fullName": "google.cloud.aiplatform.v1.LlmUtilityService", "shortName": "LlmUtilityService" }, - "shortName": "CountTokens" + "shortName": "ComputeTokens" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CountTokensRequest" + "type": "google.cloud.aiplatform_v1.types.ComputeTokensRequest" }, { "name": "endpoint", @@ -20585,14 +21237,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.CountTokensResponse", - "shortName": "count_tokens" + "resultType": "google.cloud.aiplatform_v1.types.ComputeTokensResponse", + "shortName": "compute_tokens" }, - "description": "Sample for CountTokens", - "file": "aiplatform_v1_generated_llm_utility_service_count_tokens_async.py", + "description": "Sample for 
ComputeTokens", + "file": "aiplatform_v1_generated_llm_utility_service_compute_tokens_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_LlmUtilityService_CountTokens_async", + "regionTag": "aiplatform_v1_generated_LlmUtilityService_ComputeTokens_async", "segments": [ { "end": 55, @@ -20625,7 +21277,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_llm_utility_service_count_tokens_async.py" + "title": "aiplatform_v1_generated_llm_utility_service_compute_tokens_async.py" }, { "canonical": true, @@ -20634,19 +21286,19 @@ "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceClient", "shortName": "LlmUtilityServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceClient.count_tokens", + "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceClient.compute_tokens", "method": { - "fullName": "google.cloud.aiplatform.v1.LlmUtilityService.CountTokens", + "fullName": "google.cloud.aiplatform.v1.LlmUtilityService.ComputeTokens", "service": { "fullName": "google.cloud.aiplatform.v1.LlmUtilityService", "shortName": "LlmUtilityService" }, - "shortName": "CountTokens" + "shortName": "ComputeTokens" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CountTokensRequest" + "type": "google.cloud.aiplatform_v1.types.ComputeTokensRequest" }, { "name": "endpoint", @@ -20669,14 +21321,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.CountTokensResponse", - "shortName": "count_tokens" + "resultType": "google.cloud.aiplatform_v1.types.ComputeTokensResponse", + "shortName": "compute_tokens" }, - "description": "Sample for CountTokens", - "file": "aiplatform_v1_generated_llm_utility_service_count_tokens_sync.py", + "description": "Sample for ComputeTokens", + "file": "aiplatform_v1_generated_llm_utility_service_compute_tokens_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1_generated_LlmUtilityService_CountTokens_sync", + "regionTag": "aiplatform_v1_generated_LlmUtilityService_ComputeTokens_sync", "segments": [ { "end": 55, @@ -20709,6 +21361,175 @@ "type": "RESPONSE_HANDLING" } ], + "title": "aiplatform_v1_generated_llm_utility_service_compute_tokens_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceAsyncClient", + "shortName": "LlmUtilityServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceAsyncClient.count_tokens", + "method": { + "fullName": "google.cloud.aiplatform.v1.LlmUtilityService.CountTokens", + "service": { + "fullName": "google.cloud.aiplatform.v1.LlmUtilityService", + "shortName": "LlmUtilityService" + }, + "shortName": "CountTokens" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CountTokensRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "instances", + "type": "MutableSequence[google.protobuf.struct_pb2.Value]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.CountTokensResponse", + "shortName": "count_tokens" + }, + "description": "Sample for CountTokens", + "file": "aiplatform_v1_generated_llm_utility_service_count_tokens_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_LlmUtilityService_CountTokens_async", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 54, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 55, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + 
"start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_llm_utility_service_count_tokens_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceClient", + "shortName": "LlmUtilityServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceClient.count_tokens", + "method": { + "fullName": "google.cloud.aiplatform.v1.LlmUtilityService.CountTokens", + "service": { + "fullName": "google.cloud.aiplatform.v1.LlmUtilityService", + "shortName": "LlmUtilityService" + }, + "shortName": "CountTokens" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CountTokensRequest" + }, + { + "name": "endpoint", + "type": "str" + }, + { + "name": "instances", + "type": "MutableSequence[google.protobuf.struct_pb2.Value]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.CountTokensResponse", + "shortName": "count_tokens" + }, + "description": "Sample for CountTokens", + "file": "aiplatform_v1_generated_llm_utility_service_count_tokens_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_LlmUtilityService_CountTokens_sync", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 54, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 55, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], "title": "aiplatform_v1_generated_llm_utility_service_count_tokens_sync.py" }, { @@ -32378,6 +33199,175 @@ ], "title": 
"aiplatform_v1_generated_prediction_service_server_streaming_predict_sync.py" }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.PredictionServiceAsyncClient.stream_generate_content", + "method": { + "fullName": "google.cloud.aiplatform.v1.PredictionService.StreamGenerateContent", + "service": { + "fullName": "google.cloud.aiplatform.v1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "StreamGenerateContent" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GenerateContentRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "contents", + "type": "MutableSequence[google.cloud.aiplatform_v1.types.Content]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.aiplatform_v1.types.GenerateContentResponse]", + "shortName": "stream_generate_content" + }, + "description": "Sample for StreamGenerateContent", + "file": "aiplatform_v1_generated_prediction_service_stream_generate_content_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PredictionService_StreamGenerateContent_async", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_prediction_service_stream_generate_content_async.py" + 
}, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.PredictionServiceClient", + "shortName": "PredictionServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.PredictionServiceClient.stream_generate_content", + "method": { + "fullName": "google.cloud.aiplatform.v1.PredictionService.StreamGenerateContent", + "service": { + "fullName": "google.cloud.aiplatform.v1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "StreamGenerateContent" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GenerateContentRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "contents", + "type": "MutableSequence[google.cloud.aiplatform_v1.types.Content]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.aiplatform_v1.types.GenerateContentResponse]", + "shortName": "stream_generate_content" + }, + "description": "Sample for StreamGenerateContent", + "file": "aiplatform_v1_generated_prediction_service_stream_generate_content_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_PredictionService_StreamGenerateContent_sync", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_prediction_service_stream_generate_content_sync.py" + }, { "canonical": true, "clientMethod": { diff --git 
a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json index 8b2ec1a558..260a2600ad 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "1.37.0" + "version": "0.1.0" }, "snippets": [ { @@ -33405,12 +33405,12 @@ "regionTag": "aiplatform_v1beta1_generated_PredictionService_CountTokens_async", "segments": [ { - "end": 55, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 60, "start": 27, "type": "SHORT" }, @@ -33420,18 +33420,18 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 54, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 50, + "end": 57, + "start": 55, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], @@ -33489,12 +33489,12 @@ "regionTag": "aiplatform_v1beta1_generated_PredictionService_CountTokens_sync", "segments": [ { - "end": 55, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 60, "start": 27, "type": "SHORT" }, @@ -33504,18 +33504,18 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 54, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 50, + "end": 57, + "start": 55, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], @@ -34511,6 +34511,175 @@ ], "title": "aiplatform_v1beta1_generated_prediction_service_server_streaming_predict_sync.py" }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.stream_generate_content", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.StreamGenerateContent", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "StreamGenerateContent" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GenerateContentRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "contents", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.Content]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.GenerateContentResponse]", + "shortName": "stream_generate_content" + }, + "description": "Sample for StreamGenerateContent", + "file": "aiplatform_v1beta1_generated_prediction_service_stream_generate_content_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_StreamGenerateContent_async", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_prediction_service_stream_generate_content_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", + "shortName": "PredictionServiceClient" + }, + "fullName": 
"google.cloud.aiplatform_v1beta1.PredictionServiceClient.stream_generate_content", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.StreamGenerateContent", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" + }, + "shortName": "StreamGenerateContent" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GenerateContentRequest" + }, + { + "name": "model", + "type": "str" + }, + { + "name": "contents", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.Content]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.GenerateContentResponse]", + "shortName": "stream_generate_content" + }, + "description": "Sample for StreamGenerateContent", + "file": "aiplatform_v1beta1_generated_prediction_service_stream_generate_content_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_StreamGenerateContent_sync", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_prediction_service_stream_generate_content_sync.py" + }, { "canonical": true, "clientMethod": { diff --git a/tests/unit/gapic/aiplatform_v1/test_deployment_resource_pool_service.py b/tests/unit/gapic/aiplatform_v1/test_deployment_resource_pool_service.py new file mode 100644 index 0000000000..fdf6eef3df --- 
/dev/null +++ b/tests/unit/gapic/aiplatform_v1/test_deployment_resource_pool_service.py @@ -0,0 +1,4812 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule +from proto.marshal.rules import wrappers + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.deployment_resource_pool_service import ( + DeploymentResourcePoolServiceAsyncClient, +) +from google.cloud.aiplatform_v1.services.deployment_resource_pool_service import ( + DeploymentResourcePoolServiceClient, +) +from 
google.cloud.aiplatform_v1.services.deployment_resource_pool_service import pagers +from google.cloud.aiplatform_v1.services.deployment_resource_pool_service import ( + transports, +) +from google.cloud.aiplatform_v1.types import accelerator_type +from google.cloud.aiplatform_v1.types import deployment_resource_pool +from google.cloud.aiplatform_v1.types import ( + deployment_resource_pool as gca_deployment_resource_pool, +) +from google.cloud.aiplatform_v1.types import deployment_resource_pool_service +from google.cloud.aiplatform_v1.types import endpoint +from google.cloud.aiplatform_v1.types import machine_resources +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert DeploymentResourcePoolServiceClient._get_default_mtls_endpoint(None) is None + assert ( + DeploymentResourcePoolServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + DeploymentResourcePoolServiceClient._get_default_mtls_endpoint( + api_mtls_endpoint + ) + == api_mtls_endpoint + ) + assert ( + DeploymentResourcePoolServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + DeploymentResourcePoolServiceClient._get_default_mtls_endpoint( + sandbox_mtls_endpoint + ) + == sandbox_mtls_endpoint + ) + assert ( + DeploymentResourcePoolServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (DeploymentResourcePoolServiceClient, "grpc"), + (DeploymentResourcePoolServiceAsyncClient, "grpc_asyncio"), + ], +) +def test_deployment_resource_pool_service_client_from_service_account_info( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ("aiplatform.googleapis.com:443") + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + 
(transports.DeploymentResourcePoolServiceGrpcTransport, "grpc"), + (transports.DeploymentResourcePoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_deployment_resource_pool_service_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (DeploymentResourcePoolServiceClient, "grpc"), + (DeploymentResourcePoolServiceAsyncClient, "grpc_asyncio"), + ], +) +def test_deployment_resource_pool_service_client_from_service_account_file( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ("aiplatform.googleapis.com:443") + + +def test_deployment_resource_pool_service_client_get_transport_class(): + transport = DeploymentResourcePoolServiceClient.get_transport_class() + available_transports = [ + 
transports.DeploymentResourcePoolServiceGrpcTransport, + ] + assert transport in available_transports + + transport = DeploymentResourcePoolServiceClient.get_transport_class("grpc") + assert transport == transports.DeploymentResourcePoolServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + DeploymentResourcePoolServiceClient, + transports.DeploymentResourcePoolServiceGrpcTransport, + "grpc", + ), + ( + DeploymentResourcePoolServiceAsyncClient, + transports.DeploymentResourcePoolServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + DeploymentResourcePoolServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(DeploymentResourcePoolServiceClient), +) +@mock.patch.object( + DeploymentResourcePoolServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(DeploymentResourcePoolServiceAsyncClient), +) +def test_deployment_resource_pool_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object( + DeploymentResourcePoolServiceClient, "get_transport_class" + ) as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object( + DeploymentResourcePoolServiceClient, "get_transport_class" + ) as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + DeploymentResourcePoolServiceClient, + transports.DeploymentResourcePoolServiceGrpcTransport, + "grpc", + "true", + ), + ( + DeploymentResourcePoolServiceAsyncClient, + transports.DeploymentResourcePoolServiceGrpcAsyncIOTransport, 
+ "grpc_asyncio", + "true", + ), + ( + DeploymentResourcePoolServiceClient, + transports.DeploymentResourcePoolServiceGrpcTransport, + "grpc", + "false", + ), + ( + DeploymentResourcePoolServiceAsyncClient, + transports.DeploymentResourcePoolServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + DeploymentResourcePoolServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(DeploymentResourcePoolServiceClient), +) +@mock.patch.object( + DeploymentResourcePoolServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(DeploymentResourcePoolServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_deployment_resource_pool_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class", + [DeploymentResourcePoolServiceClient, DeploymentResourcePoolServiceAsyncClient], +) +@mock.patch.object( + DeploymentResourcePoolServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(DeploymentResourcePoolServiceClient), +) +@mock.patch.object( + DeploymentResourcePoolServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(DeploymentResourcePoolServiceAsyncClient), +) +def test_deployment_resource_pool_service_client_get_mtls_endpoint_and_cert_source( + client_class, +): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + DeploymentResourcePoolServiceClient, + transports.DeploymentResourcePoolServiceGrpcTransport, + "grpc", + ), + ( + DeploymentResourcePoolServiceAsyncClient, + transports.DeploymentResourcePoolServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_deployment_resource_pool_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + DeploymentResourcePoolServiceClient, + transports.DeploymentResourcePoolServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + DeploymentResourcePoolServiceAsyncClient, + transports.DeploymentResourcePoolServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_deployment_resource_pool_service_client_client_options_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_deployment_resource_pool_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.aiplatform_v1.services.deployment_resource_pool_service.transports.DeploymentResourcePoolServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = DeploymentResourcePoolServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + DeploymentResourcePoolServiceClient, + transports.DeploymentResourcePoolServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + DeploymentResourcePoolServiceAsyncClient, + transports.DeploymentResourcePoolServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_deployment_resource_pool_service_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + deployment_resource_pool_service.CreateDeploymentResourcePoolRequest, + dict, + ], +) +def test_create_deployment_resource_pool(request_type, transport: str = "grpc"): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking 
out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_deployment_resource_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_deployment_resource_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert ( + args[0] + == deployment_resource_pool_service.CreateDeploymentResourcePoolRequest() + ) + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_deployment_resource_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_deployment_resource_pool), "__call__" + ) as call: + client.create_deployment_resource_pool() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert ( + args[0] + == deployment_resource_pool_service.CreateDeploymentResourcePoolRequest() + ) + + +@pytest.mark.asyncio +async def test_create_deployment_resource_pool_async( + transport: str = "grpc_asyncio", + request_type=deployment_resource_pool_service.CreateDeploymentResourcePoolRequest, +): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_deployment_resource_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_deployment_resource_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert ( + args[0] + == deployment_resource_pool_service.CreateDeploymentResourcePoolRequest() + ) + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_deployment_resource_pool_async_from_dict(): + await test_create_deployment_resource_pool_async(request_type=dict) + + +def test_create_deployment_resource_pool_field_headers(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = deployment_resource_pool_service.CreateDeploymentResourcePoolRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_deployment_resource_pool), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_deployment_resource_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_deployment_resource_pool_field_headers_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = deployment_resource_pool_service.CreateDeploymentResourcePoolRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_deployment_resource_pool), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.create_deployment_resource_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_create_deployment_resource_pool_flattened(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_deployment_resource_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.create_deployment_resource_pool( + parent="parent_value", + deployment_resource_pool=gca_deployment_resource_pool.DeploymentResourcePool( + name="name_value" + ), + deployment_resource_pool_id="deployment_resource_pool_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].deployment_resource_pool + mock_val = gca_deployment_resource_pool.DeploymentResourcePool( + name="name_value" + ) + assert arg == mock_val + arg = args[0].deployment_resource_pool_id + mock_val = "deployment_resource_pool_id_value" + assert arg == mock_val + + +def test_create_deployment_resource_pool_flattened_error(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_deployment_resource_pool( + deployment_resource_pool_service.CreateDeploymentResourcePoolRequest(), + parent="parent_value", + deployment_resource_pool=gca_deployment_resource_pool.DeploymentResourcePool( + name="name_value" + ), + deployment_resource_pool_id="deployment_resource_pool_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_deployment_resource_pool_flattened_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_deployment_resource_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_deployment_resource_pool( + parent="parent_value", + deployment_resource_pool=gca_deployment_resource_pool.DeploymentResourcePool( + name="name_value" + ), + deployment_resource_pool_id="deployment_resource_pool_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].deployment_resource_pool + mock_val = gca_deployment_resource_pool.DeploymentResourcePool( + name="name_value" + ) + assert arg == mock_val + arg = args[0].deployment_resource_pool_id + mock_val = "deployment_resource_pool_id_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_deployment_resource_pool_flattened_error_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_deployment_resource_pool( + deployment_resource_pool_service.CreateDeploymentResourcePoolRequest(), + parent="parent_value", + deployment_resource_pool=gca_deployment_resource_pool.DeploymentResourcePool( + name="name_value" + ), + deployment_resource_pool_id="deployment_resource_pool_id_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + deployment_resource_pool_service.GetDeploymentResourcePoolRequest, + dict, + ], +) +def test_get_deployment_resource_pool(request_type, transport: str = "grpc"): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_deployment_resource_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = deployment_resource_pool.DeploymentResourcePool( + name="name_value", + ) + response = client.get_deployment_resource_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert ( + args[0] + == deployment_resource_pool_service.GetDeploymentResourcePoolRequest() + ) + + # Establish that the response is the type that we expect. + assert isinstance(response, deployment_resource_pool.DeploymentResourcePool) + assert response.name == "name_value" + + +def test_get_deployment_resource_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_deployment_resource_pool), "__call__" + ) as call: + client.get_deployment_resource_pool() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert ( + args[0] + == deployment_resource_pool_service.GetDeploymentResourcePoolRequest() + ) + + +@pytest.mark.asyncio +async def test_get_deployment_resource_pool_async( + transport: str = "grpc_asyncio", + request_type=deployment_resource_pool_service.GetDeploymentResourcePoolRequest, +): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_deployment_resource_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + deployment_resource_pool.DeploymentResourcePool( + name="name_value", + ) + ) + response = await client.get_deployment_resource_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert ( + args[0] + == deployment_resource_pool_service.GetDeploymentResourcePoolRequest() + ) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, deployment_resource_pool.DeploymentResourcePool) + assert response.name == "name_value" + + +@pytest.mark.asyncio +async def test_get_deployment_resource_pool_async_from_dict(): + await test_get_deployment_resource_pool_async(request_type=dict) + + +def test_get_deployment_resource_pool_field_headers(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = deployment_resource_pool_service.GetDeploymentResourcePoolRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_deployment_resource_pool), "__call__" + ) as call: + call.return_value = deployment_resource_pool.DeploymentResourcePool() + client.get_deployment_resource_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_deployment_resource_pool_field_headers_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = deployment_resource_pool_service.GetDeploymentResourcePoolRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_deployment_resource_pool), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + deployment_resource_pool.DeploymentResourcePool() + ) + await client.get_deployment_resource_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_deployment_resource_pool_flattened(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_deployment_resource_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = deployment_resource_pool.DeploymentResourcePool() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_deployment_resource_pool( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_deployment_resource_pool_flattened_error(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_deployment_resource_pool( + deployment_resource_pool_service.GetDeploymentResourcePoolRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_deployment_resource_pool_flattened_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_deployment_resource_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = deployment_resource_pool.DeploymentResourcePool() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + deployment_resource_pool.DeploymentResourcePool() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_deployment_resource_pool( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_deployment_resource_pool_flattened_error_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_deployment_resource_pool( + deployment_resource_pool_service.GetDeploymentResourcePoolRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + deployment_resource_pool_service.ListDeploymentResourcePoolsRequest, + dict, + ], +) +def test_list_deployment_resource_pools(request_type, transport: str = "grpc"): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_deployment_resource_pools), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse( + next_page_token="next_page_token_value", + ) + ) + response = client.list_deployment_resource_pools(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert ( + args[0] + == deployment_resource_pool_service.ListDeploymentResourcePoolsRequest() + ) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDeploymentResourcePoolsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_deployment_resource_pools_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_deployment_resource_pools), "__call__" + ) as call: + client.list_deployment_resource_pools() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert ( + args[0] + == deployment_resource_pool_service.ListDeploymentResourcePoolsRequest() + ) + + +@pytest.mark.asyncio +async def test_list_deployment_resource_pools_async( + transport: str = "grpc_asyncio", + request_type=deployment_resource_pool_service.ListDeploymentResourcePoolsRequest, +): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_deployment_resource_pools), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_deployment_resource_pools(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert ( + args[0] + == deployment_resource_pool_service.ListDeploymentResourcePoolsRequest() + ) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListDeploymentResourcePoolsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_deployment_resource_pools_async_from_dict(): + await test_list_deployment_resource_pools_async(request_type=dict) + + +def test_list_deployment_resource_pools_field_headers(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = deployment_resource_pool_service.ListDeploymentResourcePoolsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_deployment_resource_pools), "__call__" + ) as call: + call.return_value = ( + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse() + ) + client.list_deployment_resource_pools(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_deployment_resource_pools_field_headers_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = deployment_resource_pool_service.ListDeploymentResourcePoolsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_deployment_resource_pools), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse() + ) + await client.list_deployment_resource_pools(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_deployment_resource_pools_flattened(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_deployment_resource_pools), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_deployment_resource_pools( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_deployment_resource_pools_flattened_error(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_deployment_resource_pools( + deployment_resource_pool_service.ListDeploymentResourcePoolsRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_deployment_resource_pools_flattened_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_deployment_resource_pools), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse() + ) + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_deployment_resource_pools( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_deployment_resource_pools_flattened_error_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_deployment_resource_pools( + deployment_resource_pool_service.ListDeploymentResourcePoolsRequest(), + parent="parent_value", + ) + + +def test_list_deployment_resource_pools_pager(transport_name: str = "grpc"): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_deployment_resource_pools), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse( + deployment_resource_pools=[ + deployment_resource_pool.DeploymentResourcePool(), + deployment_resource_pool.DeploymentResourcePool(), + deployment_resource_pool.DeploymentResourcePool(), + ], + next_page_token="abc", + ), + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse( + deployment_resource_pools=[], + next_page_token="def", + ), + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse( + deployment_resource_pools=[ + deployment_resource_pool.DeploymentResourcePool(), + ], + next_page_token="ghi", + ), + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse( + deployment_resource_pools=[ + deployment_resource_pool.DeploymentResourcePool(), + deployment_resource_pool.DeploymentResourcePool(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_deployment_resource_pools(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, deployment_resource_pool.DeploymentResourcePool) + for i in results + ) + + +def test_list_deployment_resource_pools_pages(transport_name: str = "grpc"): + client = DeploymentResourcePoolServiceClient( 
+ credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_deployment_resource_pools), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse( + deployment_resource_pools=[ + deployment_resource_pool.DeploymentResourcePool(), + deployment_resource_pool.DeploymentResourcePool(), + deployment_resource_pool.DeploymentResourcePool(), + ], + next_page_token="abc", + ), + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse( + deployment_resource_pools=[], + next_page_token="def", + ), + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse( + deployment_resource_pools=[ + deployment_resource_pool.DeploymentResourcePool(), + ], + next_page_token="ghi", + ), + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse( + deployment_resource_pools=[ + deployment_resource_pool.DeploymentResourcePool(), + deployment_resource_pool.DeploymentResourcePool(), + ], + ), + RuntimeError, + ) + pages = list(client.list_deployment_resource_pools(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_deployment_resource_pools_async_pager(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_deployment_resource_pools), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse( + deployment_resource_pools=[ + deployment_resource_pool.DeploymentResourcePool(), + deployment_resource_pool.DeploymentResourcePool(), + deployment_resource_pool.DeploymentResourcePool(), + ], + next_page_token="abc", + ), + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse( + deployment_resource_pools=[], + next_page_token="def", + ), + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse( + deployment_resource_pools=[ + deployment_resource_pool.DeploymentResourcePool(), + ], + next_page_token="ghi", + ), + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse( + deployment_resource_pools=[ + deployment_resource_pool.DeploymentResourcePool(), + deployment_resource_pool.DeploymentResourcePool(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_deployment_resource_pools( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all( + isinstance(i, deployment_resource_pool.DeploymentResourcePool) + for i in responses + ) + + +@pytest.mark.asyncio +async def test_list_deployment_resource_pools_async_pages(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_deployment_resource_pools), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse( + deployment_resource_pools=[ + deployment_resource_pool.DeploymentResourcePool(), + deployment_resource_pool.DeploymentResourcePool(), + deployment_resource_pool.DeploymentResourcePool(), + ], + next_page_token="abc", + ), + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse( + deployment_resource_pools=[], + next_page_token="def", + ), + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse( + deployment_resource_pools=[ + deployment_resource_pool.DeploymentResourcePool(), + ], + next_page_token="ghi", + ), + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse( + deployment_resource_pools=[ + deployment_resource_pool.DeploymentResourcePool(), + deployment_resource_pool.DeploymentResourcePool(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_deployment_resource_pools(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest, + dict, + ], +) +def test_delete_deployment_resource_pool(request_type, transport: str = "grpc"): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_deployment_resource_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_deployment_resource_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert ( + args[0] + == deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest() + ) + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_deployment_resource_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_deployment_resource_pool), "__call__" + ) as call: + client.delete_deployment_resource_pool() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert ( + args[0] + == deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest() + ) + + +@pytest.mark.asyncio +async def test_delete_deployment_resource_pool_async( + transport: str = "grpc_asyncio", + request_type=deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest, +): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_deployment_resource_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_deployment_resource_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert ( + args[0] + == deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest() + ) + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_deployment_resource_pool_async_from_dict(): + await test_delete_deployment_resource_pool_async(request_type=dict) + + +def test_delete_deployment_resource_pool_field_headers(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_deployment_resource_pool), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_deployment_resource_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_deployment_resource_pool_field_headers_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_deployment_resource_pool), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_deployment_resource_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_deployment_resource_pool_flattened(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_deployment_resource_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_deployment_resource_pool( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_deployment_resource_pool_flattened_error(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_deployment_resource_pool( + deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_deployment_resource_pool_flattened_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_deployment_resource_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_deployment_resource_pool( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_deployment_resource_pool_flattened_error_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_deployment_resource_pool( + deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + deployment_resource_pool_service.QueryDeployedModelsRequest, + dict, + ], +) +def test_query_deployed_models(request_type, transport: str = "grpc"): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_deployed_models), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + deployment_resource_pool_service.QueryDeployedModelsResponse( + next_page_token="next_page_token_value", + total_deployed_model_count=2769, + total_endpoint_count=2156, + ) + ) + response = client.query_deployed_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == deployment_resource_pool_service.QueryDeployedModelsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.QueryDeployedModelsPager) + assert response.next_page_token == "next_page_token_value" + assert response.total_deployed_model_count == 2769 + assert response.total_endpoint_count == 2156 + + +def test_query_deployed_models_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_deployed_models), "__call__" + ) as call: + client.query_deployed_models() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == deployment_resource_pool_service.QueryDeployedModelsRequest() + + +@pytest.mark.asyncio +async def test_query_deployed_models_async( + transport: str = "grpc_asyncio", + request_type=deployment_resource_pool_service.QueryDeployedModelsRequest, +): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_deployed_models), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + deployment_resource_pool_service.QueryDeployedModelsResponse( + next_page_token="next_page_token_value", + total_deployed_model_count=2769, + total_endpoint_count=2156, + ) + ) + response = await client.query_deployed_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == deployment_resource_pool_service.QueryDeployedModelsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.QueryDeployedModelsAsyncPager) + assert response.next_page_token == "next_page_token_value" + assert response.total_deployed_model_count == 2769 + assert response.total_endpoint_count == 2156 + + +@pytest.mark.asyncio +async def test_query_deployed_models_async_from_dict(): + await test_query_deployed_models_async(request_type=dict) + + +def test_query_deployed_models_field_headers(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = deployment_resource_pool_service.QueryDeployedModelsRequest() + + request.deployment_resource_pool = "deployment_resource_pool_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_deployed_models), "__call__" + ) as call: + call.return_value = ( + deployment_resource_pool_service.QueryDeployedModelsResponse() + ) + client.query_deployed_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "deployment_resource_pool=deployment_resource_pool_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_query_deployed_models_field_headers_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = deployment_resource_pool_service.QueryDeployedModelsRequest() + + request.deployment_resource_pool = "deployment_resource_pool_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.query_deployed_models), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + deployment_resource_pool_service.QueryDeployedModelsResponse() + ) + await client.query_deployed_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "deployment_resource_pool=deployment_resource_pool_value", + ) in kw["metadata"] + + +def test_query_deployed_models_flattened(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_deployed_models), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + deployment_resource_pool_service.QueryDeployedModelsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.query_deployed_models( + deployment_resource_pool="deployment_resource_pool_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].deployment_resource_pool + mock_val = "deployment_resource_pool_value" + assert arg == mock_val + + +def test_query_deployed_models_flattened_error(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.query_deployed_models( + deployment_resource_pool_service.QueryDeployedModelsRequest(), + deployment_resource_pool="deployment_resource_pool_value", + ) + + +@pytest.mark.asyncio +async def test_query_deployed_models_flattened_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_deployed_models), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + deployment_resource_pool_service.QueryDeployedModelsResponse() + ) + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + deployment_resource_pool_service.QueryDeployedModelsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.query_deployed_models( + deployment_resource_pool="deployment_resource_pool_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].deployment_resource_pool + mock_val = "deployment_resource_pool_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_query_deployed_models_flattened_error_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.query_deployed_models( + deployment_resource_pool_service.QueryDeployedModelsRequest(), + deployment_resource_pool="deployment_resource_pool_value", + ) + + +def test_query_deployed_models_pager(transport_name: str = "grpc"): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_deployed_models), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + deployment_resource_pool_service.QueryDeployedModelsResponse( + deployed_models=[ + endpoint.DeployedModel(), + endpoint.DeployedModel(), + endpoint.DeployedModel(), + ], + next_page_token="abc", + ), + deployment_resource_pool_service.QueryDeployedModelsResponse( + deployed_models=[], + next_page_token="def", + ), + deployment_resource_pool_service.QueryDeployedModelsResponse( + deployed_models=[ + endpoint.DeployedModel(), + ], + next_page_token="ghi", + ), + deployment_resource_pool_service.QueryDeployedModelsResponse( + deployed_models=[ + endpoint.DeployedModel(), + endpoint.DeployedModel(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("deployment_resource_pool", ""),) + ), + ) + pager = client.query_deployed_models(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, endpoint.DeployedModel) for i in results) + + +def test_query_deployed_models_pages(transport_name: str = "grpc"): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.query_deployed_models), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + deployment_resource_pool_service.QueryDeployedModelsResponse( + deployed_models=[ + endpoint.DeployedModel(), + endpoint.DeployedModel(), + endpoint.DeployedModel(), + ], + next_page_token="abc", + ), + deployment_resource_pool_service.QueryDeployedModelsResponse( + deployed_models=[], + next_page_token="def", + ), + deployment_resource_pool_service.QueryDeployedModelsResponse( + deployed_models=[ + endpoint.DeployedModel(), + ], + next_page_token="ghi", + ), + deployment_resource_pool_service.QueryDeployedModelsResponse( + deployed_models=[ + endpoint.DeployedModel(), + endpoint.DeployedModel(), + ], + ), + RuntimeError, + ) + pages = list(client.query_deployed_models(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_query_deployed_models_async_pager(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_deployed_models), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + deployment_resource_pool_service.QueryDeployedModelsResponse( + deployed_models=[ + endpoint.DeployedModel(), + endpoint.DeployedModel(), + endpoint.DeployedModel(), + ], + next_page_token="abc", + ), + deployment_resource_pool_service.QueryDeployedModelsResponse( + deployed_models=[], + next_page_token="def", + ), + deployment_resource_pool_service.QueryDeployedModelsResponse( + deployed_models=[ + endpoint.DeployedModel(), + ], + next_page_token="ghi", + ), + deployment_resource_pool_service.QueryDeployedModelsResponse( + deployed_models=[ + endpoint.DeployedModel(), + endpoint.DeployedModel(), + ], + ), + RuntimeError, + ) + async_pager = await client.query_deployed_models( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, endpoint.DeployedModel) for i in responses) + + +@pytest.mark.asyncio +async def test_query_deployed_models_async_pages(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.query_deployed_models), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + deployment_resource_pool_service.QueryDeployedModelsResponse( + deployed_models=[ + endpoint.DeployedModel(), + endpoint.DeployedModel(), + endpoint.DeployedModel(), + ], + next_page_token="abc", + ), + deployment_resource_pool_service.QueryDeployedModelsResponse( + deployed_models=[], + next_page_token="def", + ), + deployment_resource_pool_service.QueryDeployedModelsResponse( + deployed_models=[ + endpoint.DeployedModel(), + ], + next_page_token="ghi", + ), + deployment_resource_pool_service.QueryDeployedModelsResponse( + deployed_models=[ + endpoint.DeployedModel(), + endpoint.DeployedModel(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.query_deployed_models(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.DeploymentResourcePoolServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.DeploymentResourcePoolServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DeploymentResourcePoolServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. 
+ transport = transports.DeploymentResourcePoolServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = DeploymentResourcePoolServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = DeploymentResourcePoolServiceClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.DeploymentResourcePoolServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DeploymentResourcePoolServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.DeploymentResourcePoolServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = DeploymentResourcePoolServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.DeploymentResourcePoolServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.DeploymentResourcePoolServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.DeploymentResourcePoolServiceGrpcTransport, + transports.DeploymentResourcePoolServiceGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = DeploymentResourcePoolServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.DeploymentResourcePoolServiceGrpcTransport, + ) + + +def test_deployment_resource_pool_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.DeploymentResourcePoolServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_deployment_resource_pool_service_base_transport(): + # Instantiate the base transport. 
+ with mock.patch( + "google.cloud.aiplatform_v1.services.deployment_resource_pool_service.transports.DeploymentResourcePoolServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.DeploymentResourcePoolServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "create_deployment_resource_pool", + "get_deployment_resource_pool", + "list_deployment_resource_pools", + "delete_deployment_resource_pool", + "query_deployed_models", + "set_iam_policy", + "get_iam_policy", + "test_iam_permissions", + "get_location", + "list_locations", + "get_operation", + "wait_operation", + "cancel_operation", + "delete_operation", + "list_operations", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_deployment_resource_pool_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.cloud.aiplatform_v1.services.deployment_resource_pool_service.transports.DeploymentResourcePoolServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.DeploymentResourcePoolServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + 
load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_deployment_resource_pool_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.cloud.aiplatform_v1.services.deployment_resource_pool_service.transports.DeploymentResourcePoolServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.DeploymentResourcePoolServiceTransport() + adc.assert_called_once() + + +def test_deployment_resource_pool_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + DeploymentResourcePoolServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.DeploymentResourcePoolServiceGrpcTransport, + transports.DeploymentResourcePoolServiceGrpcAsyncIOTransport, + ], +) +def test_deployment_resource_pool_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.DeploymentResourcePoolServiceGrpcTransport, + transports.DeploymentResourcePoolServiceGrpcAsyncIOTransport, + ], +) +def test_deployment_resource_pool_service_transport_auth_gdch_credentials( + transport_class, +): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.DeploymentResourcePoolServiceGrpcTransport, grpc_helpers), + ( + transports.DeploymentResourcePoolServiceGrpcAsyncIOTransport, + grpc_helpers_async, + ), + ], +) +def test_deployment_resource_pool_service_transport_create_channel( + transport_class, grpc_helpers +): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.DeploymentResourcePoolServiceGrpcTransport, + transports.DeploymentResourcePoolServiceGrpcAsyncIOTransport, + ], +) +def test_deployment_resource_pool_service_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_deployment_resource_pool_service_host_no_port(transport_name): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com" + ), + transport=transport_name, + ) + assert client.transport._host == ("aiplatform.googleapis.com:443") + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_deployment_resource_pool_service_host_with_port(transport_name): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com:8000" + ), + transport=transport_name, + ) + assert client.transport._host == ("aiplatform.googleapis.com:8000") + + +def test_deployment_resource_pool_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.DeploymentResourcePoolServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_deployment_resource_pool_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.DeploymentResourcePoolServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.DeploymentResourcePoolServiceGrpcTransport, + transports.DeploymentResourcePoolServiceGrpcAsyncIOTransport, + ], +) +def test_deployment_resource_pool_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert 
bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.DeploymentResourcePoolServiceGrpcTransport, + transports.DeploymentResourcePoolServiceGrpcAsyncIOTransport, + ], +) +def test_deployment_resource_pool_service_transport_channel_mtls_with_adc( + transport_class, +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_deployment_resource_pool_service_grpc_lro_client(): + client = DeploymentResourcePoolServiceClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_deployment_resource_pool_service_grpc_lro_async_client(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_deployment_resource_pool_path(): + project = "squid" + location = "clam" + deployment_resource_pool = "whelk" + expected = "projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}".format( + project=project, + location=location, + deployment_resource_pool=deployment_resource_pool, + ) + actual = DeploymentResourcePoolServiceClient.deployment_resource_pool_path( + project, location, deployment_resource_pool + ) + assert expected == actual + + +def test_parse_deployment_resource_pool_path(): + expected = { + "project": "octopus", + "location": "oyster", + "deployment_resource_pool": "nudibranch", + } + path = DeploymentResourcePoolServiceClient.deployment_resource_pool_path(**expected) + + # Check that the path construction is reversible. 
+ actual = DeploymentResourcePoolServiceClient.parse_deployment_resource_pool_path( + path + ) + assert expected == actual + + +def test_endpoint_path(): + project = "cuttlefish" + location = "mussel" + endpoint = "winkle" + expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format( + project=project, + location=location, + endpoint=endpoint, + ) + actual = DeploymentResourcePoolServiceClient.endpoint_path( + project, location, endpoint + ) + assert expected == actual + + +def test_parse_endpoint_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "endpoint": "abalone", + } + path = DeploymentResourcePoolServiceClient.endpoint_path(**expected) + + # Check that the path construction is reversible. + actual = DeploymentResourcePoolServiceClient.parse_endpoint_path(path) + assert expected == actual + + +def test_model_path(): + project = "squid" + location = "clam" + model = "whelk" + expected = "projects/{project}/locations/{location}/models/{model}".format( + project=project, + location=location, + model=model, + ) + actual = DeploymentResourcePoolServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "octopus", + "location": "oyster", + "model": "nudibranch", + } + path = DeploymentResourcePoolServiceClient.model_path(**expected) + + # Check that the path construction is reversible. 
+ actual = DeploymentResourcePoolServiceClient.parse_model_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "cuttlefish" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = DeploymentResourcePoolServiceClient.common_billing_account_path( + billing_account + ) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "mussel", + } + path = DeploymentResourcePoolServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = DeploymentResourcePoolServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "winkle" + expected = "folders/{folder}".format( + folder=folder, + ) + actual = DeploymentResourcePoolServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nautilus", + } + path = DeploymentResourcePoolServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = DeploymentResourcePoolServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "scallop" + expected = "organizations/{organization}".format( + organization=organization, + ) + actual = DeploymentResourcePoolServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "abalone", + } + path = DeploymentResourcePoolServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = DeploymentResourcePoolServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "squid" + expected = "projects/{project}".format( + project=project, + ) + actual = DeploymentResourcePoolServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "clam", + } + path = DeploymentResourcePoolServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = DeploymentResourcePoolServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "whelk" + location = "octopus" + expected = "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + actual = DeploymentResourcePoolServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + } + path = DeploymentResourcePoolServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = DeploymentResourcePoolServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.DeploymentResourcePoolServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.DeploymentResourcePoolServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = DeploymentResourcePoolServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_delete_operation(transport: str = "grpc"): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_operation_async(transport: str = "grpc_asyncio"): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_operation_field_headers(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_delete_operation_from_dict(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_operation_async(transport: str = "grpc_asyncio"): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_operation_field_headers(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_cancel_operation_from_dict(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_wait_operation(transport: str = "grpc"): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. 
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc_asyncio"):  # renamed: was test_wait_operation, which shadowed the sync test above
+    client = DeploymentResourcePoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.WaitOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, operations_pb2.Operation)
+
+
+def test_wait_operation_field_headers():
+    client = DeploymentResourcePoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.WaitOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        call.return_value = operations_pb2.Operation()
+
+        client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_wait_operation_from_dict(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, operations_pb2.Operation) + + +@pytest.mark.asyncio +async def test_get_operation_async(transport: str = "grpc_asyncio"): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_get_operation_field_headers(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_get_operation_from_dict(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + response = client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +@pytest.mark.asyncio +async def test_list_operations_async(transport: str = "grpc_asyncio"): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_list_operations_field_headers(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_operations_from_dict(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +@pytest.mark.asyncio +async def test_list_locations_async(transport: str = "grpc_asyncio"): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +def test_list_locations_field_headers(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = locations_pb2.ListLocationsResponse() + + client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_locations_from_dict(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +def test_get_location_field_headers(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations/abc", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        await client.get_location(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+        _, _, kw = call.mock_calls[0]
+        assert (
+            "x-goog-request-params",
+            "name=locations/abc",
+        ) in kw["metadata"]
+
+
+def test_get_location_from_dict():
+    client = DeploymentResourcePoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:  # was list_locations: mock the stub under test
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = DeploymentResourcePoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:  # was list_locations: mock the stub under test
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+    client = DeploymentResourcePoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+ request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + response = client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Wrap it in an awaitable fake gRPC call for the async surface. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_set_iam_policy_field_headers(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_set_iam_policy_from_dict(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +def test_get_iam_policy(transport: str = "grpc"): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +def test_test_iam_permissions(transport: str = "grpc"): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = DeploymentResourcePoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = DeploymentResourcePoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + ( + DeploymentResourcePoolServiceClient, + transports.DeploymentResourcePoolServiceGrpcTransport, + ), + ( + DeploymentResourcePoolServiceAsyncClient, + transports.DeploymentResourcePoolServiceGrpcAsyncIOTransport, + ), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py b/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py index d8c84833d9..20af505eeb 100644 --- a/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py @@ -3506,10 +3506,38 @@ def test_endpoint_service_grpc_lro_async_client(): assert transport.operations_client is transport.operations_client -def test_endpoint_path(): +def test_deployment_resource_pool_path(): project = "squid" location = "clam" - endpoint = "whelk" + deployment_resource_pool = "whelk" + expected = "projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}".format( + project=project, + location=location, + 
deployment_resource_pool=deployment_resource_pool, + ) + actual = EndpointServiceClient.deployment_resource_pool_path( + project, location, deployment_resource_pool + ) + assert expected == actual + + +def test_parse_deployment_resource_pool_path(): + expected = { + "project": "octopus", + "location": "oyster", + "deployment_resource_pool": "nudibranch", + } + path = EndpointServiceClient.deployment_resource_pool_path(**expected) + + # Check that the path construction is reversible. + actual = EndpointServiceClient.parse_deployment_resource_pool_path(path) + assert expected == actual + + +def test_endpoint_path(): + project = "cuttlefish" + location = "mussel" + endpoint = "winkle" expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format( project=project, location=location, @@ -3521,9 +3549,9 @@ def test_endpoint_path(): def test_parse_endpoint_path(): expected = { - "project": "octopus", - "location": "oyster", - "endpoint": "nudibranch", + "project": "nautilus", + "location": "scallop", + "endpoint": "abalone", } path = EndpointServiceClient.endpoint_path(**expected) @@ -3533,9 +3561,9 @@ def test_parse_endpoint_path(): def test_model_path(): - project = "cuttlefish" - location = "mussel" - model = "winkle" + project = "squid" + location = "clam" + model = "whelk" expected = "projects/{project}/locations/{location}/models/{model}".format( project=project, location=location, @@ -3547,9 +3575,9 @@ def test_model_path(): def test_parse_model_path(): expected = { - "project": "nautilus", - "location": "scallop", - "model": "abalone", + "project": "octopus", + "location": "oyster", + "model": "nudibranch", } path = EndpointServiceClient.model_path(**expected) @@ -3559,9 +3587,9 @@ def test_parse_model_path(): def test_model_deployment_monitoring_job_path(): - project = "squid" - location = "clam" - model_deployment_monitoring_job = "whelk" + project = "cuttlefish" + location = "mussel" + model_deployment_monitoring_job = "winkle" expected = 
"projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format( project=project, location=location, @@ -3575,9 +3603,9 @@ def test_model_deployment_monitoring_job_path(): def test_parse_model_deployment_monitoring_job_path(): expected = { - "project": "octopus", - "location": "oyster", - "model_deployment_monitoring_job": "nudibranch", + "project": "nautilus", + "location": "scallop", + "model_deployment_monitoring_job": "abalone", } path = EndpointServiceClient.model_deployment_monitoring_job_path(**expected) @@ -3587,8 +3615,8 @@ def test_parse_model_deployment_monitoring_job_path(): def test_network_path(): - project = "cuttlefish" - network = "mussel" + project = "squid" + network = "clam" expected = "projects/{project}/global/networks/{network}".format( project=project, network=network, @@ -3599,8 +3627,8 @@ def test_network_path(): def test_parse_network_path(): expected = { - "project": "winkle", - "network": "nautilus", + "project": "whelk", + "network": "octopus", } path = EndpointServiceClient.network_path(**expected) @@ -3610,7 +3638,7 @@ def test_parse_network_path(): def test_common_billing_account_path(): - billing_account = "scallop" + billing_account = "oyster" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -3620,7 +3648,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "abalone", + "billing_account": "nudibranch", } path = EndpointServiceClient.common_billing_account_path(**expected) @@ -3630,7 +3658,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "squid" + folder = "cuttlefish" expected = "folders/{folder}".format( folder=folder, ) @@ -3640,7 +3668,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "clam", + "folder": "mussel", } path = EndpointServiceClient.common_folder_path(**expected) 
@@ -3650,7 +3678,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "whelk" + organization = "winkle" expected = "organizations/{organization}".format( organization=organization, ) @@ -3660,7 +3688,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "octopus", + "organization": "nautilus", } path = EndpointServiceClient.common_organization_path(**expected) @@ -3670,7 +3698,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "oyster" + project = "scallop" expected = "projects/{project}".format( project=project, ) @@ -3680,7 +3708,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "nudibranch", + "project": "abalone", } path = EndpointServiceClient.common_project_path(**expected) @@ -3690,8 +3718,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "cuttlefish" - location = "mussel" + project = "squid" + location = "clam" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -3702,8 +3730,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "winkle", - "location": "nautilus", + "project": "whelk", + "location": "octopus", } path = EndpointServiceClient.common_location_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1/test_llm_utility_service.py b/tests/unit/gapic/aiplatform_v1/test_llm_utility_service.py index aea2cd8ded..90265f7fbe 100644 --- a/tests/unit/gapic/aiplatform_v1/test_llm_utility_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_llm_utility_service.py @@ -44,14 +44,17 @@ LlmUtilityServiceClient, ) from google.cloud.aiplatform_v1.services.llm_utility_service import transports +from google.cloud.aiplatform_v1.types import content from google.cloud.aiplatform_v1.types import llm_utility_service from 
google.cloud.aiplatform_v1.types import prediction_service +from google.cloud.aiplatform_v1.types import tool from google.cloud.location import locations_pb2 from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import options_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore from google.oauth2 import service_account +from google.protobuf import duration_pb2 # type: ignore from google.protobuf import struct_pb2 # type: ignore import google.auth diff --git a/tests/unit/gapic/aiplatform_v1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1/test_migration_service.py index 6de611bf1d..fcb9f0977d 100644 --- a/tests/unit/gapic/aiplatform_v1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_migration_service.py @@ -2006,22 +2006,19 @@ def test_parse_annotated_dataset_path(): def test_dataset_path(): project = "cuttlefish" - location = "mussel" - dataset = "winkle" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "nautilus", - "location": "scallop", - "dataset": "abalone", + "project": "winkle", + "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) @@ -2031,19 +2028,22 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "squid" - dataset = "clam" - expected = "projects/{project}/datasets/{dataset}".format( + project = "scallop" + location = "abalone" + dataset = "squid" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) - 
actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "whelk", + "project": "clam", + "location": "whelk", "dataset": "octopus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1/test_model_garden_service.py b/tests/unit/gapic/aiplatform_v1/test_model_garden_service.py index bcd0870c05..7d3159ea3f 100644 --- a/tests/unit/gapic/aiplatform_v1/test_model_garden_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_model_garden_service.py @@ -733,6 +733,7 @@ def test_get_publisher_model(request_type, transport: str = "grpc"): open_source_category=publisher_model.PublisherModel.OpenSourceCategory.PROPRIETARY, frameworks=["frameworks_value"], launch_stage=publisher_model.PublisherModel.LaunchStage.EXPERIMENTAL, + version_state=publisher_model.PublisherModel.VersionState.VERSION_STATE_STABLE, publisher_model_template="publisher_model_template_value", ) response = client.get_publisher_model(request) @@ -754,6 +755,10 @@ def test_get_publisher_model(request_type, transport: str = "grpc"): assert ( response.launch_stage == publisher_model.PublisherModel.LaunchStage.EXPERIMENTAL ) + assert ( + response.version_state + == publisher_model.PublisherModel.VersionState.VERSION_STATE_STABLE + ) assert response.publisher_model_template == "publisher_model_template_value" @@ -801,6 +806,7 @@ async def test_get_publisher_model_async( open_source_category=publisher_model.PublisherModel.OpenSourceCategory.PROPRIETARY, frameworks=["frameworks_value"], launch_stage=publisher_model.PublisherModel.LaunchStage.EXPERIMENTAL, + version_state=publisher_model.PublisherModel.VersionState.VERSION_STATE_STABLE, publisher_model_template="publisher_model_template_value", ) ) @@ -823,6 +829,10 @@ async def test_get_publisher_model_async( assert ( response.launch_stage == 
publisher_model.PublisherModel.LaunchStage.EXPERIMENTAL ) + assert ( + response.version_state + == publisher_model.PublisherModel.VersionState.VERSION_STATE_STABLE + ) assert response.publisher_model_template == "publisher_model_template_value" diff --git a/tests/unit/gapic/aiplatform_v1/test_prediction_service.py b/tests/unit/gapic/aiplatform_v1/test_prediction_service.py index aeff05b0c0..fd914786ac 100644 --- a/tests/unit/gapic/aiplatform_v1/test_prediction_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_prediction_service.py @@ -45,9 +45,12 @@ PredictionServiceClient, ) from google.cloud.aiplatform_v1.services.prediction_service import transports +from google.cloud.aiplatform_v1.types import content from google.cloud.aiplatform_v1.types import explanation from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import openapi from google.cloud.aiplatform_v1.types import prediction_service +from google.cloud.aiplatform_v1.types import tool from google.cloud.aiplatform_v1.types import types from google.cloud.location import locations_pb2 from google.iam.v1 import iam_policy_pb2 # type: ignore @@ -56,6 +59,7 @@ from google.longrunning import operations_pb2 # type: ignore from google.oauth2 import service_account from google.protobuf import any_pb2 # type: ignore +from google.protobuf import duration_pb2 # type: ignore from google.protobuf import struct_pb2 # type: ignore import google.auth @@ -1954,6 +1958,259 @@ async def test_explain_flattened_error_async(): ) +@pytest.mark.parametrize( + "request_type", + [ + prediction_service.GenerateContentRequest, + dict, + ], +) +def test_stream_generate_content(request_type, transport: str = "grpc"): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter([prediction_service.GenerateContentResponse()]) + response = client.stream_generate_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.GenerateContentRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, prediction_service.GenerateContentResponse) + + +def test_stream_generate_content_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + client.stream_generate_content() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.GenerateContentRequest() + + +@pytest.mark.asyncio +async def test_stream_generate_content_async( + transport: str = "grpc_asyncio", + request_type=prediction_service.GenerateContentRequest, +): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[prediction_service.GenerateContentResponse()] + ) + response = await client.stream_generate_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.GenerateContentRequest() + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, prediction_service.GenerateContentResponse) + + +@pytest.mark.asyncio +async def test_stream_generate_content_async_from_dict(): + await test_stream_generate_content_async(request_type=dict) + + +def test_stream_generate_content_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.GenerateContentRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + call.return_value = iter([prediction_service.GenerateContentResponse()]) + client.stream_generate_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_stream_generate_content_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.GenerateContentRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[prediction_service.GenerateContentResponse()] + ) + await client.stream_generate_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +def test_stream_generate_content_flattened(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter([prediction_service.GenerateContentResponse()]) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.stream_generate_content( + model="model_value", + contents=[content.Content(role="role_value")], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].contents + mock_val = [content.Content(role="role_value")] + assert arg == mock_val + + +def test_stream_generate_content_flattened_error(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.stream_generate_content( + prediction_service.GenerateContentRequest(), + model="model_value", + contents=[content.Content(role="role_value")], + ) + + +@pytest.mark.asyncio +async def test_stream_generate_content_flattened_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter([prediction_service.GenerateContentResponse()]) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.stream_generate_content( + model="model_value", + contents=[content.Content(role="role_value")], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].contents + mock_val = [content.Content(role="role_value")] + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_stream_generate_content_flattened_error_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.stream_generate_content( + prediction_service.GenerateContentRequest(), + model="model_value", + contents=[content.Content(role="role_value")], + ) + + def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.PredictionServiceGrpcTransport( @@ -2099,6 +2356,7 @@ def test_prediction_service_base_transport(): "server_streaming_predict", "streaming_raw_predict", "explain", + "stream_generate_content", "set_iam_policy", "get_iam_policy", "test_iam_permissions", diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py index ca2cf3c25f..f83a664cea 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py @@ -2034,22 +2034,19 @@ def test_parse_dataset_path(): def test_dataset_path(): project = "squid" - location = "clam" - dataset = "whelk" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + dataset = "clam" + expected = "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": 
"octopus", - "location": "oyster", - "dataset": "nudibranch", + "project": "whelk", + "dataset": "octopus", } path = MigrationServiceClient.dataset_path(**expected) @@ -2059,19 +2056,22 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "cuttlefish" - dataset = "mussel" - expected = "projects/{project}/datasets/{dataset}".format( + project = "oyster" + location = "nudibranch" + dataset = "cuttlefish" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "winkle", + "project": "mussel", + "location": "winkle", "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_model_garden_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_model_garden_service.py index 5ab399eaf7..96de11e68b 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_model_garden_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_model_garden_service.py @@ -734,6 +734,7 @@ def test_get_publisher_model(request_type, transport: str = "grpc"): open_source_category=publisher_model.PublisherModel.OpenSourceCategory.PROPRIETARY, frameworks=["frameworks_value"], launch_stage=publisher_model.PublisherModel.LaunchStage.EXPERIMENTAL, + version_state=publisher_model.PublisherModel.VersionState.VERSION_STATE_STABLE, publisher_model_template="publisher_model_template_value", ) response = client.get_publisher_model(request) @@ -755,6 +756,10 @@ def test_get_publisher_model(request_type, transport: str = "grpc"): assert ( response.launch_stage == publisher_model.PublisherModel.LaunchStage.EXPERIMENTAL ) + assert ( + response.version_state + == publisher_model.PublisherModel.VersionState.VERSION_STATE_STABLE 
+ ) assert response.publisher_model_template == "publisher_model_template_value" @@ -802,6 +807,7 @@ async def test_get_publisher_model_async( open_source_category=publisher_model.PublisherModel.OpenSourceCategory.PROPRIETARY, frameworks=["frameworks_value"], launch_stage=publisher_model.PublisherModel.LaunchStage.EXPERIMENTAL, + version_state=publisher_model.PublisherModel.VersionState.VERSION_STATE_STABLE, publisher_model_template="publisher_model_template_value", ) ) @@ -824,6 +830,10 @@ async def test_get_publisher_model_async( assert ( response.launch_stage == publisher_model.PublisherModel.LaunchStage.EXPERIMENTAL ) + assert ( + response.version_state + == publisher_model.PublisherModel.VersionState.VERSION_STATE_STABLE + ) assert response.publisher_model_template == "publisher_model_template_value" diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py index 694d9a044c..107fa4c1f2 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py @@ -45,9 +45,12 @@ PredictionServiceClient, ) from google.cloud.aiplatform_v1beta1.services.prediction_service import transports +from google.cloud.aiplatform_v1beta1.types import content from google.cloud.aiplatform_v1beta1.types import explanation from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import openapi from google.cloud.aiplatform_v1beta1.types import prediction_service +from google.cloud.aiplatform_v1beta1.types import tool from google.cloud.aiplatform_v1beta1.types import types from google.cloud.location import locations_pb2 from google.iam.v1 import iam_policy_pb2 # type: ignore @@ -56,6 +59,7 @@ from google.longrunning import operations_pb2 # type: ignore from google.oauth2 import service_account from google.protobuf import any_pb2 # type: ignore +from google.protobuf import duration_pb2 # 
type: ignore from google.protobuf import struct_pb2 # type: ignore import google.auth @@ -2200,6 +2204,259 @@ async def test_count_tokens_flattened_error_async(): ) +@pytest.mark.parametrize( + "request_type", + [ + prediction_service.GenerateContentRequest, + dict, + ], +) +def test_stream_generate_content(request_type, transport: str = "grpc"): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter([prediction_service.GenerateContentResponse()]) + response = client.stream_generate_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.GenerateContentRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, prediction_service.GenerateContentResponse) + + +def test_stream_generate_content_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + client.stream_generate_content() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.GenerateContentRequest() + + +@pytest.mark.asyncio +async def test_stream_generate_content_async( + transport: str = "grpc_asyncio", + request_type=prediction_service.GenerateContentRequest, +): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[prediction_service.GenerateContentResponse()] + ) + response = await client.stream_generate_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.GenerateContentRequest() + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, prediction_service.GenerateContentResponse) + + +@pytest.mark.asyncio +async def test_stream_generate_content_async_from_dict(): + await test_stream_generate_content_async(request_type=dict) + + +def test_stream_generate_content_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = prediction_service.GenerateContentRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + call.return_value = iter([prediction_service.GenerateContentResponse()]) + client.stream_generate_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_stream_generate_content_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.GenerateContentRequest() + + request.model = "model_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[prediction_service.GenerateContentResponse()] + ) + await client.stream_generate_content(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model=model_value", + ) in kw["metadata"] + + +def test_stream_generate_content_flattened(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter([prediction_service.GenerateContentResponse()]) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.stream_generate_content( + model="model_value", + contents=[content.Content(role="role_value")], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].contents + mock_val = [content.Content(role="role_value")] + assert arg == mock_val + + +def test_stream_generate_content_flattened_error(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.stream_generate_content( + prediction_service.GenerateContentRequest(), + model="model_value", + contents=[content.Content(role="role_value")], + ) + + +@pytest.mark.asyncio +async def test_stream_generate_content_flattened_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stream_generate_content), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iter([prediction_service.GenerateContentResponse()]) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.stream_generate_content( + model="model_value", + contents=[content.Content(role="role_value")], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model + mock_val = "model_value" + assert arg == mock_val + arg = args[0].contents + mock_val = [content.Content(role="role_value")] + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_stream_generate_content_flattened_error_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.stream_generate_content( + prediction_service.GenerateContentRequest(), + model="model_value", + contents=[content.Content(role="role_value")], + ) + + def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.PredictionServiceGrpcTransport( @@ -2346,6 +2603,7 @@ def test_prediction_service_base_transport(): "streaming_raw_predict", "explain", "count_tokens", + "stream_generate_content", "set_iam_policy", "get_iam_policy", "test_iam_permissions",