diff --git a/google/cloud/aiplatform/initializer.py b/google/cloud/aiplatform/initializer.py index 55d634c192..aad5a4061c 100644 --- a/google/cloud/aiplatform/initializer.py +++ b/google/cloud/aiplatform/initializer.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -51,6 +51,7 @@ def __init__(self): self._staging_bucket = None self._credentials = None self._encryption_spec_key_name = None + self._network = None def init( self, @@ -65,6 +66,7 @@ def init( staging_bucket: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, encryption_spec_key_name: Optional[str] = None, + network: Optional[str] = None, ): """Updates common initialization parameters with provided options. @@ -95,6 +97,12 @@ def init( resource is created. If set, this resource and all sub-resources will be secured by this key. + network (str): + Optional. The full name of the Compute Engine network to which jobs + and resources should be peered. E.g. "projects/12345/global/networks/myVPC". + Private services access must already be configured for the network. + If specified, all eligible jobs and resources created will be peered + with this VPC. Raises: ValueError: If experiment_description is provided but experiment is not. 
@@ -130,6 +138,8 @@ def init( self._credentials = credentials if encryption_spec_key_name: self._encryption_spec_key_name = encryption_spec_key_name + if network is not None: + self._network = network if experiment: metadata._experiment_tracker.set_experiment( @@ -237,6 +247,11 @@ def encryption_spec_key_name(self) -> Optional[str]: """Default encryption spec key name, if provided.""" return self._encryption_spec_key_name + @property + def network(self) -> Optional[str]: + """Default Compute Engine network to peer to, if provided.""" + return self._network + @property def experiment_name(self) -> Optional[str]: """Default experiment name, if provided.""" diff --git a/google/cloud/aiplatform/jobs.py b/google/cloud/aiplatform/jobs.py index 5ab03b75b5..7c3cad80a8 100644 --- a/google/cloud/aiplatform/jobs.py +++ b/google/cloud/aiplatform/jobs.py @@ -1515,7 +1515,6 @@ def from_local_script( staging_bucket=staging_bucket, ) - @base.optional_sync() def run( self, service_account: Optional[str] = None, @@ -1537,7 +1536,8 @@ def run( Optional. The full name of the Compute Engine network to which the job should be peered. For example, projects/12345/global/networks/myVPC. Private services access must already be configured for the network. - If left unspecified, the job is not peered with any network. + If left unspecified, the network set in aiplatform.init will be used. + Otherwise, the job is not peered with any network. timeout (int): The maximum job running time in seconds. The default is 7 days. restart_job_on_worker_restart (bool): @@ -1570,7 +1570,73 @@ def run( create_request_timeout (float): Optional. The timeout for the create request in seconds. 
""" + network = network or initializer.global_config.network + + self._run( + service_account=service_account, + network=network, + timeout=timeout, + restart_job_on_worker_restart=restart_job_on_worker_restart, + enable_web_access=enable_web_access, + tensorboard=tensorboard, + sync=sync, + create_request_timeout=create_request_timeout, + ) + + @base.optional_sync() + def _run( + self, + service_account: Optional[str] = None, + network: Optional[str] = None, + timeout: Optional[int] = None, + restart_job_on_worker_restart: bool = False, + enable_web_access: bool = False, + tensorboard: Optional[str] = None, + sync: bool = True, + create_request_timeout: Optional[float] = None, + ) -> None: + """Helper method to ensure network synchronization and to run the configured CustomJob. + + Args: + service_account (str): + Optional. Specifies the service account for workload run-as account. + Users submitting jobs must have act-as permission on this run-as account. + network (str): + Optional. The full name of the Compute Engine network to which the job + should be peered. For example, projects/12345/global/networks/myVPC. + Private services access must already be configured for the network. + timeout (int): + The maximum job running time in seconds. The default is 7 days. + restart_job_on_worker_restart (bool): + Restarts the entire CustomJob if a worker + gets restarted. This feature can be used by + distributed training jobs that are not resilient + to workers leaving and joining a job. + enable_web_access (bool): + Whether you want Vertex AI to enable interactive shell access + to training containers. + https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell + tensorboard (str): + Optional. The name of a Vertex AI + [Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard] + resource to which this CustomJob will upload Tensorboard + logs. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + The training script should write Tensorboard to following Vertex AI environment + variable: + + AIP_TENSORBOARD_LOG_DIR + + `service_account` is required with provided `tensorboard`. + For more information on configuring your service account please visit: + https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training + sync (bool): + Whether to execute this method synchronously. If False, this method + will unblock and it will be executed in a concurrent Future. + create_request_timeout (float): + Optional. The timeout for the create request in seconds. + """ if service_account: self._gca_resource.job_spec.service_account = service_account @@ -1907,7 +1973,6 @@ def _log_web_access_uris(self): ) self._logged_web_access_uris.add(uri) - @base.optional_sync() def run( self, service_account: Optional[str] = None, @@ -1929,7 +1994,8 @@ def run( Optional. The full name of the Compute Engine network to which the job should be peered. For example, projects/12345/global/networks/myVPC. Private services access must already be configured for the network. - If left unspecified, the job is not peered with any network. + If left unspecified, the network set in aiplatform.init will be used. + Otherwise, the job is not peered with any network. timeout (int): Optional. The maximum job running time in seconds. The default is 7 days. restart_job_on_worker_restart (bool): @@ -1962,7 +2028,73 @@ def run( create_request_timeout (float): Optional. The timeout for the create request in seconds. 
""" + network = network or initializer.global_config.network + + self._run( + service_account=service_account, + network=network, + timeout=timeout, + restart_job_on_worker_restart=restart_job_on_worker_restart, + enable_web_access=enable_web_access, + tensorboard=tensorboard, + sync=sync, + create_request_timeout=create_request_timeout, + ) + + @base.optional_sync() + def _run( + self, + service_account: Optional[str] = None, + network: Optional[str] = None, + timeout: Optional[int] = None, # seconds + restart_job_on_worker_restart: bool = False, + enable_web_access: bool = False, + tensorboard: Optional[str] = None, + sync: bool = True, + create_request_timeout: Optional[float] = None, + ) -> None: + """Helper method to ensure network synchronization and to run the configured CustomJob. + + Args: + service_account (str): + Optional. Specifies the service account for workload run-as account. + Users submitting jobs must have act-as permission on this run-as account. + network (str): + Optional. The full name of the Compute Engine network to which the job + should be peered. For example, projects/12345/global/networks/myVPC. + Private services access must already be configured for the network. + timeout (int): + Optional. The maximum job running time in seconds. The default is 7 days. + restart_job_on_worker_restart (bool): + Restarts the entire CustomJob if a worker + gets restarted. This feature can be used by + distributed training jobs that are not resilient + to workers leaving and joining a job. + enable_web_access (bool): + Whether you want Vertex AI to enable interactive shell access + to training containers. + https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell + tensorboard (str): + Optional. The name of a Vertex AI + [Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard] + resource to which this CustomJob will upload Tensorboard + logs. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + The training script should write Tensorboard to following Vertex AI environment + variable: + + AIP_TENSORBOARD_LOG_DIR + + `service_account` is required with provided `tensorboard`. + For more information on configuring your service account please visit: + https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training + sync (bool): + Whether to execute this method synchronously. If False, this method + will unblock and it will be executed in a concurrent Future. + create_request_timeout (float): + Optional. The timeout for the create request in seconds. + """ if service_account: self._gca_resource.trial_job_spec.service_account = service_account diff --git a/google/cloud/aiplatform/matching_engine/matching_engine_index_endpoint.py b/google/cloud/aiplatform/matching_engine/matching_engine_index_endpoint.py index 6e6423e99e..7b82a482c5 100644 --- a/google/cloud/aiplatform/matching_engine/matching_engine_index_endpoint.py +++ b/google/cloud/aiplatform/matching_engine/matching_engine_index_endpoint.py @@ -126,11 +126,10 @@ def __init__( self._gca_resource = self._get_gca_resource(resource_name=index_endpoint_name) @classmethod - @base.optional_sync() def create( cls, display_name: str, - network: str, + network: Optional[str] = None, description: Optional[str] = None, labels: Optional[Dict[str, str]] = None, project: Optional[str] = None, @@ -153,13 +152,12 @@ def create( The name can be up to 128 characters long and can be consist of any UTF-8 characters. network (str): - Required. The full name of the Google Compute Engine + Optional. The full name of the Google Compute Engine `network `__ to which the IndexEndpoint should be peered. - Private services access must already be configured for the - network. If left unspecified, the Endpoint is not peered - with any network. + Private services access must already be configured for the network. 
+            If left unspecified, the network set with aiplatform.init will be used. `Format `__: projects/{project}/global/networks/{network}. Where @@ -182,13 +180,13 @@ def create( System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. project (str): - Optional. Project to create EntityType in. If not set, project + Optional. Project to create IndexEndpoint in. If not set, project set in aiplatform.init will be used. location (str): - Optional. Location to create EntityType in. If not set, location + Optional. Location to create IndexEndpoint in. If not set, location set in aiplatform.init will be used. credentials (auth_credentials.Credentials): - Optional. Custom credentials to use to create EntityTypes. Overrides + Optional. Custom credentials to use to create IndexEndpoints. Overrides credentials set in aiplatform.init. request_metadata (Sequence[Tuple[str, str]]): Optional. Strings which should be sent along with the request as metadata. @@ -200,11 +198,98 @@ Returns: MatchingEngineIndexEndpoint - IndexEndpoint resource object + Raises: + ValueError: A network must be provided when creating an IndexEndpoint.
""" - gapic_index_endpoint = gca_matching_engine_index_endpoint.IndexEndpoint( + network = network or initializer.global_config.network + + if not network: + raise ValueError( + "Please provide `network` argument or set network" + "using aiplatform.init(network=...)" + ) + + return cls._create( display_name=display_name, - description=description, network=network, + description=description, + labels=labels, + project=project, + location=location, + credentials=credentials, + request_metadata=request_metadata, + sync=sync, + ) + + @classmethod + @base.optional_sync() + def _create( + cls, + display_name: str, + network: Optional[str] = None, + description: Optional[str] = None, + labels: Optional[Dict[str, str]] = None, + project: Optional[str] = None, + location: Optional[str] = None, + credentials: Optional[auth_credentials.Credentials] = None, + request_metadata: Optional[Sequence[Tuple[str, str]]] = (), + sync: bool = True, + ) -> "MatchingEngineIndexEndpoint": + """Helper method to ensure network synchronization and to + create a MatchingEngineIndexEndpoint resource. + + Args: + display_name (str): + Required. The display name of the IndexEndpoint. + The name can be up to 128 characters long and + can be consist of any UTF-8 characters. + network (str): + Optional. The full name of the Google Compute Engine + `network `__ + to which the IndexEndpoint should be peered. + Private services access must already be configured for the network. + + `Format `__: + projects/{project}/global/networks/{network}. Where + {project} is a project number, as in '12345', and {network} + is network name. + description (str): + Optional. The description of the IndexEndpoint. + labels (Dict[str, str]): + Optional. The labels with user-defined + metadata to organize your IndexEndpoint. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. 
International characters + are allowed. + See https://goo.gl/xmQnxf for more information + on and examples of labels. No more than 64 user + labels can be associated with one + IndexEndpoint (System labels are excluded)." + System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + project (str): + Optional. Project to create IndexEndpoint in. If not set, project + set in aiplatform.init will be used. + location (str): + Optional. Location to create IndexEndpoint in. If not set, location + set in aiplatform.init will be used. + credentials (auth_credentials.Credentials): + Optional. Custom credentials to use to create IndexEndpoints. Overrides + credentials set in aiplatform.init. + request_metadata (Sequence[Tuple[str, str]]): + Optional. Strings which should be sent along with the request as metadata. + sync (bool): + Optional. Whether to execute this creation synchronously. If False, this method + will be executed in concurrent Future and any downstream object will + be immediately returned and synced when the Future has completed. + + Returns: + MatchingEngineIndexEndpoint - IndexEndpoint resource object + """ + gapic_index_endpoint = gca_matching_engine_index_endpoint.IndexEndpoint( + display_name=display_name, description=description, network=network ) if labels: diff --git a/google/cloud/aiplatform/models.py b/google/cloud/aiplatform/models.py index d8e6496eb2..9b41004ad5 100644 --- a/google/cloud/aiplatform/models.py +++ b/google/cloud/aiplatform/models.py @@ -430,10 +430,8 @@ def _create( Optional. The full name of the Compute Engine network to which this Endpoint will be peered. E.g. "projects/12345/global/networks/myVPC". Private services access must already be configured for the network. - If left unspecified, the job is not peered with any network or - the network set in aiplatform.init will be used. - If set, this will be a PrivateEndpoint. 
Read more about PrivateEndpoints - [in the documentation](https://cloud.google.com/vertex-ai/docs/predictions/using-private-endpoints) + Read more about PrivateEndpoints + [in the documentation](https://cloud.google.com/vertex-ai/docs/predictions/using-private-endpoints). sync (bool): Whether to create this endpoint synchronously. create_request_timeout (float): @@ -1015,8 +1013,6 @@ def _deploy_call( Optional. The full name of the Compute Engine network to which this Endpoint will be peered. E.g. "projects/123/global/networks/my_vpc". Private services access must already be configured for the network. - If left unspecified, the job is not peered with any network or - the network set in aiplatform.init will be used. deployed_model_display_name (str): Optional. The display name of the DeployedModel. If not provided upon creation, the Model's display_name is used. @@ -1885,7 +1881,7 @@ def create( Optional. The full name of the Compute Engine network to which this Endpoint will be peered. E.g. "projects/123456789123/global/networks/my_vpc". Private services access must already be configured for the network. - If not set, network set in aiplatform.init will be used. + If left unspecified, the network set with aiplatform.init will be used. description (str): Optional. The description of the Endpoint. labels (Dict[str, str]): @@ -1932,10 +1928,11 @@ def create( project = project or initializer.global_config.project location = location or initializer.global_config.location + network = network or initializer.global_config.network if not network: raise ValueError( - "Please provide required argument `network` or set " + "Please provide required argument `network` or set network " "using aiplatform.init(network=...)" ) @@ -3246,11 +3243,10 @@ def deploy( Overrides encryption_spec_key_name set in aiplatform.init. network (str): Optional. The full name of the Compute Engine network to which - this Endpoint will be peered. E.g. "projects/12345/global/networks/myVPC".
+ the Endpoint, if created, will be peered to. E.g. "projects/12345/global/networks/myVPC". Private services access must already be configured for the network. - If left unspecified, the job is not peered with any network or - the network set in aiplatform.init will be used. - If set, a PrivateEndpoint will be created. Read more about PrivateEndpoints + If set or aiplatform.init(network=...) has been set, a PrivateEndpoint will be created. + If left unspecified, an Endpoint will be created. Read more about PrivateEndpoints [in the documentation](https://cloud.google.com/vertex-ai/docs/predictions/using-private-endpoints). sync (bool): Whether to execute this method synchronously. If False, this method @@ -3273,6 +3269,7 @@ def deploy( Raises: ValueError: If `traffic_split` is set for PrivateEndpoint. """ + network = network or initializer.global_config.network Endpoint._validate_deploy_args( min_replica_count=min_replica_count, @@ -3420,12 +3417,10 @@ def _deploy( Overrides encryption_spec_key_name set in aiplatform.init network (str): Optional. The full name of the Compute Engine network to which - this Endpoint will be peered. E.g. "projects/12345/global/networks/myVPC". + the Endpoint, if created, will be peered to. E.g. "projects/12345/global/networks/myVPC". Private services access must already be configured for the network. - If left unspecified, the job is not peered with any network or - the network set in aiplatform.init will be used. - If set, a PrivateEndpoint will be created. Read more about PrivateEndpoints - [in the documentation](https://cloud.google.com/vertex-ai/docs/predictions/using-private-endpoints) + Read more about PrivateEndpoints + [in the documentation](https://cloud.google.com/vertex-ai/docs/predictions/using-private-endpoints). sync (bool): Whether to execute this method synchronously. 
If False, this method will be executed in concurrent Future and any downstream object will diff --git a/google/cloud/aiplatform/pipeline_jobs.py b/google/cloud/aiplatform/pipeline_jobs.py index 25c2bad2db..19de1fc88f 100644 --- a/google/cloud/aiplatform/pipeline_jobs.py +++ b/google/cloud/aiplatform/pipeline_jobs.py @@ -285,7 +285,6 @@ def __init__( self._gca_resource = gca_pipeline_job.PipelineJob(**pipeline_job_args) - @base.optional_sync() def run( self, service_account: Optional[str] = None, @@ -302,9 +301,42 @@ def run( network (str): Optional. The full name of the Compute Engine network to which the job should be peered. For example, projects/12345/global/networks/myVPC. + Private services access must already be configured for the network. + If left unspecified, the network set in aiplatform.init will be used. + Otherwise, the job is not peered with any network. + sync (bool): + Optional. Whether to execute this method synchronously. If False, this method will unblock and it will be executed in a concurrent Future. + create_request_timeout (float): + Optional. The timeout for the create request in seconds. + """ + network = network or initializer.global_config.network + + self._run( + service_account=service_account, + network=network, + sync=sync, + create_request_timeout=create_request_timeout, + ) + + @base.optional_sync() + def _run( + self, + service_account: Optional[str] = None, + network: Optional[str] = None, + sync: Optional[bool] = True, + create_request_timeout: Optional[float] = None, + ) -> None: + """Helper method to ensure network synchronization and to run + the configured PipelineJob and monitor the job until completion. + Args: + service_account (str): + Optional. Specifies the service account for workload run-as account. + Users submitting jobs must have act-as permission on this run-as account. + network (str): + Optional. The full name of the Compute Engine network to which the job + should be peered. 
For example, projects/12345/global/networks/myVPC. Private services access must already be configured for the network. - If left unspecified, the job is not peered with any network. sync (bool): Optional. Whether to execute this method synchronously. If False, this method will unblock and it will be executed in a concurrent Future. create_request_timeout (float): @@ -337,7 +369,8 @@ def submit( should be peered. For example, projects/12345/global/networks/myVPC. Private services access must already be configured for the network. - If left unspecified, the job is not peered with any network. + If left unspecified, the network set in aiplatform.init will be used. + Otherwise, the job is not peered with any network. create_request_timeout (float): Optional. The timeout for the create request in seconds. experiment (Union[str, experiments_resource.Experiment]): @@ -349,6 +382,8 @@ def submit( Pipeline parameters will be associated as parameters to the current Experiment Run. """ + network = network or initializer.global_config.network + if service_account: self._gca_resource.service_account = service_account diff --git a/google/cloud/aiplatform/training_jobs.py b/google/cloud/aiplatform/training_jobs.py index 82c865454b..4b394dabdc 100644 --- a/google/cloud/aiplatform/training_jobs.py +++ b/google/cloud/aiplatform/training_jobs.py @@ -3010,7 +3010,8 @@ def run( The full name of the Compute Engine network to which the job should be peered. For example, projects/12345/global/networks/myVPC. Private services access must already be configured for the network. - If left unspecified, the job is not peered with any network. + If left unspecified, the network set in aiplatform.init will be used. + Otherwise, the job is not peered with any network. bigquery_destination (str): Provide this field if `dataset` is a BigQuery dataset. 
The BigQuery project location where the training data is to @@ -3150,6 +3151,8 @@ def run( model: The trained Vertex AI Model resource or None if training did not produce a Vertex AI Model. """ + network = network or initializer.global_config.network + worker_pool_specs, managed_model = self._prepare_and_validate_run( model_display_name=model_display_name, model_labels=model_labels, @@ -3919,7 +3922,8 @@ def run( The full name of the Compute Engine network to which the job should be peered. For example, projects/12345/global/networks/myVPC. Private services access must already be configured for the network. - If left unspecified, the job is not peered with any network. + If left unspecified, the network set in aiplatform.init will be used. + Otherwise, the job is not peered with any network. bigquery_destination (str): Provide this field if `dataset` is a BigQuery dataset. The BigQuery project location where the training data is to @@ -4064,6 +4068,8 @@ def run( been set, or model_display_name was provided but required arguments were not provided in constructor. """ + network = network or initializer.global_config.network + worker_pool_specs, managed_model = self._prepare_and_validate_run( model_display_name=model_display_name, model_labels=model_labels, @@ -6170,7 +6176,8 @@ def run( The full name of the Compute Engine network to which the job should be peered. For example, projects/12345/global/networks/myVPC. Private services access must already be configured for the network. - If left unspecified, the job is not peered with any network. + If left unspecified, the network set in aiplatform.init will be used. + Otherwise, the job is not peered with any network. bigquery_destination (str): Provide this field if `dataset` is a BigQuery dataset. The BigQuery project location where the training data is to @@ -6310,6 +6317,8 @@ def run( model: The trained Vertex AI Model resource or None if training did not produce a Vertex AI Model. 
""" + network = network or initializer.global_config.network + worker_pool_specs, managed_model = self._prepare_and_validate_run( model_display_name=model_display_name, model_labels=model_labels, diff --git a/tests/unit/aiplatform/test_initializer.py b/tests/unit/aiplatform/test_initializer.py index 56cc1c3733..ae051594ff 100644 --- a/tests/unit/aiplatform/test_initializer.py +++ b/tests/unit/aiplatform/test_initializer.py @@ -42,6 +42,7 @@ _TEST_EXPERIMENT = "test-experiment" _TEST_DESCRIPTION = "test-description" _TEST_STAGING_BUCKET = "test-bucket" +_TEST_NETWORK = "projects/12345/global/networks/myVPC" @pytest.mark.usefixtures("google_auth_mock") @@ -90,6 +91,10 @@ def test_init_location_with_invalid_location_raises(self): with pytest.raises(ValueError): initializer.global_config.init(location=_TEST_INVALID_LOCATION) + def test_init_network_sets_network(self): + initializer.global_config.init(network=_TEST_NETWORK) + assert initializer.global_config.network == _TEST_NETWORK + @patch.object(_experiment_tracker, "set_experiment") def test_init_experiment_sets_experiment(self, set_experiment_mock): initializer.global_config.init(experiment=_TEST_EXPERIMENT) diff --git a/tests/unit/aiplatform/test_matching_engine_index_endpoint.py b/tests/unit/aiplatform/test_matching_engine_index_endpoint.py index 21589c4267..2f5deebf6e 100644 --- a/tests/unit/aiplatform/test_matching_engine_index_endpoint.py +++ b/tests/unit/aiplatform/test_matching_engine_index_endpoint.py @@ -297,7 +297,6 @@ def get_index_endpoint_mock(): ), ), ] - get_index_endpoint_mock.return_value = index_endpoint yield get_index_endpoint_mock @@ -526,6 +525,29 @@ def test_create_index_endpoint(self, create_index_endpoint_mock, sync): metadata=_TEST_REQUEST_METADATA, ) + @pytest.mark.usefixtures("get_index_endpoint_mock") + def test_create_index_endpoint_with_network_init(self, create_index_endpoint_mock): + aiplatform.init(project=_TEST_PROJECT, network=_TEST_INDEX_ENDPOINT_VPC_NETWORK) + + 
aiplatform.MatchingEngineIndexEndpoint.create( + display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME, + description=_TEST_INDEX_ENDPOINT_DESCRIPTION, + labels=_TEST_LABELS, + ) + + expected = gca_index_endpoint.IndexEndpoint( + display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME, + network=_TEST_INDEX_ENDPOINT_VPC_NETWORK, + description=_TEST_INDEX_ENDPOINT_DESCRIPTION, + labels=_TEST_LABELS, + ) + + create_index_endpoint_mock.assert_called_once_with( + parent=_TEST_PARENT, + index_endpoint=expected, + metadata=_TEST_REQUEST_METADATA, + ) + @pytest.mark.usefixtures("get_index_endpoint_mock", "get_index_mock") def test_deploy_index(self, deploy_index_mock, undeploy_index_mock): aiplatform.init(project=_TEST_PROJECT)