From fa123c219c248f887f5e4b176b833dc141d3cbea Mon Sep 17 00:00:00 2001 From: Sara Robinson Date: Tue, 22 Mar 2022 09:46:11 -0400 Subject: [PATCH 01/19] feat: add timeout arg and tests to model upload --- google/cloud/aiplatform/models.py | 4 +++ tests/unit/aiplatform/test_models.py | 39 ++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) diff --git a/google/cloud/aiplatform/models.py b/google/cloud/aiplatform/models.py index ed986a64dc..2f70a13471 100644 --- a/google/cloud/aiplatform/models.py +++ b/google/cloud/aiplatform/models.py @@ -1646,6 +1646,7 @@ def upload( labels: Optional[Dict[str, str]] = None, encryption_spec_key_name: Optional[str] = None, staging_bucket: Optional[str] = None, + timeout: Optional[float] = None, sync=True, ) -> "Model": """Uploads a model and returns a Model representing the uploaded Model @@ -1792,6 +1793,8 @@ def upload( staging_bucket (str): Optional. Bucket to stage local model artifacts. Overrides staging_bucket set in aiplatform.init. + timeout (float): + Optional. The timeout for this request in seconds. Returns: model: Instantiated representation of the uploaded model resource. Raises: @@ -1898,6 +1901,7 @@ def upload( lro = api_client.upload_model( parent=initializer.global_config.common_location_path(project, location), model=managed_model, + timeout=timeout, ) _LOGGER.log_create_with_lro(cls, lro) diff --git a/tests/unit/aiplatform/test_models.py b/tests/unit/aiplatform/test_models.py index 17b4f72bd1..b54a7c63ad 100644 --- a/tests/unit/aiplatform/test_models.py +++ b/tests/unit/aiplatform/test_models.py @@ -537,6 +537,7 @@ def test_upload_uploads_and_gets_model( serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE, serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE, serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE, + timeout=None, sync=sync, ) @@ -556,12 +557,42 @@ def test_upload_uploads_and_gets_model( upload_model_mock.assert_called_once_with( parent=initializer.global_config.common_location_path(), model=managed_model, + timeout=None, ) get_model_mock.assert_called_once_with( name=_TEST_MODEL_RESOURCE_NAME, retry=base._DEFAULT_RETRY ) + @pytest.mark.parametrize("sync", [True, False]) + def test_upload_with_timeout( + self, upload_model_mock, get_model_mock, sync + ): + my_model = models.Model.upload( + display_name=_TEST_MODEL_NAME, + serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE, + timeout=180.0, + sync=sync, + ) + + if not sync: + my_model.wait() + + container_spec = gca_model.ModelContainerSpec( + image_uri=_TEST_SERVING_CONTAINER_IMAGE, + ) + + managed_model = gca_model.Model( + display_name=_TEST_MODEL_NAME, + container_spec=container_spec, + ) + + upload_model_mock.assert_called_once_with( + parent=initializer.global_config.common_location_path(), + model=managed_model, + timeout=180.0 + ) + @pytest.mark.parametrize("sync", [True, False]) def test_upload_uploads_and_gets_model_with_labels( self, upload_model_mock, get_model_mock, sync @@ -573,6 +604,7 @@ def test_upload_uploads_and_gets_model_with_labels( serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE, serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE, labels=_TEST_LABEL, + timeout=None, sync=sync, ) @@ -594,6 +626,7 @@ def test_upload_uploads_and_gets_model_with_labels( upload_model_mock.assert_called_once_with( parent=initializer.global_config.common_location_path(), model=managed_model, + timeout=None, ) get_model_mock.assert_called_once_with( @@ -635,6 +668,7 @@ def 
test_upload_uploads_and_gets_model_with_all_args( explanation_metadata=_TEST_EXPLANATION_METADATA, explanation_parameters=_TEST_EXPLANATION_PARAMETERS, labels=_TEST_LABEL, + timeout=None, sync=sync, ) @@ -681,6 +715,7 @@ def test_upload_uploads_and_gets_model_with_all_args( upload_model_mock.assert_called_once_with( parent=initializer.global_config.common_location_path(), model=managed_model, + timeout=None, ) get_model_mock.assert_called_once_with( name=_TEST_MODEL_RESOURCE_NAME, retry=base._DEFAULT_RETRY @@ -706,6 +741,7 @@ def test_upload_uploads_and_gets_model_with_custom_project( serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE, serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE, project=_TEST_PROJECT_2, + timeout=None, sync=sync, ) @@ -727,6 +763,7 @@ def test_upload_uploads_and_gets_model_with_custom_project( upload_model_with_custom_project_mock.assert_called_once_with( parent=f"projects/{_TEST_PROJECT_2}/locations/{_TEST_LOCATION}", model=managed_model, + timeout=None, ) get_model_with_custom_project_mock.assert_called_once_with( @@ -793,6 +830,7 @@ def test_upload_uploads_and_gets_model_with_custom_location( serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE, serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE, location=_TEST_LOCATION_2, + timeout=None, sync=sync, ) @@ -814,6 +852,7 @@ def test_upload_uploads_and_gets_model_with_custom_location( upload_model_with_custom_location_mock.assert_called_once_with( parent=f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION_2}", model=managed_model, + timeout=None, ) get_model_with_custom_location_mock.assert_called_once_with( From d7a387bffa15880d4a4ebc3bc3574dd013c920c8 Mon Sep 17 00:00:00 2001 From: Sara Robinson Date: Tue, 22 Mar 2022 10:10:32 -0400 Subject: [PATCH 02/19] feat: add timeout arg and tests to dataset create method --- google/cloud/aiplatform/datasets/dataset.py | 13 ++- .../aiplatform/datasets/image_dataset.py | 4 + .../aiplatform/datasets/tabular_dataset.py | 4 + .../cloud/aiplatform/datasets/text_dataset.py | 4 + .../aiplatform/datasets/video_dataset.py | 4 + tests/unit/aiplatform/test_datasets.py | 103 ++++++++++++++++-- 6 files changed, 124 insertions(+), 8 deletions(-) diff --git a/google/cloud/aiplatform/datasets/dataset.py b/google/cloud/aiplatform/datasets/dataset.py index 9adcaca7dc..a1a4fee272 100644 --- a/google/cloud/aiplatform/datasets/dataset.py +++ b/google/cloud/aiplatform/datasets/dataset.py @@ -118,6 +118,7 @@ def create( request_metadata: Optional[Sequence[Tuple[str, str]]] = (), labels: Optional[Dict[str, str]] = None, encryption_spec_key_name: Optional[str] = None, + timeout: Optional[float] = None, sync: bool = True, ) -> "_Dataset": """Creates a new dataset and optionally imports data into dataset when @@ -199,6 +200,8 @@ def create( If set, this Dataset and all sub-resources of this Dataset will be secured by this key. Overrides encryption_spec_key_name set in aiplatform.init. + timeout (float): + Optional. The timeout for this request in seconds. sync (bool): Whether to execute this method synchronously. 
If False, this method will be executed in concurrent Future and any downstream object will @@ -239,6 +242,7 @@ def create( encryption_spec=initializer.global_config.get_encryption_spec( encryption_spec_key_name=encryption_spec_key_name ), + timeout=timeout, sync=sync, ) @@ -257,6 +261,7 @@ def _create_and_import( request_metadata: Optional[Sequence[Tuple[str, str]]] = (), labels: Optional[Dict[str, str]] = None, encryption_spec: Optional[gca_encryption_spec.EncryptionSpec] = None, + timeout: Optional[float] = None, sync: bool = True, ) -> "_Dataset": """Creates a new dataset and optionally imports data into dataset when @@ -309,6 +314,8 @@ def _create_and_import( resource is created. If set, this Dataset and all sub-resources of this Dataset will be secured by this key. + timeout (float): + Optional. The timeout for this request in seconds. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -328,6 +335,7 @@ def _create_and_import( request_metadata=request_metadata, labels=labels, encryption_spec=encryption_spec, + timeout=timeout, ) _LOGGER.log_create_with_lro(cls, create_dataset_lro) @@ -375,6 +383,7 @@ def _create( request_metadata: Sequence[Tuple[str, str]] = (), labels: Optional[Dict[str, str]] = None, encryption_spec: Optional[gca_encryption_spec.EncryptionSpec] = None, + timeout: Optional[float] = None, ) -> operation.Operation: """Creates a new managed dataset by directly calling API client. @@ -417,6 +426,8 @@ def _create( resource is created. If set, this Dataset and all sub-resources of this Dataset will be secured by this key. + timeout (float): + Optional. The timeout for this request in seconds. Returns: operation (Operation): An object representing a long-running operation. @@ -431,7 +442,7 @@ def _create( ) return api_client.create_dataset( - parent=parent, dataset=gapic_dataset, metadata=request_metadata + parent=parent, dataset=gapic_dataset, metadata=request_metadata, timeout=timeout, ) def _import( diff --git a/google/cloud/aiplatform/datasets/image_dataset.py b/google/cloud/aiplatform/datasets/image_dataset.py index 14b26cc6de..592071c4e8 100644 --- a/google/cloud/aiplatform/datasets/image_dataset.py +++ b/google/cloud/aiplatform/datasets/image_dataset.py @@ -46,6 +46,7 @@ def create( request_metadata: Optional[Sequence[Tuple[str, str]]] = (), labels: Optional[Dict[str, str]] = None, encryption_spec_key_name: Optional[str] = None, + timeout: Optional[float] = None, sync: bool = True, ) -> "ImageDataset": """Creates a new image dataset and optionally imports data into dataset @@ -117,6 +118,8 @@ def create( If set, this Dataset and all sub-resources of this Dataset will be secured by this key. Overrides encryption_spec_key_name set in aiplatform.init. + timeout (float): + Optional. The timeout for this request in seconds. sync (bool): Whether to execute this method synchronously. 
If False, this method will be executed in concurrent Future and any downstream object will @@ -158,5 +161,6 @@ def create( encryption_spec=initializer.global_config.get_encryption_spec( encryption_spec_key_name=encryption_spec_key_name ), + timeout=timeout, sync=sync, ) diff --git a/google/cloud/aiplatform/datasets/tabular_dataset.py b/google/cloud/aiplatform/datasets/tabular_dataset.py index 57ad827b31..2164c88c37 100644 --- a/google/cloud/aiplatform/datasets/tabular_dataset.py +++ b/google/cloud/aiplatform/datasets/tabular_dataset.py @@ -45,6 +45,7 @@ def create( request_metadata: Optional[Sequence[Tuple[str, str]]] = (), labels: Optional[Dict[str, str]] = None, encryption_spec_key_name: Optional[str] = None, + timeout: Optional[float] = None, sync: bool = True, ) -> "TabularDataset": """Creates a new tabular dataset. @@ -98,6 +99,8 @@ def create( If set, this Dataset and all sub-resources of this Dataset will be secured by this key. Overrides encryption_spec_key_name set in aiplatform.init. + timeout (float): + The timeout for this request in seconds. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -138,6 +141,7 @@ def create( encryption_spec=initializer.global_config.get_encryption_spec( encryption_spec_key_name=encryption_spec_key_name ), + timeout=timeout, sync=sync, ) diff --git a/google/cloud/aiplatform/datasets/text_dataset.py b/google/cloud/aiplatform/datasets/text_dataset.py index 8fa28c3f31..f4c5dfab6d 100644 --- a/google/cloud/aiplatform/datasets/text_dataset.py +++ b/google/cloud/aiplatform/datasets/text_dataset.py @@ -46,6 +46,7 @@ def create( request_metadata: Optional[Sequence[Tuple[str, str]]] = (), labels: Optional[Dict[str, str]] = None, encryption_spec_key_name: Optional[str] = None, + timeout: Optional[float] = None, sync: bool = True, ) -> "TextDataset": """Creates a new text dataset and optionally imports data into dataset @@ -124,6 +125,8 @@ def create( If set, this Dataset and all sub-resources of this Dataset will be secured by this key. Overrides encryption_spec_key_name set in aiplatform.init. + timeout (float): + Optional. The timeout for this request in seconds. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -165,5 +168,6 @@ def create( encryption_spec=initializer.global_config.get_encryption_spec( encryption_spec_key_name=encryption_spec_key_name ), + timeout=timeout, sync=sync, ) diff --git a/google/cloud/aiplatform/datasets/video_dataset.py b/google/cloud/aiplatform/datasets/video_dataset.py index a758339b71..1d95bdd2a7 100644 --- a/google/cloud/aiplatform/datasets/video_dataset.py +++ b/google/cloud/aiplatform/datasets/video_dataset.py @@ -46,6 +46,7 @@ def create( request_metadata: Optional[Sequence[Tuple[str, str]]] = (), labels: Optional[Dict[str, str]] = None, encryption_spec_key_name: Optional[str] = None, + timeout: Optional[float] = None, sync: bool = True, ) -> "VideoDataset": """Creates a new video dataset and optionally imports data into dataset @@ -117,6 +118,8 @@ def create( If set, this Dataset and all sub-resources of this Dataset will be secured by this key. Overrides encryption_spec_key_name set in aiplatform.init. + timeout (float): + Optional. The timeout for this request in seconds. sync (bool): Whether to execute this method synchronously. 
If False, this method will be executed in concurrent Future and any downstream object will @@ -158,5 +161,6 @@ def create( encryption_spec=initializer.global_config.get_encryption_spec( encryption_spec_key_name=encryption_spec_key_name ), + timeout=timeout, sync=sync, ) diff --git a/tests/unit/aiplatform/test_datasets.py b/tests/unit/aiplatform/test_datasets.py index 71ca5907ab..8a90a26e13 100644 --- a/tests/unit/aiplatform/test_datasets.py +++ b/tests/unit/aiplatform/test_datasets.py @@ -16,6 +16,7 @@ # import os +from socket import timeout import pytest @@ -577,6 +578,7 @@ def test_init_aiplatform_with_encryption_key_name_and_create_dataset( display_name=_TEST_DISPLAY_NAME, metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR, sync=sync, + timeout=None, ) if not sync: @@ -593,6 +595,7 @@ def test_init_aiplatform_with_encryption_key_name_and_create_dataset( parent=_TEST_PARENT, dataset=expected_dataset, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) @pytest.mark.usefixtures("get_dataset_mock") @@ -604,6 +607,7 @@ def test_create_dataset_nontabular(self, create_dataset_mock, sync): display_name=_TEST_DISPLAY_NAME, metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + timeout=None, sync=sync, ) @@ -621,6 +625,37 @@ def test_create_dataset_nontabular(self, create_dataset_mock, sync): parent=_TEST_PARENT, dataset=expected_dataset, metadata=_TEST_REQUEST_METADATA, + timeout=None, + ) + + @pytest.mark.usefixtures("get_dataset_mock") + @pytest.mark.parametrize("sync", [True, False]) + def test_create_dataset_nontabular_with_timeout(self, create_dataset_mock, sync): + aiplatform.init(project=_TEST_PROJECT) + + my_dataset = datasets._Dataset.create( + display_name=_TEST_DISPLAY_NAME, + metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR, + encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + timeout=180.0, + sync=sync, + ) + + if not sync: + my_dataset.wait() + + expected_dataset = gca_dataset.Dataset( + display_name=_TEST_DISPLAY_NAME, + metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR, + metadata=_TEST_NONTABULAR_DATASET_METADATA, + encryption_spec=_TEST_ENCRYPTION_SPEC, + ) + + create_dataset_mock.assert_called_once_with( + parent=_TEST_PARENT, + dataset=expected_dataset, + metadata=_TEST_REQUEST_METADATA, + timeout=180.0 ) @pytest.mark.usefixtures("get_dataset_mock") @@ -632,6 +667,7 @@ def test_create_dataset_tabular(self, create_dataset_mock): metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TABULAR, bq_source=_TEST_SOURCE_URI_BQ, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + timeout=None, ) expected_dataset = gca_dataset.Dataset( @@ -645,6 +681,32 @@ def test_create_dataset_tabular(self, create_dataset_mock): parent=_TEST_PARENT, dataset=expected_dataset, metadata=_TEST_REQUEST_METADATA, + timeout=None, + ) + + @pytest.mark.usefixtures("get_dataset_mock") + def test_create_dataset_tabular_with_timeout(self, create_dataset_mock): + aiplatform.init(project=_TEST_PROJECT) + + my_dataset = datasets._Dataset.create( + display_name=_TEST_DISPLAY_NAME, + metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR, + encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + timeout=180.0, + ) + + expected_dataset = gca_dataset.Dataset( + display_name=_TEST_DISPLAY_NAME, + metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR, + metadata=_TEST_NONTABULAR_DATASET_METADATA, + encryption_spec=_TEST_ENCRYPTION_SPEC, + ) + + create_dataset_mock.assert_called_once_with( + parent=_TEST_PARENT, + dataset=expected_dataset, + 
metadata=_TEST_REQUEST_METADATA, + timeout=180.0 ) @pytest.mark.usefixtures("get_dataset_mock") @@ -661,6 +723,7 @@ def test_create_and_import_dataset( import_schema_uri=_TEST_IMPORT_SCHEMA_URI, data_item_labels=_TEST_DATA_LABEL_ITEMS, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + timeout=None, sync=sync, ) @@ -684,6 +747,7 @@ def test_create_and_import_dataset( parent=_TEST_PARENT, dataset=expected_dataset, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) import_data_mock.assert_called_once_with( @@ -747,6 +811,7 @@ def test_create_then_import( display_name=_TEST_DISPLAY_NAME, metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + timeout=None, sync=sync, ) @@ -777,6 +842,7 @@ def test_create_then_import( parent=_TEST_PARENT, dataset=expected_dataset, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) get_dataset_mock.assert_called_once_with( @@ -833,7 +899,7 @@ def test_create_dataset(self, create_dataset_mock, sync): ) my_dataset = datasets.ImageDataset.create( - display_name=_TEST_DISPLAY_NAME, sync=sync, + display_name=_TEST_DISPLAY_NAME, sync=sync, timeout=None, ) if not sync: @@ -850,6 +916,7 @@ def test_create_dataset(self, create_dataset_mock, sync): parent=_TEST_PARENT, dataset=expected_dataset, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) @pytest.mark.usefixtures("get_dataset_image_mock") @@ -864,6 +931,7 @@ def test_create_and_import_dataset( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_IMAGE, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + timeout=None, sync=sync, ) @@ -881,6 +949,7 @@ def test_create_and_import_dataset( parent=_TEST_PARENT, dataset=expected_dataset, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) expected_import_config = gca_dataset.ImportDataConfig( @@ -929,6 +998,7 @@ def test_create_then_import( my_dataset = datasets.ImageDataset.create( display_name=_TEST_DISPLAY_NAME, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + timeout=None, sync=sync, ) @@ -951,6 +1021,7 @@ def test_create_then_import( parent=_TEST_PARENT, dataset=expected_dataset, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) get_dataset_image_mock.assert_called_once_with( @@ -977,7 +1048,7 @@ def test_create_dataset_with_labels(self, create_dataset_mock, sync): ) my_dataset = datasets.ImageDataset.create( - display_name=_TEST_DISPLAY_NAME, labels=_TEST_LABELS, sync=sync, + display_name=_TEST_DISPLAY_NAME, labels=_TEST_LABELS, sync=sync, timeout=None, ) if not sync: @@ -995,6 +1066,7 @@ def test_create_dataset_with_labels(self, create_dataset_mock, sync): parent=_TEST_PARENT, dataset=expected_dataset, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) @@ -1030,7 +1102,7 @@ def test_create_dataset_with_default_encryption_key( ) my_dataset = datasets.TabularDataset.create( - display_name=_TEST_DISPLAY_NAME, bq_source=_TEST_SOURCE_URI_BQ, sync=sync, + display_name=_TEST_DISPLAY_NAME, bq_source=_TEST_SOURCE_URI_BQ, sync=sync, timeout=None, ) if not sync: @@ -1049,6 +1121,7 @@ def test_create_dataset_with_default_encryption_key( parent=_TEST_PARENT, dataset=expected_dataset, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) @pytest.mark.usefixtures("create_dataset_mock_fail") @@ -1085,6 +1158,7 @@ def test_create_dataset(self, create_dataset_mock, sync): display_name=_TEST_DISPLAY_NAME, bq_source=_TEST_SOURCE_URI_BQ, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + timeout=None, sync=sync, ) @@ -1102,6 +1176,7 @@ def test_create_dataset(self, create_dataset_mock, sync): 
parent=_TEST_PARENT, dataset=expected_dataset, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) @pytest.mark.usefixtures("get_dataset_tabular_bq_mock") @@ -1227,6 +1302,7 @@ def test_create_dataset_with_labels(self, create_dataset_mock, sync): bq_source=_TEST_SOURCE_URI_BQ, labels=_TEST_LABELS, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + timeout=None, sync=sync, ) @@ -1245,6 +1321,7 @@ def test_create_dataset_with_labels(self, create_dataset_mock, sync): parent=_TEST_PARENT, dataset=expected_dataset, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) @@ -1277,7 +1354,7 @@ def test_create_dataset(self, create_dataset_mock, sync): ) my_dataset = datasets.TextDataset.create( - display_name=_TEST_DISPLAY_NAME, sync=sync, + display_name=_TEST_DISPLAY_NAME, sync=sync, timeout=None, ) if not sync: @@ -1294,6 +1371,7 @@ def test_create_dataset(self, create_dataset_mock, sync): parent=_TEST_PARENT, dataset=expected_dataset, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) @pytest.mark.usefixtures("get_dataset_text_mock") @@ -1308,6 +1386,7 @@ def test_create_and_import_dataset( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_TEXT, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + timeout=None, sync=sync, ) @@ -1325,6 +1404,7 @@ def test_create_and_import_dataset( parent=_TEST_PARENT, dataset=expected_dataset, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) expected_import_config = gca_dataset.ImportDataConfig( @@ -1410,6 +1490,7 @@ def test_create_then_import( my_dataset = datasets.TextDataset.create( display_name=_TEST_DISPLAY_NAME, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + timeout=None, sync=sync, ) @@ -1432,6 +1513,7 @@ def test_create_then_import( parent=_TEST_PARENT, dataset=expected_dataset, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) get_dataset_text_mock.assert_called_once_with( @@ -1458,7 +1540,7 @@ def test_create_dataset_with_labels(self, create_dataset_mock, sync): ) my_dataset = datasets.TextDataset.create( - display_name=_TEST_DISPLAY_NAME, labels=_TEST_LABELS, sync=sync, + display_name=_TEST_DISPLAY_NAME, labels=_TEST_LABELS, sync=sync, timeout=None, ) if not sync: @@ -1476,6 +1558,7 @@ def test_create_dataset_with_labels(self, create_dataset_mock, sync): parent=_TEST_PARENT, dataset=expected_dataset, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) @@ -1508,7 +1591,7 @@ def test_create_dataset(self, create_dataset_mock, sync): ) my_dataset = datasets.VideoDataset.create( - display_name=_TEST_DISPLAY_NAME, sync=sync, + display_name=_TEST_DISPLAY_NAME, sync=sync, timeout=None, ) if not sync: @@ -1525,6 +1608,7 @@ def test_create_dataset(self, create_dataset_mock, sync): parent=_TEST_PARENT, dataset=expected_dataset, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) @pytest.mark.usefixtures("get_dataset_video_mock") @@ -1539,6 +1623,7 @@ def test_create_and_import_dataset( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_VIDEO, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + timeout=None, sync=sync, ) @@ -1556,6 +1641,7 @@ def test_create_and_import_dataset( parent=_TEST_PARENT, dataset=expected_dataset, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) expected_import_config = gca_dataset.ImportDataConfig( @@ -1604,6 +1690,7 @@ def test_create_then_import( my_dataset = datasets.VideoDataset.create( display_name=_TEST_DISPLAY_NAME, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + timeout=None, sync=sync, ) @@ -1626,6 +1713,7 @@ def test_create_then_import( 
parent=_TEST_PARENT, dataset=expected_dataset, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) get_dataset_video_mock.assert_called_once_with( @@ -1652,7 +1740,7 @@ def test_create_dataset_with_labels(self, create_dataset_mock, sync): ) my_dataset = datasets.VideoDataset.create( - display_name=_TEST_DISPLAY_NAME, labels=_TEST_LABELS, sync=sync, + display_name=_TEST_DISPLAY_NAME, labels=_TEST_LABELS, sync=sync, timeout=None, ) if not sync: @@ -1670,4 +1758,5 @@ def test_create_dataset_with_labels(self, create_dataset_mock, sync): parent=_TEST_PARENT, dataset=expected_dataset, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) From 5952489c67bc5ab8ab7f2d1f1885d62751c36973 Mon Sep 17 00:00:00 2001 From: Sara Robinson Date: Tue, 22 Mar 2022 11:38:47 -0400 Subject: [PATCH 03/19] feat: add tiemout arg to dataset import and tensorboard create --- google/cloud/aiplatform/datasets/dataset.py | 26 ++++-- .../tensorboard/tensorboard_resource.py | 8 +- tests/unit/aiplatform/test_datasets.py | 85 +++++++++++++++---- tests/unit/aiplatform/test_models.py | 9 +- tests/unit/aiplatform/test_tensorboard.py | 29 ++++++- 5 files changed, 123 insertions(+), 34 deletions(-) diff --git a/google/cloud/aiplatform/datasets/dataset.py b/google/cloud/aiplatform/datasets/dataset.py index a1a4fee272..37f29d6194 100644 --- a/google/cloud/aiplatform/datasets/dataset.py +++ b/google/cloud/aiplatform/datasets/dataset.py @@ -353,16 +353,16 @@ def _create_and_import( # Import if import datasource is DatasourceImportable if isinstance(datasource, _datasources.DatasourceImportable): - dataset_obj._import_and_wait(datasource) + dataset_obj._import_and_wait(datasource, timeout=None) return dataset_obj - def _import_and_wait(self, datasource): + def _import_and_wait(self, datasource, timeout): _LOGGER.log_action_start_against_resource( "Importing", "data", self, ) - import_lro = self._import(datasource=datasource) + import_lro = self._import(datasource=datasource, timeout=timeout) _LOGGER.log_action_started_against_resource_with_lro( "Import", "data", self.__class__, import_lro @@ -383,7 +383,7 @@ def _create( request_metadata: Sequence[Tuple[str, str]] = (), labels: Optional[Dict[str, str]] = None, encryption_spec: Optional[gca_encryption_spec.EncryptionSpec] = None, - timeout: Optional[float] = None, + timeout: Optional[float] = None, ) -> operation.Operation: """Creates a new managed dataset by directly calling API client. @@ -442,11 +442,16 @@ def _create( ) return api_client.create_dataset( - parent=parent, dataset=gapic_dataset, metadata=request_metadata, timeout=timeout, + parent=parent, + dataset=gapic_dataset, + metadata=request_metadata, + timeout=timeout, ) def _import( - self, datasource: _datasources.DatasourceImportable, + self, + datasource: _datasources.DatasourceImportable, + timeout: Optional[float] = None, ) -> operation.Operation: """Imports data into managed dataset by directly calling API client. @@ -459,7 +464,9 @@ def _import( An object representing a long-running operation. """ return self.api_client.import_data( - name=self.resource_name, import_configs=[datasource.import_data_config] + name=self.resource_name, + import_configs=[datasource.import_data_config], + timeout=timeout, ) @base.optional_sync(return_input_arg="self") @@ -468,6 +475,7 @@ def import_data( gcs_source: Union[str, Sequence[str]], import_schema_uri: str, data_item_labels: Optional[Dict] = None, + timeout: Optional[float] = None, sync: bool = True, ) -> "_Dataset": """Upload data to existing managed dataset. 
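A minimal usage sketch of the per-request timeout threaded through dataset create and import above, assuming a placeholder project, bucket, and import schema URI; the 180-second value mirrors the accompanying tests, and note that PATCH 05 later in this series renames these parameters to create_request_timeout and import_request_timeout.

    from google.cloud import aiplatform

    aiplatform.init(project="my-project", location="us-central1")  # placeholders

    IMPORT_SCHEMA_URI = "gs://my-bucket/schema.yaml"  # placeholder; a Vertex ioformat schema URI in practice

    # Bound only the time allowed to initiate the create request.
    ds = aiplatform.ImageDataset.create(
        display_name="my-image-dataset",
        timeout=180.0,  # renamed to create_request_timeout in PATCH 05
    )

    # The import request accepts the same kind of per-request timeout.
    ds.import_data(
        gcs_source=["gs://my-bucket/annotations.jsonl"],  # placeholder source
        import_schema_uri=IMPORT_SCHEMA_URI,
        timeout=180.0,  # renamed to import_request_timeout in PATCH 05
    )
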
@@ -502,6 +510,8 @@ def import_data( labels specified inside index file referenced by ``import_schema_uri``, e.g. jsonl file. + timeout (float): + Optional. The timeout for this request in seconds. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -518,7 +528,7 @@ def import_data( data_item_labels=data_item_labels, ) - self._import_and_wait(datasource=datasource) + self._import_and_wait(datasource=datasource, timeout=timeout) return self # TODO(b/174751568) add optional sync support diff --git a/google/cloud/aiplatform/tensorboard/tensorboard_resource.py b/google/cloud/aiplatform/tensorboard/tensorboard_resource.py index 5871bae832..f8db00fb61 100644 --- a/google/cloud/aiplatform/tensorboard/tensorboard_resource.py +++ b/google/cloud/aiplatform/tensorboard/tensorboard_resource.py @@ -90,6 +90,7 @@ def create( credentials: Optional[auth_credentials.Credentials] = None, request_metadata: Optional[Sequence[Tuple[str, str]]] = (), encryption_spec_key_name: Optional[str] = None, + timeout: Optional[float] = None, ) -> "Tensorboard": """Creates a new tensorboard. @@ -143,6 +144,8 @@ def create( If set, this Tensorboard and all sub-resources of this Tensorboard will be secured by this key. Overrides encryption_spec_key_name set in aiplatform.init. + timeout (float): + Optional. The timeout for this request in seconds. Returns: tensorboard (Tensorboard): @@ -171,7 +174,10 @@ def create( ) create_tensorboard_lro = api_client.create_tensorboard( - parent=parent, tensorboard=gapic_tensorboard, metadata=request_metadata + parent=parent, + tensorboard=gapic_tensorboard, + metadata=request_metadata, + timeout=timeout, ) _LOGGER.log_create_with_lro(cls, create_tensorboard_lro) diff --git a/tests/unit/aiplatform/test_datasets.py b/tests/unit/aiplatform/test_datasets.py index 8a90a26e13..2d472c1316 100644 --- a/tests/unit/aiplatform/test_datasets.py +++ b/tests/unit/aiplatform/test_datasets.py @@ -17,6 +17,7 @@ import os from socket import timeout +from time import time import pytest @@ -655,7 +656,7 @@ def test_create_dataset_nontabular_with_timeout(self, create_dataset_mock, sync) parent=_TEST_PARENT, dataset=expected_dataset, metadata=_TEST_REQUEST_METADATA, - timeout=180.0 + timeout=180.0, ) @pytest.mark.usefixtures("get_dataset_mock") @@ -706,7 +707,7 @@ def test_create_dataset_tabular_with_timeout(self, create_dataset_mock): parent=_TEST_PARENT, dataset=expected_dataset, metadata=_TEST_REQUEST_METADATA, - timeout=180.0 + timeout=180.0, ) @pytest.mark.usefixtures("get_dataset_mock") @@ -751,7 +752,7 @@ def test_create_and_import_dataset( ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config] + name=_TEST_NAME, import_configs=[expected_import_config], timeout=None, ) expected_dataset.name = _TEST_NAME @@ -768,6 +769,35 @@ def test_import_data(self, import_data_mock, sync): gcs_source=_TEST_SOURCE_URI_GCS, import_schema_uri=_TEST_IMPORT_SCHEMA_URI, data_item_labels=_TEST_DATA_LABEL_ITEMS, + timeout=None, + sync=sync, + ) + + if not sync: + my_dataset.wait() + + expected_import_config = gca_dataset.ImportDataConfig( + gcs_source=gca_io.GcsSource(uris=[_TEST_SOURCE_URI_GCS]), + import_schema_uri=_TEST_IMPORT_SCHEMA_URI, + data_item_labels=_TEST_DATA_LABEL_ITEMS, + ) + + import_data_mock.assert_called_once_with( + name=_TEST_NAME, import_configs=[expected_import_config], timeout=None, + ) + + @pytest.mark.usefixtures("get_dataset_mock") + 
@pytest.mark.parametrize("sync", [True, False]) + def test_import_data_with_timeout(self, import_data_mock, sync): + aiplatform.init(project=_TEST_PROJECT) + + my_dataset = datasets._Dataset(dataset_name=_TEST_NAME) + + my_dataset.import_data( + gcs_source=_TEST_SOURCE_URI_GCS, + import_schema_uri=_TEST_IMPORT_SCHEMA_URI, + data_item_labels=_TEST_DATA_LABEL_ITEMS, + timeout=180.0, sync=sync, ) @@ -781,7 +811,7 @@ def test_import_data(self, import_data_mock, sync): ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config] + name=_TEST_NAME, import_configs=[expected_import_config], timeout=180.0, ) @pytest.mark.usefixtures("get_dataset_mock") @@ -819,6 +849,7 @@ def test_create_then_import( gcs_source=_TEST_SOURCE_URI_GCS, import_schema_uri=_TEST_IMPORT_SCHEMA_URI, data_item_labels=_TEST_DATA_LABEL_ITEMS, + timeout=None, sync=sync, ) @@ -850,7 +881,7 @@ def test_create_then_import( ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config] + name=_TEST_NAME, import_configs=[expected_import_config], timeout=None, ) expected_dataset.name = _TEST_NAME @@ -957,7 +988,7 @@ def test_create_and_import_dataset( import_schema_uri=_TEST_IMPORT_SCHEMA_URI_IMAGE, ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config] + name=_TEST_NAME, import_configs=[expected_import_config], timeout=None, ) expected_dataset.name = _TEST_NAME @@ -973,6 +1004,7 @@ def test_import_data(self, import_data_mock, sync): my_dataset.import_data( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_IMAGE, + timeout=None, sync=sync, ) @@ -985,7 +1017,7 @@ def test_import_data(self, import_data_mock, sync): ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config] + name=_TEST_NAME, import_configs=[expected_import_config], timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -1005,6 +1037,7 @@ def test_create_then_import( my_dataset.import_data( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_IMAGE, + timeout=None, sync=sync, ) @@ -1034,7 +1067,7 @@ def test_create_then_import( ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config] + name=_TEST_NAME, import_configs=[expected_import_config], timeout=None, ) expected_dataset.name = _TEST_NAME @@ -1048,7 +1081,10 @@ def test_create_dataset_with_labels(self, create_dataset_mock, sync): ) my_dataset = datasets.ImageDataset.create( - display_name=_TEST_DISPLAY_NAME, labels=_TEST_LABELS, sync=sync, timeout=None, + display_name=_TEST_DISPLAY_NAME, + labels=_TEST_LABELS, + sync=sync, + timeout=None, ) if not sync: @@ -1102,7 +1138,10 @@ def test_create_dataset_with_default_encryption_key( ) my_dataset = datasets.TabularDataset.create( - display_name=_TEST_DISPLAY_NAME, bq_source=_TEST_SOURCE_URI_BQ, sync=sync, timeout=None, + display_name=_TEST_DISPLAY_NAME, + bq_source=_TEST_SOURCE_URI_BQ, + sync=sync, + timeout=None, ) if not sync: @@ -1412,7 +1451,7 @@ def test_create_and_import_dataset( import_schema_uri=_TEST_IMPORT_SCHEMA_URI_TEXT, ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config] + name=_TEST_NAME, import_configs=[expected_import_config], timeout=None, ) expected_dataset.name = _TEST_NAME @@ -1465,6 +1504,7 @@ def test_import_data(self, import_data_mock, sync): my_dataset.import_data( gcs_source=[_TEST_SOURCE_URI_GCS], 
import_schema_uri=_TEST_IMPORT_SCHEMA_URI_TEXT, + timeout=None, sync=sync, ) @@ -1477,7 +1517,7 @@ def test_import_data(self, import_data_mock, sync): ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config] + name=_TEST_NAME, import_configs=[expected_import_config], timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -1497,6 +1537,7 @@ def test_create_then_import( my_dataset.import_data( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_TEXT, + timeout=None, sync=sync, ) @@ -1526,7 +1567,7 @@ def test_create_then_import( ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config] + name=_TEST_NAME, import_configs=[expected_import_config], timeout=None, ) expected_dataset.name = _TEST_NAME @@ -1540,7 +1581,10 @@ def test_create_dataset_with_labels(self, create_dataset_mock, sync): ) my_dataset = datasets.TextDataset.create( - display_name=_TEST_DISPLAY_NAME, labels=_TEST_LABELS, sync=sync, timeout=None, + display_name=_TEST_DISPLAY_NAME, + labels=_TEST_LABELS, + sync=sync, + timeout=None, ) if not sync: @@ -1649,7 +1693,7 @@ def test_create_and_import_dataset( import_schema_uri=_TEST_IMPORT_SCHEMA_URI_VIDEO, ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config] + name=_TEST_NAME, import_configs=[expected_import_config], timeout=None, ) expected_dataset.name = _TEST_NAME @@ -1665,6 +1709,7 @@ def test_import_data(self, import_data_mock, sync): my_dataset.import_data( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_VIDEO, + timeout=None, sync=sync, ) @@ -1677,7 +1722,7 @@ def test_import_data(self, import_data_mock, sync): ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config] + name=_TEST_NAME, import_configs=[expected_import_config], timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -1697,6 +1742,7 @@ def test_create_then_import( my_dataset.import_data( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_VIDEO, + timeout=None, sync=sync, ) @@ -1726,7 +1772,7 @@ def test_create_then_import( ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config] + name=_TEST_NAME, import_configs=[expected_import_config], timeout=None, ) expected_dataset.name = _TEST_NAME @@ -1740,7 +1786,10 @@ def test_create_dataset_with_labels(self, create_dataset_mock, sync): ) my_dataset = datasets.VideoDataset.create( - display_name=_TEST_DISPLAY_NAME, labels=_TEST_LABELS, sync=sync, timeout=None, + display_name=_TEST_DISPLAY_NAME, + labels=_TEST_LABELS, + sync=sync, + timeout=None, ) if not sync: diff --git a/tests/unit/aiplatform/test_models.py b/tests/unit/aiplatform/test_models.py index b54a7c63ad..f60abcf429 100644 --- a/tests/unit/aiplatform/test_models.py +++ b/tests/unit/aiplatform/test_models.py @@ -565,9 +565,7 @@ def test_upload_uploads_and_gets_model( ) @pytest.mark.parametrize("sync", [True, False]) - def test_upload_with_timeout( - self, upload_model_mock, get_model_mock, sync - ): + def test_upload_with_timeout(self, upload_model_mock, get_model_mock, sync): my_model = models.Model.upload( display_name=_TEST_MODEL_NAME, serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE, @@ -583,14 +581,13 @@ def test_upload_with_timeout( ) managed_model = gca_model.Model( - display_name=_TEST_MODEL_NAME, - container_spec=container_spec, + display_name=_TEST_MODEL_NAME, 
container_spec=container_spec, ) upload_model_mock.assert_called_once_with( parent=initializer.global_config.common_location_path(), model=managed_model, - timeout=180.0 + timeout=180.0, ) @pytest.mark.parametrize("sync", [True, False]) diff --git a/tests/unit/aiplatform/test_tensorboard.py b/tests/unit/aiplatform/test_tensorboard.py index 1a1d20b97a..79a587846d 100644 --- a/tests/unit/aiplatform/test_tensorboard.py +++ b/tests/unit/aiplatform/test_tensorboard.py @@ -339,7 +339,9 @@ def test_create_tensorboard_with_default_encryption_key( project=_TEST_PROJECT, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, ) - tensorboard.Tensorboard.create(display_name=_TEST_DISPLAY_NAME,) + tensorboard.Tensorboard.create( + display_name=_TEST_DISPLAY_NAME, timeout=None, + ) expected_tensorboard = gca_tensorboard.Tensorboard( display_name=_TEST_DISPLAY_NAME, encryption_spec=_TEST_ENCRYPTION_SPEC, @@ -349,6 +351,7 @@ def test_create_tensorboard_with_default_encryption_key( parent=_TEST_PARENT, tensorboard=expected_tensorboard, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) @pytest.mark.usefixtures("get_tensorboard_mock") @@ -359,6 +362,29 @@ def test_create_tensorboard(self, create_tensorboard_mock): tensorboard.Tensorboard.create( display_name=_TEST_DISPLAY_NAME, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + timeout=None, + ) + + expected_tensorboard = gca_tensorboard.Tensorboard( + display_name=_TEST_DISPLAY_NAME, encryption_spec=_TEST_ENCRYPTION_SPEC, + ) + + create_tensorboard_mock.assert_called_once_with( + parent=_TEST_PARENT, + tensorboard=expected_tensorboard, + metadata=_TEST_REQUEST_METADATA, + timeout=None, + ) + + @pytest.mark.usefixtures("get_tensorboard_mock") + def test_create_tensorboard_with_timeout(self, create_tensorboard_mock): + + aiplatform.init(project=_TEST_PROJECT,) + + tensorboard.Tensorboard.create( + display_name=_TEST_DISPLAY_NAME, + encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + timeout=180.0, ) expected_tensorboard = gca_tensorboard.Tensorboard( @@ -369,6 +395,7 @@ def test_create_tensorboard(self, create_tensorboard_mock): parent=_TEST_PARENT, tensorboard=expected_tensorboard, metadata=_TEST_REQUEST_METADATA, + timeout=180.0, ) @pytest.mark.usefixtures("get_tensorboard_mock") From a696ff48b5553d4f6896cf20cdc65031e6cb0bd1 Mon Sep 17 00:00:00 2001 From: Sara Robinson Date: Tue, 22 Mar 2022 14:52:13 -0400 Subject: [PATCH 04/19] Update system tests with timeout arg --- tests/system/aiplatform/test_dataset.py | 2 ++ tests/system/aiplatform/test_e2e_tabular.py | 1 + tests/system/aiplatform/test_tensorboard.py | 2 +- 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/system/aiplatform/test_dataset.py b/tests/system/aiplatform/test_dataset.py index 0167cb8f20..d847d8f678 100644 --- a/tests/system/aiplatform/test_dataset.py +++ b/tests/system/aiplatform/test_dataset.py @@ -16,6 +16,7 @@ # import os +from socket import timeout import uuid import pytest import importlib @@ -196,6 +197,7 @@ def test_get_new_dataset_and_import(self, dataset_gapic_client, shared_state): my_dataset.import_data( gcs_source=_TEST_TEXT_ENTITY_EXTRACTION_GCS_SOURCE, import_schema_uri=_TEST_TEXT_ENTITY_IMPORT_SCHEMA, + timeout=180.0, ) data_items_post_import = dataset_gapic_client.list_data_items( diff --git a/tests/system/aiplatform/test_e2e_tabular.py b/tests/system/aiplatform/test_e2e_tabular.py index ee0692014a..fda1608b11 100644 --- a/tests/system/aiplatform/test_e2e_tabular.py +++ b/tests/system/aiplatform/test_e2e_tabular.py @@ -82,6 +82,7 @@ def 
test_end_to_end_tabular(self, shared_state): ds = aiplatform.TabularDataset.create( display_name=self._make_display_name("dataset"), gcs_source=[dataset_gcs_source], + timeout=180.0, sync=False, ) diff --git a/tests/system/aiplatform/test_tensorboard.py b/tests/system/aiplatform/test_tensorboard.py index 5c3d3f003a..487e9f0d89 100644 --- a/tests/system/aiplatform/test_tensorboard.py +++ b/tests/system/aiplatform/test_tensorboard.py @@ -31,7 +31,7 @@ def test_create_and_get_tensorboard(self, shared_state): display_name = self._make_display_name("tensorboard") - tb = aiplatform.Tensorboard.create(display_name=display_name) + tb = aiplatform.Tensorboard.create(display_name=display_name, timeout=180.0,) shared_state["resources"] = [tb] From 157227390bf3814e8d7aa571af5c0dbc170bba9b Mon Sep 17 00:00:00 2001 From: Sara Robinson Date: Wed, 23 Mar 2022 11:18:08 -0400 Subject: [PATCH 05/19] rename timeout arg with method name and update tests --- google/cloud/aiplatform/datasets/dataset.py | 51 ++++++++------- .../aiplatform/datasets/image_dataset.py | 10 +-- .../aiplatform/datasets/tabular_dataset.py | 10 +-- .../cloud/aiplatform/datasets/text_dataset.py | 10 +-- .../aiplatform/datasets/video_dataset.py | 10 +-- google/cloud/aiplatform/models.py | 28 +++++++-- .../tensorboard/tensorboard_resource.py | 10 +-- tests/system/aiplatform/test_e2e_tabular.py | 2 +- tests/unit/aiplatform/test_datasets.py | 62 +++++++++---------- tests/unit/aiplatform/test_end_to_end.py | 7 ++- tests/unit/aiplatform/test_models.py | 15 +++-- tests/unit/aiplatform/test_tensorboard.py | 6 +- 12 files changed, 134 insertions(+), 87 deletions(-) diff --git a/google/cloud/aiplatform/datasets/dataset.py b/google/cloud/aiplatform/datasets/dataset.py index 37f29d6194..5b26b67431 100644 --- a/google/cloud/aiplatform/datasets/dataset.py +++ b/google/cloud/aiplatform/datasets/dataset.py @@ -118,7 +118,7 @@ def create( request_metadata: Optional[Sequence[Tuple[str, str]]] = (), labels: Optional[Dict[str, str]] = None, encryption_spec_key_name: Optional[str] = None, - timeout: Optional[float] = None, + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> "_Dataset": """Creates a new dataset and optionally imports data into dataset when @@ -200,8 +200,10 @@ def create( If set, this Dataset and all sub-resources of this Dataset will be secured by this key. Overrides encryption_spec_key_name set in aiplatform.init. - timeout (float): - Optional. The timeout for this request in seconds. + create_request_timeout (float): + Optional. The timeout for initiating this create request in seconds. Note: + this does not set the timeout on the underlying create job, only on the time + to initiate the create request. sync (bool): Whether to execute this method synchronously. 
If False, this method will be executed in concurrent Future and any downstream object will @@ -242,7 +244,7 @@ def create( encryption_spec=initializer.global_config.get_encryption_spec( encryption_spec_key_name=encryption_spec_key_name ), - timeout=timeout, + create_request_timeout=create_request_timeout, sync=sync, ) @@ -261,7 +263,7 @@ def _create_and_import( request_metadata: Optional[Sequence[Tuple[str, str]]] = (), labels: Optional[Dict[str, str]] = None, encryption_spec: Optional[gca_encryption_spec.EncryptionSpec] = None, - timeout: Optional[float] = None, + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> "_Dataset": """Creates a new dataset and optionally imports data into dataset when @@ -314,8 +316,10 @@ def _create_and_import( resource is created. If set, this Dataset and all sub-resources of this Dataset will be secured by this key. - timeout (float): - Optional. The timeout for this request in seconds. + create_request_timeout (float): + Optional. The timeout for initiating this create request in seconds. Note: + this does not set the timeout on the underlying create job, only on the time + to initiate the create request. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -335,7 +339,7 @@ def _create_and_import( request_metadata=request_metadata, labels=labels, encryption_spec=encryption_spec, - timeout=timeout, + create_request_timeout=create_request_timeout, ) _LOGGER.log_create_with_lro(cls, create_dataset_lro) @@ -352,17 +356,18 @@ def _create_and_import( ) # Import if import datasource is DatasourceImportable + # import_request_timeout is None since user is issuing a single request with create and import if isinstance(datasource, _datasources.DatasourceImportable): - dataset_obj._import_and_wait(datasource, timeout=None) + dataset_obj._import_and_wait(datasource, import_request_timeout=None) return dataset_obj - def _import_and_wait(self, datasource, timeout): + def _import_and_wait(self, datasource, import_request_timeout): _LOGGER.log_action_start_against_resource( "Importing", "data", self, ) - import_lro = self._import(datasource=datasource, timeout=timeout) + import_lro = self._import(datasource=datasource, import_request_timeout=import_request_timeout) _LOGGER.log_action_started_against_resource_with_lro( "Import", "data", self.__class__, import_lro @@ -383,7 +388,7 @@ def _create( request_metadata: Sequence[Tuple[str, str]] = (), labels: Optional[Dict[str, str]] = None, encryption_spec: Optional[gca_encryption_spec.EncryptionSpec] = None, - timeout: Optional[float] = None, + create_request_timeout: Optional[float] = None, ) -> operation.Operation: """Creates a new managed dataset by directly calling API client. @@ -426,8 +431,10 @@ def _create( resource is created. If set, this Dataset and all sub-resources of this Dataset will be secured by this key. - timeout (float): - Optional. The timeout for this request in seconds. + create_request_timeout (float): + Optional. The timeout for initiating this create request in seconds. Note: + this does not set the timeout on the underlying create job, only on the time + to initiate the create request. Returns: operation (Operation): An object representing a long-running operation. 
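To illustrate the renamed request-timeout parameters introduced in this patch, here is a short sketch based on the updated unit and system tests; the project, bucket, and serving image are placeholders, and 180 seconds simply matches the value used in those tests.

    from google.cloud import aiplatform

    aiplatform.init(project="my-project", location="us-central1")  # placeholders

    # Dataset creation: bounds only the initiation of the create request.
    ds = aiplatform.TabularDataset.create(
        display_name="my-tabular-dataset",
        gcs_source=["gs://my-bucket/data.csv"],  # placeholder source
        create_request_timeout=180.0,
    )

    # Model upload: bounds only the initiation of the upload request.
    model = aiplatform.Model.upload(
        display_name="my-model",
        serving_container_image_uri="gcr.io/my-project/my-serving-image",  # placeholder image
        upload_request_timeout=180.0,
    )

    # Tensorboard creation uses the same create_request_timeout name.
    tb = aiplatform.Tensorboard.create(
        display_name="my-tensorboard",
        create_request_timeout=180.0,
    )
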
@@ -445,13 +452,13 @@ def _create( parent=parent, dataset=gapic_dataset, metadata=request_metadata, - timeout=timeout, + timeout=create_request_timeout, ) def _import( self, datasource: _datasources.DatasourceImportable, - timeout: Optional[float] = None, + import_request_timeout: Optional[float] = None, ) -> operation.Operation: """Imports data into managed dataset by directly calling API client. @@ -466,7 +473,7 @@ def _import( return self.api_client.import_data( name=self.resource_name, import_configs=[datasource.import_data_config], - timeout=timeout, + timeout=import_request_timeout, ) @base.optional_sync(return_input_arg="self") @@ -475,7 +482,7 @@ def import_data( gcs_source: Union[str, Sequence[str]], import_schema_uri: str, data_item_labels: Optional[Dict] = None, - timeout: Optional[float] = None, + import_request_timeout: Optional[float] = None, sync: bool = True, ) -> "_Dataset": """Upload data to existing managed dataset. @@ -510,8 +517,10 @@ def import_data( labels specified inside index file referenced by ``import_schema_uri``, e.g. jsonl file. - timeout (float): - Optional. The timeout for this request in seconds. + import_request_timeout (float): + Optional. The timeout for initiating this import request in seconds. Note: + this does not set the timeout on the underlying import job, only on the time + to initiate the import request. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -528,7 +537,7 @@ def import_data( data_item_labels=data_item_labels, ) - self._import_and_wait(datasource=datasource, timeout=timeout) + self._import_and_wait(datasource=datasource, import_request_timeout=import_request_timeout) return self # TODO(b/174751568) add optional sync support diff --git a/google/cloud/aiplatform/datasets/image_dataset.py b/google/cloud/aiplatform/datasets/image_dataset.py index 592071c4e8..1d6325418b 100644 --- a/google/cloud/aiplatform/datasets/image_dataset.py +++ b/google/cloud/aiplatform/datasets/image_dataset.py @@ -46,7 +46,7 @@ def create( request_metadata: Optional[Sequence[Tuple[str, str]]] = (), labels: Optional[Dict[str, str]] = None, encryption_spec_key_name: Optional[str] = None, - timeout: Optional[float] = None, + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> "ImageDataset": """Creates a new image dataset and optionally imports data into dataset @@ -118,8 +118,10 @@ def create( If set, this Dataset and all sub-resources of this Dataset will be secured by this key. Overrides encryption_spec_key_name set in aiplatform.init. - timeout (float): - Optional. The timeout for this request in seconds. + create_request_timeout (float): + Optional. The timeout for initiating this create request in seconds. Note: + this does not set the timeout on the underlying create job, only on the time + to initiate the create request. sync (bool): Whether to execute this method synchronously. 
If False, this method will be executed in concurrent Future and any downstream object will @@ -161,6 +163,6 @@ def create( encryption_spec=initializer.global_config.get_encryption_spec( encryption_spec_key_name=encryption_spec_key_name ), - timeout=timeout, + create_request_timeout=create_request_timeout, sync=sync, ) diff --git a/google/cloud/aiplatform/datasets/tabular_dataset.py b/google/cloud/aiplatform/datasets/tabular_dataset.py index 2164c88c37..6d535e4258 100644 --- a/google/cloud/aiplatform/datasets/tabular_dataset.py +++ b/google/cloud/aiplatform/datasets/tabular_dataset.py @@ -45,7 +45,7 @@ def create( request_metadata: Optional[Sequence[Tuple[str, str]]] = (), labels: Optional[Dict[str, str]] = None, encryption_spec_key_name: Optional[str] = None, - timeout: Optional[float] = None, + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> "TabularDataset": """Creates a new tabular dataset. @@ -99,8 +99,10 @@ def create( If set, this Dataset and all sub-resources of this Dataset will be secured by this key. Overrides encryption_spec_key_name set in aiplatform.init. - timeout (float): - The timeout for this request in seconds. + create_request_timeout (float): + Optional. The timeout for initiating this create request in seconds. Note: + this does not set the timeout on the underlying create job, only on the time + to initiate the create request. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -141,7 +143,7 @@ def create( encryption_spec=initializer.global_config.get_encryption_spec( encryption_spec_key_name=encryption_spec_key_name ), - timeout=timeout, + create_request_timeout=create_request_timeout, sync=sync, ) diff --git a/google/cloud/aiplatform/datasets/text_dataset.py b/google/cloud/aiplatform/datasets/text_dataset.py index f4c5dfab6d..720370ab19 100644 --- a/google/cloud/aiplatform/datasets/text_dataset.py +++ b/google/cloud/aiplatform/datasets/text_dataset.py @@ -46,7 +46,7 @@ def create( request_metadata: Optional[Sequence[Tuple[str, str]]] = (), labels: Optional[Dict[str, str]] = None, encryption_spec_key_name: Optional[str] = None, - timeout: Optional[float] = None, + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> "TextDataset": """Creates a new text dataset and optionally imports data into dataset @@ -125,8 +125,10 @@ def create( If set, this Dataset and all sub-resources of this Dataset will be secured by this key. Overrides encryption_spec_key_name set in aiplatform.init. - timeout (float): - Optional. The timeout for this request in seconds. + create_request_timeout (float): + Optional. The timeout for initiating this create request in seconds. Note: + this does not set the timeout on the underlying create job, only on the time + to initiate the create request. sync (bool): Whether to execute this method synchronously. 
If False, this method will be executed in concurrent Future and any downstream object will @@ -168,6 +170,6 @@ def create( encryption_spec=initializer.global_config.get_encryption_spec( encryption_spec_key_name=encryption_spec_key_name ), - timeout=timeout, + create_request_timeout=create_request_timeout, sync=sync, ) diff --git a/google/cloud/aiplatform/datasets/video_dataset.py b/google/cloud/aiplatform/datasets/video_dataset.py index 1d95bdd2a7..28f123715a 100644 --- a/google/cloud/aiplatform/datasets/video_dataset.py +++ b/google/cloud/aiplatform/datasets/video_dataset.py @@ -46,7 +46,7 @@ def create( request_metadata: Optional[Sequence[Tuple[str, str]]] = (), labels: Optional[Dict[str, str]] = None, encryption_spec_key_name: Optional[str] = None, - timeout: Optional[float] = None, + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> "VideoDataset": """Creates a new video dataset and optionally imports data into dataset @@ -118,8 +118,10 @@ def create( If set, this Dataset and all sub-resources of this Dataset will be secured by this key. Overrides encryption_spec_key_name set in aiplatform.init. - timeout (float): - Optional. The timeout for this request in seconds. + create_request_timeout (float): + Optional. The timeout for initiating this create request in seconds. Note: + this does not set the timeout on the underlying create job, only on the time + to initiate the create request. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -161,6 +163,6 @@ def create( encryption_spec=initializer.global_config.get_encryption_spec( encryption_spec_key_name=encryption_spec_key_name ), - timeout=timeout, + create_request_timeout=create_request_timeout, sync=sync, ) diff --git a/google/cloud/aiplatform/models.py b/google/cloud/aiplatform/models.py index 2f70a13471..3dca5dd919 100644 --- a/google/cloud/aiplatform/models.py +++ b/google/cloud/aiplatform/models.py @@ -1646,7 +1646,7 @@ def upload( labels: Optional[Dict[str, str]] = None, encryption_spec_key_name: Optional[str] = None, staging_bucket: Optional[str] = None, - timeout: Optional[float] = None, + upload_request_timeout: Optional[float] = None, sync=True, ) -> "Model": """Uploads a model and returns a Model representing the uploaded Model @@ -1793,8 +1793,10 @@ def upload( staging_bucket (str): Optional. Bucket to stage local model artifacts. Overrides staging_bucket set in aiplatform.init. - timeout (float): - Optional. The timeout for this request in seconds. + upload_request_timeout (float): + Optional. The timeout for initiating this upload request in seconds. Note: + this does not set the timeout on the underlying upload job, only on the time + to initiate the upload request. Returns: model: Instantiated representation of the uploaded model resource. Raises: @@ -1901,7 +1903,7 @@ def upload( lro = api_client.upload_model( parent=initializer.global_config.common_location_path(project, location), model=managed_model, - timeout=timeout, + timeout=upload_request_timeout, ) _LOGGER.log_create_with_lro(cls, lro) @@ -2604,6 +2606,7 @@ def upload_xgboost_model_file( labels: Optional[Dict[str, str]] = None, encryption_spec_key_name: Optional[str] = None, staging_bucket: Optional[str] = None, + upload_request_timeout: Optional[float] = None, sync=True, ) -> "Model": """Uploads a model and returns a Model representing the uploaded Model @@ -2714,6 +2717,10 @@ def upload_xgboost_model_file( staging_bucket (str): Optional. 
Bucket to stage local model artifacts. Overrides staging_bucket set in aiplatform.init. + upload_request_timeout (float): + Optional. The timeout for initiating this upload request in seconds. Note: + this does not set the timeout on the underlying upload job, only on the time + to initiate the upload request. Returns: model: Instantiated representation of the uploaded model resource. Raises: @@ -2777,6 +2784,7 @@ def upload_xgboost_model_file( labels=labels, encryption_spec_key_name=encryption_spec_key_name, staging_bucket=staging_bucket, + upload_request_timeout=upload_request_timeout, sync=True, ) @@ -2799,6 +2807,7 @@ def upload_scikit_learn_model_file( labels: Optional[Dict[str, str]] = None, encryption_spec_key_name: Optional[str] = None, staging_bucket: Optional[str] = None, + upload_request_timeout: Optional[float] = None, sync=True, ) -> "Model": """Uploads a model and returns a Model representing the uploaded Model @@ -2910,6 +2919,10 @@ def upload_scikit_learn_model_file( staging_bucket (str): Optional. Bucket to stage local model artifacts. Overrides staging_bucket set in aiplatform.init. + upload_request_timeout (float): + Optional. The timeout for initiating this upload request in seconds. Note: + this does not set the timeout on the underlying upload job, only on the time + to initiate the upload request. Returns: model: Instantiated representation of the uploaded model resource. Raises: @@ -2972,6 +2985,7 @@ def upload_scikit_learn_model_file( labels=labels, encryption_spec_key_name=encryption_spec_key_name, staging_bucket=staging_bucket, + upload_request_timeout=upload_request_timeout, sync=True, ) @@ -2994,6 +3008,7 @@ def upload_tensorflow_saved_model( labels: Optional[Dict[str, str]] = None, encryption_spec_key_name: Optional[str] = None, staging_bucket: Optional[str] = None, + upload_request_timeout: Optional[str] = None, sync=True, ) -> "Model": """Uploads a model and returns a Model representing the uploaded Model @@ -3107,6 +3122,10 @@ def upload_tensorflow_saved_model( staging_bucket (str): Optional. Bucket to stage local model artifacts. Overrides staging_bucket set in aiplatform.init. + upload_request_timeout (float): + Optional. The timeout for initiating this upload request in seconds. Note: + this does not set the timeout on the underlying upload job, only on the time + to initiate the upload request. Returns: model: Instantiated representation of the uploaded model resource. Raises: @@ -3137,5 +3156,6 @@ def upload_tensorflow_saved_model( labels=labels, encryption_spec_key_name=encryption_spec_key_name, staging_bucket=staging_bucket, + upload_request_timeout=upload_request_timeout, sync=sync, ) diff --git a/google/cloud/aiplatform/tensorboard/tensorboard_resource.py b/google/cloud/aiplatform/tensorboard/tensorboard_resource.py index f8db00fb61..ef06d16178 100644 --- a/google/cloud/aiplatform/tensorboard/tensorboard_resource.py +++ b/google/cloud/aiplatform/tensorboard/tensorboard_resource.py @@ -90,7 +90,7 @@ def create( credentials: Optional[auth_credentials.Credentials] = None, request_metadata: Optional[Sequence[Tuple[str, str]]] = (), encryption_spec_key_name: Optional[str] = None, - timeout: Optional[float] = None, + create_request_timeout: Optional[float] = None, ) -> "Tensorboard": """Creates a new tensorboard. @@ -144,8 +144,10 @@ def create( If set, this Tensorboard and all sub-resources of this Tensorboard will be secured by this key. Overrides encryption_spec_key_name set in aiplatform.init. - timeout (float): - Optional. 
The timeout for this request in seconds. + create_request_timeout (float): + Optional. The timeout for initiating this create request in seconds. Note: + this does not set the timeout on the underlying create job, only on the time + to initiate the create request. Returns: tensorboard (Tensorboard): @@ -177,7 +179,7 @@ def create( parent=parent, tensorboard=gapic_tensorboard, metadata=request_metadata, - timeout=timeout, + timeout=create_request_timeout, ) _LOGGER.log_create_with_lro(cls, create_tensorboard_lro) diff --git a/tests/system/aiplatform/test_e2e_tabular.py b/tests/system/aiplatform/test_e2e_tabular.py index fda1608b11..74474af991 100644 --- a/tests/system/aiplatform/test_e2e_tabular.py +++ b/tests/system/aiplatform/test_e2e_tabular.py @@ -82,7 +82,7 @@ def test_end_to_end_tabular(self, shared_state): ds = aiplatform.TabularDataset.create( display_name=self._make_display_name("dataset"), gcs_source=[dataset_gcs_source], - timeout=180.0, + create_request_timeout=180.0, sync=False, ) diff --git a/tests/unit/aiplatform/test_datasets.py b/tests/unit/aiplatform/test_datasets.py index 2d472c1316..272f70b638 100644 --- a/tests/unit/aiplatform/test_datasets.py +++ b/tests/unit/aiplatform/test_datasets.py @@ -579,7 +579,7 @@ def test_init_aiplatform_with_encryption_key_name_and_create_dataset( display_name=_TEST_DISPLAY_NAME, metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR, sync=sync, - timeout=None, + create_request_timeout=None, ) if not sync: @@ -608,7 +608,7 @@ def test_create_dataset_nontabular(self, create_dataset_mock, sync): display_name=_TEST_DISPLAY_NAME, metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - timeout=None, + create_request_timeout=None, sync=sync, ) @@ -638,7 +638,7 @@ def test_create_dataset_nontabular_with_timeout(self, create_dataset_mock, sync) display_name=_TEST_DISPLAY_NAME, metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - timeout=180.0, + create_request_timeout=180.0, sync=sync, ) @@ -668,7 +668,7 @@ def test_create_dataset_tabular(self, create_dataset_mock): metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TABULAR, bq_source=_TEST_SOURCE_URI_BQ, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - timeout=None, + create_request_timeout=None, ) expected_dataset = gca_dataset.Dataset( @@ -693,7 +693,7 @@ def test_create_dataset_tabular_with_timeout(self, create_dataset_mock): display_name=_TEST_DISPLAY_NAME, metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - timeout=180.0, + create_request_timeout=180.0, ) expected_dataset = gca_dataset.Dataset( @@ -724,7 +724,7 @@ def test_create_and_import_dataset( import_schema_uri=_TEST_IMPORT_SCHEMA_URI, data_item_labels=_TEST_DATA_LABEL_ITEMS, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - timeout=None, + create_request_timeout=None, sync=sync, ) @@ -769,7 +769,7 @@ def test_import_data(self, import_data_mock, sync): gcs_source=_TEST_SOURCE_URI_GCS, import_schema_uri=_TEST_IMPORT_SCHEMA_URI, data_item_labels=_TEST_DATA_LABEL_ITEMS, - timeout=None, + import_request_timeout=None, sync=sync, ) @@ -797,7 +797,7 @@ def test_import_data_with_timeout(self, import_data_mock, sync): gcs_source=_TEST_SOURCE_URI_GCS, import_schema_uri=_TEST_IMPORT_SCHEMA_URI, data_item_labels=_TEST_DATA_LABEL_ITEMS, - timeout=180.0, + import_request_timeout=180.0, sync=sync, ) @@ -841,7 +841,7 @@ def test_create_then_import( 
display_name=_TEST_DISPLAY_NAME, metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - timeout=None, + create_request_timeout=None, sync=sync, ) @@ -849,7 +849,7 @@ def test_create_then_import( gcs_source=_TEST_SOURCE_URI_GCS, import_schema_uri=_TEST_IMPORT_SCHEMA_URI, data_item_labels=_TEST_DATA_LABEL_ITEMS, - timeout=None, + import_request_timeout=None, sync=sync, ) @@ -930,7 +930,7 @@ def test_create_dataset(self, create_dataset_mock, sync): ) my_dataset = datasets.ImageDataset.create( - display_name=_TEST_DISPLAY_NAME, sync=sync, timeout=None, + display_name=_TEST_DISPLAY_NAME, sync=sync, create_request_timeout=None, ) if not sync: @@ -962,7 +962,7 @@ def test_create_and_import_dataset( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_IMAGE, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - timeout=None, + create_request_timeout=None, sync=sync, ) @@ -1004,7 +1004,7 @@ def test_import_data(self, import_data_mock, sync): my_dataset.import_data( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_IMAGE, - timeout=None, + import_request_timeout=None, sync=sync, ) @@ -1030,14 +1030,14 @@ def test_create_then_import( my_dataset = datasets.ImageDataset.create( display_name=_TEST_DISPLAY_NAME, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - timeout=None, + create_request_timeout=None, sync=sync, ) my_dataset.import_data( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_IMAGE, - timeout=None, + import_request_timeout=None, sync=sync, ) @@ -1084,7 +1084,7 @@ def test_create_dataset_with_labels(self, create_dataset_mock, sync): display_name=_TEST_DISPLAY_NAME, labels=_TEST_LABELS, sync=sync, - timeout=None, + create_request_timeout=None, ) if not sync: @@ -1141,7 +1141,7 @@ def test_create_dataset_with_default_encryption_key( display_name=_TEST_DISPLAY_NAME, bq_source=_TEST_SOURCE_URI_BQ, sync=sync, - timeout=None, + create_request_timeout=None, ) if not sync: @@ -1197,7 +1197,7 @@ def test_create_dataset(self, create_dataset_mock, sync): display_name=_TEST_DISPLAY_NAME, bq_source=_TEST_SOURCE_URI_BQ, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - timeout=None, + create_request_timeout=None, sync=sync, ) @@ -1341,7 +1341,7 @@ def test_create_dataset_with_labels(self, create_dataset_mock, sync): bq_source=_TEST_SOURCE_URI_BQ, labels=_TEST_LABELS, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - timeout=None, + create_request_timeout=None, sync=sync, ) @@ -1393,7 +1393,7 @@ def test_create_dataset(self, create_dataset_mock, sync): ) my_dataset = datasets.TextDataset.create( - display_name=_TEST_DISPLAY_NAME, sync=sync, timeout=None, + display_name=_TEST_DISPLAY_NAME, sync=sync, create_request_timeout=None, ) if not sync: @@ -1425,7 +1425,7 @@ def test_create_and_import_dataset( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_TEXT, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - timeout=None, + create_request_timeout=None, sync=sync, ) @@ -1504,7 +1504,7 @@ def test_import_data(self, import_data_mock, sync): my_dataset.import_data( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_TEXT, - timeout=None, + import_request_timeout=None, sync=sync, ) @@ -1530,14 +1530,14 @@ def test_create_then_import( my_dataset = datasets.TextDataset.create( display_name=_TEST_DISPLAY_NAME, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - timeout=None, + create_request_timeout=None, sync=sync, 
) my_dataset.import_data( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_TEXT, - timeout=None, + import_request_timeout=None, sync=sync, ) @@ -1584,7 +1584,7 @@ def test_create_dataset_with_labels(self, create_dataset_mock, sync): display_name=_TEST_DISPLAY_NAME, labels=_TEST_LABELS, sync=sync, - timeout=None, + create_request_timeout=None, ) if not sync: @@ -1635,7 +1635,7 @@ def test_create_dataset(self, create_dataset_mock, sync): ) my_dataset = datasets.VideoDataset.create( - display_name=_TEST_DISPLAY_NAME, sync=sync, timeout=None, + display_name=_TEST_DISPLAY_NAME, sync=sync, create_request_timeout=None, ) if not sync: @@ -1667,7 +1667,7 @@ def test_create_and_import_dataset( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_VIDEO, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - timeout=None, + create_request_timeout=None, sync=sync, ) @@ -1709,7 +1709,7 @@ def test_import_data(self, import_data_mock, sync): my_dataset.import_data( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_VIDEO, - timeout=None, + import_request_timeout=None, sync=sync, ) @@ -1735,14 +1735,14 @@ def test_create_then_import( my_dataset = datasets.VideoDataset.create( display_name=_TEST_DISPLAY_NAME, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - timeout=None, + create_request_timeout=None, sync=sync, ) my_dataset.import_data( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_VIDEO, - timeout=None, + import_request_timeout=None, sync=sync, ) @@ -1789,7 +1789,7 @@ def test_create_dataset_with_labels(self, create_dataset_mock, sync): display_name=_TEST_DISPLAY_NAME, labels=_TEST_LABELS, sync=sync, - timeout=None, + create_request_timeout=None, ) if not sync: diff --git a/tests/unit/aiplatform/test_end_to_end.py b/tests/unit/aiplatform/test_end_to_end.py index 09a3db12f4..6251886b7d 100644 --- a/tests/unit/aiplatform/test_end_to_end.py +++ b/tests/unit/aiplatform/test_end_to_end.py @@ -102,6 +102,7 @@ def test_dataset_create_to_model_predict( my_dataset = aiplatform.ImageDataset.create( display_name=test_datasets._TEST_DISPLAY_NAME, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + create_request_timeout=None, sync=sync, ) @@ -109,6 +110,7 @@ def test_dataset_create_to_model_predict( gcs_source=test_datasets._TEST_SOURCE_URI_GCS, import_schema_uri=test_datasets._TEST_IMPORT_SCHEMA_URI, data_item_labels=test_datasets._TEST_DATA_LABEL_ITEMS, + import_request_timeout=None, sync=sync, ) @@ -194,10 +196,13 @@ def test_dataset_create_to_model_predict( parent=test_datasets._TEST_PARENT, dataset=expected_dataset, metadata=test_datasets._TEST_REQUEST_METADATA, + timeout=None, ) import_data_mock.assert_called_once_with( - name=test_datasets._TEST_NAME, import_configs=[expected_import_config] + name=test_datasets._TEST_NAME, + import_configs=[expected_import_config], + timeout=None, ) expected_dataset.name = test_datasets._TEST_NAME diff --git a/tests/unit/aiplatform/test_models.py b/tests/unit/aiplatform/test_models.py index f60abcf429..2db2099e2e 100644 --- a/tests/unit/aiplatform/test_models.py +++ b/tests/unit/aiplatform/test_models.py @@ -537,7 +537,7 @@ def test_upload_uploads_and_gets_model( serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE, serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE, serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE, - timeout=None, + upload_request_timeout=None, sync=sync, ) @@ -569,7 +569,7 @@ def 
test_upload_with_timeout(self, upload_model_mock, get_model_mock, sync): my_model = models.Model.upload( display_name=_TEST_MODEL_NAME, serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE, - timeout=180.0, + upload_request_timeout=180.0, sync=sync, ) @@ -601,7 +601,7 @@ def test_upload_uploads_and_gets_model_with_labels( serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE, serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE, labels=_TEST_LABEL, - timeout=None, + upload_request_timeout=None, sync=sync, ) @@ -665,7 +665,7 @@ def test_upload_uploads_and_gets_model_with_all_args( explanation_metadata=_TEST_EXPLANATION_METADATA, explanation_parameters=_TEST_EXPLANATION_PARAMETERS, labels=_TEST_LABEL, - timeout=None, + upload_request_timeout=None, sync=sync, ) @@ -738,7 +738,7 @@ def test_upload_uploads_and_gets_model_with_custom_project( serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE, serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE, project=_TEST_PROJECT_2, - timeout=None, + upload_request_timeout=None, sync=sync, ) @@ -827,7 +827,7 @@ def test_upload_uploads_and_gets_model_with_custom_location( serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE, serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE, location=_TEST_LOCATION_2, - timeout=None, + upload_request_timeout=None, sync=sync, ) @@ -1549,6 +1549,7 @@ def test_upload_xgboost_model_file_uploads_and_gets_model( display_name=_TEST_MODEL_NAME, project=_TEST_PROJECT, location=_TEST_LOCATION, + upload_request_timeout=None, sync=sync, ) @@ -1658,6 +1659,7 @@ def test_upload_scikit_learn_model_file_uploads_and_gets_model( display_name=_TEST_MODEL_NAME, project=_TEST_PROJECT, location=_TEST_LOCATION, + upload_request_timeout=None, sync=sync, ) @@ -1707,6 +1709,7 @@ def test_upload_tensorflow_saved_model_uploads_and_gets_model( display_name=_TEST_MODEL_NAME, project=_TEST_PROJECT, location=_TEST_LOCATION, + upload_request_timeout=None, sync=sync, ) diff --git a/tests/unit/aiplatform/test_tensorboard.py b/tests/unit/aiplatform/test_tensorboard.py index 79a587846d..dbe1054686 100644 --- a/tests/unit/aiplatform/test_tensorboard.py +++ b/tests/unit/aiplatform/test_tensorboard.py @@ -340,7 +340,7 @@ def test_create_tensorboard_with_default_encryption_key( ) tensorboard.Tensorboard.create( - display_name=_TEST_DISPLAY_NAME, timeout=None, + display_name=_TEST_DISPLAY_NAME, create_request_timeout=None, ) expected_tensorboard = gca_tensorboard.Tensorboard( @@ -362,7 +362,7 @@ def test_create_tensorboard(self, create_tensorboard_mock): tensorboard.Tensorboard.create( display_name=_TEST_DISPLAY_NAME, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - timeout=None, + create_request_timeout=None, ) expected_tensorboard = gca_tensorboard.Tensorboard( @@ -384,7 +384,7 @@ def test_create_tensorboard_with_timeout(self, create_tensorboard_mock): tensorboard.Tensorboard.create( display_name=_TEST_DISPLAY_NAME, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - timeout=180.0, + create_request_timeout=180.0, ) expected_tensorboard = gca_tensorboard.Tensorboard( From 5bf72f4e1e510d66546af3ae7f0764827bc59d05 Mon Sep 17 00:00:00 2001 From: Sara Robinson Date: Wed, 23 Mar 2022 11:37:30 -0400 Subject: [PATCH 06/19] add deploy_request_timeout to Model deploy --- google/cloud/aiplatform/models.py | 19 +++++++++++ tests/unit/aiplatform/test_end_to_end.py | 2 +- tests/unit/aiplatform/test_models.py | 41 ++++++++++++++++++++++++ 3 files changed, 61 
insertions(+), 1 deletion(-) diff --git a/google/cloud/aiplatform/models.py b/google/cloud/aiplatform/models.py index 3dca5dd919..ffe20675da 100644 --- a/google/cloud/aiplatform/models.py +++ b/google/cloud/aiplatform/models.py @@ -15,6 +15,7 @@ # limitations under the License. # import pathlib +from socket import timeout import proto import re import shutil @@ -839,6 +840,7 @@ def _deploy_call( explanation_metadata: Optional[explain.ExplanationMetadata] = None, explanation_parameters: Optional[explain.ExplanationParameters] = None, metadata: Optional[Sequence[Tuple[str, str]]] = (), + deploy_request_timeout: Optional[float] = None, ): """Helper method to deploy model to endpoint. @@ -906,6 +908,10 @@ def _deploy_call( metadata (Sequence[Tuple[str, str]]): Optional. Strings which should be sent along with the request as metadata. + deploy_request_timeout (float): + Optional. The timeout for initiating this deploy request in seconds. Note: + this does not set the timeout on the underlying deploy job, only on the time + to initiate the deploy request. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -1015,6 +1021,7 @@ def _deploy_call( deployed_model=deployed_model, traffic_split=traffic_split, metadata=metadata, + timeout=deploy_request_timeout, ) _LOGGER.log_action_started_against_resource_with_lro( @@ -1933,6 +1940,7 @@ def deploy( explanation_parameters: Optional[explain.ExplanationParameters] = None, metadata: Optional[Sequence[Tuple[str, str]]] = (), encryption_spec_key_name: Optional[str] = None, + deploy_request_timeout: Optional[float] = None, sync=True, ) -> Endpoint: """Deploys model to endpoint. Endpoint will be created if unspecified. @@ -2012,6 +2020,10 @@ def deploy( If set, this Model and all sub-resources of this Model will be secured by this key. Overrides encryption_spec_key_name set in aiplatform.init + deploy_request_timeout (float): + Optional. The timeout for initiating this deploy request in seconds. Note: + this does not set the timeout on the underlying deploy job, only on the time + to initiate the deploy request. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -2048,6 +2060,7 @@ def deploy( metadata=metadata, encryption_spec_key_name=encryption_spec_key_name or initializer.global_config.encryption_spec_key_name, + deploy_request_timeout=deploy_request_timeout, sync=sync, ) @@ -2068,6 +2081,7 @@ def _deploy( explanation_parameters: Optional[explain.ExplanationParameters] = None, metadata: Optional[Sequence[Tuple[str, str]]] = (), encryption_spec_key_name: Optional[str] = None, + deploy_request_timeout: Optional[float] = None, sync: bool = True, ) -> Endpoint: """Deploys model to endpoint. Endpoint will be created if unspecified. @@ -2147,6 +2161,10 @@ def _deploy( If set, this Model and all sub-resources of this Model will be secured by this key. Overrides encryption_spec_key_name set in aiplatform.init + deploy_request_timeout (float): + Optional. The timeout for initiating this deploy request in seconds. Note: + this does not set the timeout on the underlying deploy job, only on the time + to initiate the deploy request. sync (bool): Whether to execute this method synchronously. 
If False, this method will be executed in concurrent Future and any downstream object will @@ -2184,6 +2202,7 @@ def _deploy( service_account=service_account, explanation_metadata=explanation_metadata, explanation_parameters=explanation_parameters, + deploy_request_timeout=deploy_request_timeout, metadata=metadata, ) diff --git a/tests/unit/aiplatform/test_end_to_end.py b/tests/unit/aiplatform/test_end_to_end.py index 6251886b7d..8c9c796263 100644 --- a/tests/unit/aiplatform/test_end_to_end.py +++ b/tests/unit/aiplatform/test_end_to_end.py @@ -200,7 +200,7 @@ def test_dataset_create_to_model_predict( ) import_data_mock.assert_called_once_with( - name=test_datasets._TEST_NAME, + name=test_datasets._TEST_NAME, import_configs=[expected_import_config], timeout=None, ) diff --git a/tests/unit/aiplatform/test_models.py b/tests/unit/aiplatform/test_models.py index 2db2099e2e..9d71078666 100644 --- a/tests/unit/aiplatform/test_models.py +++ b/tests/unit/aiplatform/test_models.py @@ -18,6 +18,7 @@ import importlib from concurrent import futures import pathlib +from socket import timeout import pytest from unittest import mock from unittest.mock import patch @@ -885,6 +886,41 @@ def test_deploy(self, deploy_model_mock, sync): deployed_model=deployed_model, traffic_split={"0": 100}, metadata=(), + timeout=None, + ) + + @pytest.mark.usefixtures( + "get_endpoint_mock", "get_model_mock", "create_endpoint_mock" + ) + @pytest.mark.parametrize("sync", [True, False]) + def test_deploy_with_timeout(self, deploy_model_mock, sync): + + test_model = models.Model(_TEST_ID) + test_model._gca_resource.supported_deployment_resources_types.append( + aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES + ) + + test_endpoint = models.Endpoint(_TEST_ID) + + test_model.deploy(test_endpoint, sync=sync, deploy_request_timeout=180.0) + + if not sync: + test_endpoint.wait() + + automatic_resources = gca_machine_resources.AutomaticResources( + min_replica_count=1, max_replica_count=1, + ) + deployed_model = gca_endpoint.DeployedModel( + automatic_resources=automatic_resources, + model=test_model.resource_name, + display_name=None, + ) + deploy_model_mock.assert_called_once_with( + endpoint=test_endpoint.resource_name, + deployed_model=deployed_model, + traffic_split={"0": 100}, + metadata=(), + timeout=180.0, ) @pytest.mark.usefixtures( @@ -915,6 +951,7 @@ def test_deploy_no_endpoint(self, deploy_model_mock, sync): deployed_model=deployed_model, traffic_split={"0": 100}, metadata=(), + timeout=None, ) @pytest.mark.usefixtures( @@ -932,6 +969,7 @@ def test_deploy_no_endpoint_dedicated_resources(self, deploy_model_mock, sync): accelerator_type=_TEST_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, service_account=_TEST_SERVICE_ACCOUNT, + deploy_request_timeout=None, sync=sync, ) @@ -957,6 +995,7 @@ def test_deploy_no_endpoint_dedicated_resources(self, deploy_model_mock, sync): deployed_model=expected_deployed_model, traffic_split={"0": 100}, metadata=(), + timeout=None, ) @pytest.mark.usefixtures( @@ -974,6 +1013,7 @@ def test_deploy_no_endpoint_with_explanations(self, deploy_model_mock, sync): accelerator_count=_TEST_ACCELERATOR_COUNT, explanation_metadata=_TEST_EXPLANATION_METADATA, explanation_parameters=_TEST_EXPLANATION_PARAMETERS, + deploy_request_timeout=None, sync=sync, ) @@ -1002,6 +1042,7 @@ def test_deploy_no_endpoint_with_explanations(self, deploy_model_mock, sync): deployed_model=expected_deployed_model, traffic_split={"0": 100}, metadata=(), + timeout=None, ) @pytest.mark.usefixtures( 
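Taken together, the patches above replace the bare timeout argument with request-scoped names (upload_request_timeout, create_request_timeout, import_request_timeout, deploy_request_timeout) that, per the added docstrings, bound only the initiating RPC and not the long-running operation it starts. A minimal usage sketch of the resulting surface follows; the project, bucket, and container image values are placeholders and the timeout values are arbitrary, none of them taken from the patches themselves.

    # Hypothetical project, bucket, and image names; timeout values are arbitrary.
    from google.cloud import aiplatform

    aiplatform.init(project="example-project", location="us-central1")

    # create_request_timeout bounds only the CreateDataset request;
    # the long-running operation it starts still runs to completion.
    ds = aiplatform.TabularDataset.create(
        display_name="example-dataset",
        gcs_source=["gs://example-bucket/data.csv"],
        create_request_timeout=180.0,
    )

    # upload_request_timeout likewise bounds only the UploadModel request.
    model = aiplatform.Model.upload(
        display_name="example-model",
        serving_container_image_uri="gcr.io/example-project/serving:latest",
        artifact_uri="gs://example-bucket/model/",
        upload_request_timeout=180.0,
    )

    # Same pattern for Tensorboard creation.
    tb = aiplatform.Tensorboard.create(
        display_name="example-tensorboard",
        create_request_timeout=60.0,
    )

Leaving any of these arguments at their default of None preserves the prior behavior, falling through to the underlying GAPIC client's default request settings.
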
From 9d383b2cc64ca61131d3b192614c21d095853b85 Mon Sep 17 00:00:00 2001 From: Sara Robinson Date: Wed, 23 Mar 2022 11:58:45 -0400 Subject: [PATCH 07/19] add create_timeout_request arg to pipeline job run and submit --- google/cloud/aiplatform/datasets/dataset.py | 10 +- google/cloud/aiplatform/pipeline_jobs.py | 22 +++- tests/unit/aiplatform/test_pipeline_jobs.py | 108 +++++++++++++++++++- 3 files changed, 130 insertions(+), 10 deletions(-) diff --git a/google/cloud/aiplatform/datasets/dataset.py b/google/cloud/aiplatform/datasets/dataset.py index 5b26b67431..629404cdc7 100644 --- a/google/cloud/aiplatform/datasets/dataset.py +++ b/google/cloud/aiplatform/datasets/dataset.py @@ -356,7 +356,7 @@ def _create_and_import( ) # Import if import datasource is DatasourceImportable - # import_request_timeout is None since user is issuing a single request with create and import + # import_request_timeout is None since user is issuing a single request with create and import if isinstance(datasource, _datasources.DatasourceImportable): dataset_obj._import_and_wait(datasource, import_request_timeout=None) @@ -367,7 +367,9 @@ def _import_and_wait(self, datasource, import_request_timeout): "Importing", "data", self, ) - import_lro = self._import(datasource=datasource, import_request_timeout=import_request_timeout) + import_lro = self._import( + datasource=datasource, import_request_timeout=import_request_timeout + ) _LOGGER.log_action_started_against_resource_with_lro( "Import", "data", self.__class__, import_lro @@ -537,7 +539,9 @@ def import_data( data_item_labels=data_item_labels, ) - self._import_and_wait(datasource=datasource, import_request_timeout=import_request_timeout) + self._import_and_wait( + datasource=datasource, import_request_timeout=import_request_timeout + ) return self # TODO(b/174751568) add optional sync support diff --git a/google/cloud/aiplatform/pipeline_jobs.py b/google/cloud/aiplatform/pipeline_jobs.py index 64115df918..e2a96684e9 100644 --- a/google/cloud/aiplatform/pipeline_jobs.py +++ b/google/cloud/aiplatform/pipeline_jobs.py @@ -17,6 +17,7 @@ import datetime import logging +from socket import timeout import time import re from typing import Any, Dict, List, Optional @@ -234,6 +235,7 @@ def run( self, service_account: Optional[str] = None, network: Optional[str] = None, + create_request_timeout: Optional[float] = None, sync: Optional[bool] = True, ) -> None: """Run this configured PipelineJob and monitor the job until completion. @@ -248,15 +250,26 @@ def run( Private services access must already be configured for the network. If left unspecified, the job is not peered with any network. + create_request_timeout (float): + Optional. The timeout for initiating this job create request in seconds. Note: + this does not set the timeout on the underlying job create job, only on the time + to initiate the job create request. sync (bool): Optional. Whether to execute this method synchronously. If False, this method will unblock and it will be executed in a concurrent Future. """ - self.submit(service_account=service_account, network=network) + self.submit( + service_account=service_account, + network=network, + create_request_timeout=create_request_timeout, + ) self._block_until_complete() def submit( - self, service_account: Optional[str] = None, network: Optional[str] = None, + self, + service_account: Optional[str] = None, + network: Optional[str] = None, + create_request_timeout: Optional[float] = None, ) -> None: """Run this configured PipelineJob. 
@@ -270,6 +283,10 @@ def submit( Private services access must already be configured for the network. If left unspecified, the job is not peered with any network. + create_request_timeout (float): + Optional. The timeout for initiating this job create request in seconds. Note: + this does not set the timeout on the underlying job create job, only on the time + to initiate the job create request. """ if service_account: self._gca_resource.service_account = service_account @@ -287,6 +304,7 @@ def submit( parent=self._parent, pipeline_job=self._gca_resource, pipeline_job_id=self.job_id, + timeout=create_request_timeout, ) _LOGGER.log_create_complete_with_getter( diff --git a/tests/unit/aiplatform/test_pipeline_jobs.py b/tests/unit/aiplatform/test_pipeline_jobs.py index 87169ea010..a1fddc63a5 100644 --- a/tests/unit/aiplatform/test_pipeline_jobs.py +++ b/tests/unit/aiplatform/test_pipeline_jobs.py @@ -15,6 +15,7 @@ # limitations under the License. # +from socket import timeout import pytest import json @@ -278,7 +279,10 @@ def test_run_call_pipeline_service_create( ) job.run( - service_account=_TEST_SERVICE_ACCOUNT, network=_TEST_NETWORK, sync=sync, + service_account=_TEST_SERVICE_ACCOUNT, + network=_TEST_NETWORK, + sync=sync, + create_request_timeout=None, ) if not sync: @@ -311,6 +315,7 @@ def test_run_call_pipeline_service_create( parent=_TEST_PARENT, pipeline_job=expected_gapic_pipeline_job, pipeline_job_id=_TEST_PIPELINE_JOB_ID, + timeout=None, ) mock_pipeline_service_get.assert_called_with( @@ -321,6 +326,81 @@ def test_run_call_pipeline_service_create( gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED ) + @pytest.mark.parametrize( + "job_spec_json", [_TEST_PIPELINE_SPEC, _TEST_PIPELINE_JOB], + ) + @pytest.mark.parametrize("sync", [True, False]) + def test_run_call_pipeline_service_create_with_timeout( + self, + mock_pipeline_service_create, + mock_pipeline_service_get, + job_spec_json, + mock_load_json, + sync, + ): + aiplatform.init( + project=_TEST_PROJECT, + staging_bucket=_TEST_GCS_BUCKET_NAME, + location=_TEST_LOCATION, + credentials=_TEST_CREDENTIALS, + ) + + job = pipeline_jobs.PipelineJob( + display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME, + template_path=_TEST_TEMPLATE_PATH, + job_id=_TEST_PIPELINE_JOB_ID, + parameter_values=_TEST_PIPELINE_PARAMETER_VALUES, + enable_caching=True, + ) + + job.run( + service_account=_TEST_SERVICE_ACCOUNT, + network=_TEST_NETWORK, + sync=sync, + create_request_timeout=180.0, + ) + + if not sync: + job.wait() + + expected_runtime_config_dict = { + "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME, + "parameterValues": _TEST_PIPELINE_PARAMETER_VALUES, + } + runtime_config = gca_pipeline_job_v1.PipelineJob.RuntimeConfig()._pb + json_format.ParseDict(expected_runtime_config_dict, runtime_config) + + pipeline_spec = job_spec_json.get("pipelineSpec") or job_spec_json + + # Construct expected request + expected_gapic_pipeline_job = gca_pipeline_job_v1.PipelineJob( + display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME, + pipeline_spec={ + "components": {}, + "pipelineInfo": pipeline_spec["pipelineInfo"], + "root": pipeline_spec["root"], + "schemaVersion": "2.1.0", + }, + runtime_config=runtime_config, + service_account=_TEST_SERVICE_ACCOUNT, + network=_TEST_NETWORK, + ) + + mock_pipeline_service_create.assert_called_once_with( + parent=_TEST_PARENT, + pipeline_job=expected_gapic_pipeline_job, + pipeline_job_id=_TEST_PIPELINE_JOB_ID, + timeout=180.0, + ) + + # mock_pipeline_service_get.assert_called_with( + # name=_TEST_PIPELINE_JOB_NAME, 
retry=base._DEFAULT_RETRY + # ) + + # assert job._gca_resource == make_pipeline_job( + # gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED + # ) + @pytest.mark.parametrize( "job_spec_json", [_TEST_PIPELINE_SPEC_LEGACY, _TEST_PIPELINE_JOB_LEGACY], ) @@ -349,7 +429,10 @@ def test_run_call_pipeline_service_create_legacy( ) job.run( - service_account=_TEST_SERVICE_ACCOUNT, network=_TEST_NETWORK, sync=sync, + service_account=_TEST_SERVICE_ACCOUNT, + network=_TEST_NETWORK, + sync=sync, + create_request_timeout=None, ) if not sync: @@ -382,6 +465,7 @@ def test_run_call_pipeline_service_create_legacy( parent=_TEST_PARENT, pipeline_job=expected_gapic_pipeline_job, pipeline_job_id=_TEST_PIPELINE_JOB_ID, + timeout=None, ) mock_pipeline_service_get.assert_called_with( @@ -420,7 +504,10 @@ def test_run_call_pipeline_service_create_tfx( ) job.run( - service_account=_TEST_SERVICE_ACCOUNT, network=_TEST_NETWORK, sync=sync, + service_account=_TEST_SERVICE_ACCOUNT, + network=_TEST_NETWORK, + sync=sync, + create_request_timeout=None, ) if not sync: @@ -454,6 +541,7 @@ def test_run_call_pipeline_service_create_tfx( parent=_TEST_PARENT, pipeline_job=expected_gapic_pipeline_job, pipeline_job_id=_TEST_PIPELINE_JOB_ID, + timeout=None, ) mock_pipeline_service_get.assert_called_with( @@ -489,7 +577,11 @@ def test_submit_call_pipeline_service_pipeline_job_create( enable_caching=True, ) - job.submit(service_account=_TEST_SERVICE_ACCOUNT, network=_TEST_NETWORK) + job.submit( + service_account=_TEST_SERVICE_ACCOUNT, + network=_TEST_NETWORK, + create_request_timeout=None, + ) expected_runtime_config_dict = { "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME, @@ -518,6 +610,7 @@ def test_submit_call_pipeline_service_pipeline_job_create( parent=_TEST_PARENT, pipeline_job=expected_gapic_pipeline_job, pipeline_job_id=_TEST_PIPELINE_JOB_ID, + timeout=None, ) assert not mock_pipeline_service_get.called @@ -590,7 +683,11 @@ def test_submit_call_pipeline_service_pipeline_job_create_legacy( enable_caching=True, ) - job.submit(service_account=_TEST_SERVICE_ACCOUNT, network=_TEST_NETWORK) + job.submit( + service_account=_TEST_SERVICE_ACCOUNT, + network=_TEST_NETWORK, + create_request_timeout=None, + ) expected_runtime_config_dict = { "parameters": {"string_param": {"stringValue": "hello"}}, @@ -619,6 +716,7 @@ def test_submit_call_pipeline_service_pipeline_job_create_legacy( parent=_TEST_PARENT, pipeline_job=expected_gapic_pipeline_job, pipeline_job_id=_TEST_PIPELINE_JOB_ID, + timeout=None, ) assert not mock_pipeline_service_get.called From dd3d57bf9b3bc795dbb86235a72d6a6d244b54df Mon Sep 17 00:00:00 2001 From: Sara Robinson Date: Wed, 23 Mar 2022 17:59:31 -0400 Subject: [PATCH 08/19] add timeout arg and tests to training_jobs --- google/cloud/aiplatform/training_jobs.py | 85 +++++++++++++++++++++ tests/unit/aiplatform/test_training_jobs.py | 71 +++++++++++++++++ 2 files changed, 156 insertions(+) diff --git a/google/cloud/aiplatform/training_jobs.py b/google/cloud/aiplatform/training_jobs.py index f84a4136d8..deb12f68d9 100644 --- a/google/cloud/aiplatform/training_jobs.py +++ b/google/cloud/aiplatform/training_jobs.py @@ -16,10 +16,12 @@ # import datetime +from socket import timeout import time from typing import Dict, List, Optional, Sequence, Tuple, Union import abc +from venv import create from google.auth import credentials as auth_credentials from google.cloud.aiplatform import base @@ -568,6 +570,7 @@ def _run_job( model: Optional[gca_model.Model] = None, gcs_destination_uri_prefix: Optional[str] = None, 
bigquery_destination: Optional[str] = None, + create_request_timeout: Optional[float] = None, ) -> Optional[models.Model]: """Runs the training job. @@ -713,6 +716,10 @@ def _run_job( - AIP_TRAINING_DATA_URI ="bigquery_destination.dataset_*.training" - AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation" - AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test" + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on + the time to initiate the request. """ input_data_config = self._create_input_data_config( @@ -746,6 +753,7 @@ def _run_job( self.project, self.location ), training_pipeline=training_pipeline, + timeout=create_request_timeout, ) self._gca_resource = training_pipeline @@ -1818,6 +1826,7 @@ def run( restart_job_on_worker_restart: bool = False, enable_web_access: bool = False, tensorboard: Optional[str] = None, + create_request_timeout: Optional[float] = None, sync=True, ) -> Optional[models.Model]: """Runs the custom training job. @@ -2062,6 +2071,10 @@ def run( `service_account` is required with provided `tensorboard`. For more information on configuring your service account please visit: https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on + the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -2116,6 +2129,7 @@ def run( reduction_server_container_uri=reduction_server_container_uri if reduction_server_replica_count > 0 else None, + create_request_timeout=create_request_timeout, sync=sync, ) @@ -2153,6 +2167,7 @@ def _run( enable_web_access: bool = False, tensorboard: Optional[str] = None, reduction_server_container_uri: Optional[str] = None, + create_request_timeout: Optional[float] = None, sync=True, ) -> Optional[models.Model]: """Packages local script and launches training_job. @@ -2298,6 +2313,10 @@ def _run( https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training reduction_server_container_uri (str): Optional. The Uri of the reduction server container image. + create_request_timeout (float) + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on the + time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -2371,6 +2390,7 @@ def _run( model=managed_model, gcs_destination_uri_prefix=base_output_dir, bigquery_destination=bigquery_destination, + create_request_timeout=create_request_timeout, ) return model @@ -2403,6 +2423,7 @@ def __init__( training_encryption_spec_key_name: Optional[str] = None, model_encryption_spec_key_name: Optional[str] = None, staging_bucket: Optional[str] = None, + create_request_timeout: Optional[float] = None, ): """Constructs a Custom Container Training Job. @@ -2578,6 +2599,10 @@ def __init__( staging_bucket (str): Bucket used to stage source and training artifacts. Overrides staging_bucket set in aiplatform.init. + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. 
Note: + this does not set the timeout on the underlying training job, only on + the time to initiate the request. """ super().__init__( display_name=display_name, @@ -2644,6 +2669,7 @@ def run( restart_job_on_worker_restart: bool = False, enable_web_access: bool = False, tensorboard: Optional[str] = None, + create_request_timeout: Optional[float] = None, sync=True, ) -> Optional[models.Model]: """Runs the custom training job. @@ -2881,6 +2907,10 @@ def run( `service_account` is required with provided `tensorboard`. For more information on configuring your service account please visit: https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on + the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -2934,6 +2964,7 @@ def run( reduction_server_container_uri=reduction_server_container_uri if reduction_server_replica_count > 0 else None, + create_request_timeout=create_request_timeout, sync=sync, ) @@ -2970,6 +3001,7 @@ def _run( enable_web_access: bool = False, tensorboard: Optional[str] = None, reduction_server_container_uri: Optional[str] = None, + create_request_timeout: Optional[float] = None, sync=True, ) -> Optional[models.Model]: """Packages local script and launches training_job. @@ -3111,6 +3143,10 @@ def _run( https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training reduction_server_container_uri (str): Optional. The Uri of the reduction server container image. + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on + the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -3178,6 +3214,7 @@ def _run( model=managed_model, gcs_destination_uri_prefix=base_output_dir, bigquery_destination=bigquery_destination, + create_request_timeout=create_request_timeout, ) return model @@ -3565,6 +3602,7 @@ def _run( export_evaluated_data_items: bool = False, export_evaluated_data_items_bigquery_destination_uri: Optional[str] = None, export_evaluated_data_items_override_destination: bool = False, + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> models.Model: """Runs the training job and returns a model. @@ -3691,6 +3729,10 @@ def _run( Applies only if [export_evaluated_data_items] is True and [export_evaluated_data_items_bigquery_destination_uri] is specified. + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on + the time to initiate the request. sync (bool): Whether to execute this method synchronously. 
If False, this method will be executed in concurrent Future and any downstream object will @@ -3768,6 +3810,7 @@ def _run( predefined_split_column_name=predefined_split_column_name, timestamp_split_column_name=timestamp_split_column_name, model=model, + create_request_timeout=create_request_timeout, ) @property @@ -3971,6 +4014,7 @@ def run( model_display_name: Optional[str] = None, model_labels: Optional[Dict[str, str]] = None, additional_experiments: Optional[List[str]] = None, + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> models.Model: """Runs the training job and returns a model. @@ -4119,6 +4163,10 @@ def run( and examples of labels. additional_experiments (List[str]): Optional. Additional experiment flags for the time series forcasting training. + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on + the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -4172,6 +4220,7 @@ def run( validation_options=validation_options, model_display_name=model_display_name, model_labels=model_labels, + create_request_timeout=create_request_timeout, sync=sync, ) @@ -4202,6 +4251,7 @@ def _run( budget_milli_node_hours: int = 1000, model_display_name: Optional[str] = None, model_labels: Optional[Dict[str, str]] = None, + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> models.Model: """Runs the training job and returns a model. @@ -4356,6 +4406,10 @@ def _run( are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on + the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -4438,6 +4492,7 @@ def _run( test_fraction_split=test_fraction_split, predefined_split_column_name=predefined_split_column_name, timestamp_split_column_name=None, # Not supported by AutoMLForecasting + create_request_timeout=create_request_timeout, model=model, ) @@ -4814,6 +4869,7 @@ def _run( model_display_name: Optional[str] = None, model_labels: Optional[Dict[str, str]] = None, disable_early_stopping: bool = False, + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> models.Model: """Runs the training job and returns a model. @@ -4916,6 +4972,10 @@ def _run( that training might stop before the entire training budget has been used, if further training does no longer brings significant improvement to the model. + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on + the time to initiate the request. sync (bool): Whether to execute this method synchronously. 
If False, this method will be executed in concurrent Future and any downstream object will @@ -4968,6 +5028,7 @@ def _run( validation_filter_split=validation_filter_split, test_filter_split=test_filter_split, model=model_tbt, + create_request_timeout=create_request_timeout, ) @property @@ -5257,6 +5318,7 @@ def run( restart_job_on_worker_restart: bool = False, enable_web_access: bool = False, tensorboard: Optional[str] = None, + create_request_timeout: Optional[float] = None, sync=True, ) -> Optional[models.Model]: """Runs the custom training job. @@ -5494,6 +5556,10 @@ def run( `service_account` is required with provided `tensorboard`. For more information on configuring your service account please visit: https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on + the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -5542,6 +5608,7 @@ def run( reduction_server_container_uri=reduction_server_container_uri if reduction_server_replica_count > 0 else None, + create_request_timeout=create_request_timeout, sync=sync, ) @@ -5578,6 +5645,7 @@ def _run( enable_web_access: bool = False, tensorboard: Optional[str] = None, reduction_server_container_uri: Optional[str] = None, + create_request_timeout: Optional[float] = None, sync=True, ) -> Optional[models.Model]: """Packages local script and launches training_job. @@ -5706,6 +5774,10 @@ def _run( https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training reduction_server_container_uri (str): Optional. The Uri of the reduction server container image. + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on + the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -5773,6 +5845,7 @@ def _run( model=managed_model, gcs_destination_uri_prefix=base_output_dir, bigquery_destination=bigquery_destination, + create_request_timeout=create_request_timeout, ) return model @@ -6028,6 +6101,7 @@ def _run( test_filter_split: Optional[str] = None, model_display_name: Optional[str] = None, model_labels: Optional[Dict[str, str]] = None, + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> models.Model: """Runs the training job and returns a model. @@ -6094,6 +6168,10 @@ def _run( are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on + the time to initiate the request. sync (bool): Whether to execute this method synchronously. 
If False, this method will be executed in concurrent Future and any downstream object will @@ -6135,6 +6213,7 @@ def _run( validation_filter_split=validation_filter_split, test_filter_split=test_filter_split, model=model_tbt, + create_request_timeout=create_request_timeout, ) @property @@ -6419,6 +6498,7 @@ def _run( test_filter_split: Optional[str] = None, model_display_name: Optional[str] = None, model_labels: Optional[Dict[str, str]] = None, + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> models.Model: """Runs the training job and returns a model. @@ -6497,6 +6577,10 @@ def _run( are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on + the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -6524,6 +6608,7 @@ def _run( validation_filter_split=validation_filter_split, test_filter_split=test_filter_split, model=model, + create_request_timeout=create_request_timeout, ) @property diff --git a/tests/unit/aiplatform/test_training_jobs.py b/tests/unit/aiplatform/test_training_jobs.py index 6861efd667..b809e6da65 100644 --- a/tests/unit/aiplatform/test_training_jobs.py +++ b/tests/unit/aiplatform/test_training_jobs.py @@ -877,6 +877,7 @@ def test_run_call_pipeline_service_create_with_tabular_dataset( test_fraction_split=_TEST_TEST_FRACTION_SPLIT, timestamp_split_column_name=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME, tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME, + create_request_timeout=None, sync=sync, ) @@ -987,6 +988,7 @@ def test_run_call_pipeline_service_create_with_tabular_dataset( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert job._gca_resource == make_training_pipeline( @@ -1057,6 +1059,7 @@ def test_custom_training_tabular_done( test_fraction_split=_TEST_TEST_FRACTION_SPLIT, timestamp_split_column_name=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME, tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME, + create_request_timeout=None, sync=False, ) @@ -1110,6 +1113,7 @@ def test_run_call_pipeline_service_create_with_bigquery_destination( accelerator_count=_TEST_ACCELERATOR_COUNT, model_display_name=_TEST_MODEL_DISPLAY_NAME, predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME, + create_request_timeout=None, sync=sync, ) @@ -1204,6 +1208,7 @@ def test_run_call_pipeline_service_create_with_bigquery_destination( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert job._gca_resource == make_training_pipeline( @@ -1255,6 +1260,7 @@ def test_run_called_twice_raises( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -1271,6 +1277,7 @@ def test_run_called_twice_raises( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -1307,6 +1314,7 @@ def test_run_with_invalid_accelerator_type_raises( 
accelerator_type=_TEST_INVALID_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, model_display_name=_TEST_MODEL_DISPLAY_NAME, + create_request_timeout=None, sync=sync, ) @@ -1338,6 +1346,7 @@ def test_run_with_two_splits_raises( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -1371,6 +1380,7 @@ def test_run_with_incomplete_model_info_raises_with_model_to_upload( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -1412,6 +1422,7 @@ def test_run_call_pipeline_service_create_with_no_dataset( training_filter_split=_TEST_TRAINING_FILTER_SPLIT, validation_filter_split=_TEST_VALIDATION_FILTER_SPLIT, test_filter_split=_TEST_TEST_FILTER_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -1478,6 +1489,7 @@ def test_run_call_pipeline_service_create_with_no_dataset( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert job._gca_resource == make_training_pipeline( @@ -1522,6 +1534,7 @@ def test_run_call_pipeline_service_create_with_enable_web_access( accelerator_type=_TEST_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, enable_web_access=_TEST_ENABLE_WEB_ACCESS, + create_request_timeout=None, sync=sync, ) @@ -1562,6 +1575,7 @@ def test_run_call_pipeline_service_create_with_scheduling(self, sync, caplog): accelerator_count=_TEST_ACCELERATOR_COUNT, timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, + create_request_timeout=None, sync=sync, ) @@ -1610,6 +1624,7 @@ def test_run_returns_none_if_no_model_to_upload( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -1643,6 +1658,7 @@ def test_get_model_raises_if_no_model_to_upload( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -1681,6 +1697,7 @@ def test_run_raises_if_pipeline_fails( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -1762,6 +1779,7 @@ def test_run_call_pipeline_service_create_distributed_training( accelerator_type=_TEST_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, model_display_name=_TEST_MODEL_DISPLAY_NAME, + create_request_timeout=None, sync=sync, ) @@ -1863,6 +1881,7 @@ def test_run_call_pipeline_service_create_distributed_training( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert job._gca_resource == make_training_pipeline( @@ -1912,6 +1931,7 @@ def test_run_call_pipeline_service_create_distributed_training_with_reduction_se reduction_server_replica_count=_TEST_REDUCTION_SERVER_REPLICA_COUNT, reduction_server_machine_type=_TEST_REDUCTION_SERVER_MACHINE_TYPE, reduction_server_container_uri=_TEST_REDUCTION_SERVER_CONTAINER_URI, + 
create_request_timeout=None, sync=sync, ) @@ -1997,6 +2017,7 @@ def test_run_call_pipeline_service_create_distributed_training_with_reduction_se mock_pipeline_service_create_with_no_model_to_upload.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert job._gca_resource == make_training_pipeline_with_no_model_upload( @@ -2174,6 +2195,7 @@ def test_run_call_pipeline_service_create_with_nontabular_dataset_without_model_ training_filter_split=_TEST_TRAINING_FILTER_SPLIT, validation_filter_split=_TEST_VALIDATION_FILTER_SPLIT, test_filter_split=_TEST_TEST_FILTER_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -2274,6 +2296,7 @@ def test_run_call_pipeline_service_create_with_nontabular_dataset_without_model_ mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert job._gca_resource == make_training_pipeline( @@ -2326,6 +2349,7 @@ def test_run_call_pipeline_service_create_with_nontabular_dataset_raises_if_anno accelerator_type=_TEST_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, model_display_name=_TEST_MODEL_DISPLAY_NAME, + create_request_timeout=None, ) @pytest.mark.usefixtures( @@ -2426,6 +2450,7 @@ def test_custom_container_training_tabular_done( predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME, service_account=_TEST_SERVICE_ACCOUNT, tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME, + create_request_timeout=None, sync=False, ) @@ -2481,6 +2506,7 @@ def test_run_call_pipeline_service_create_with_tabular_dataset( predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME, service_account=_TEST_SERVICE_ACCOUNT, tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME, + create_request_timeout=None, sync=sync, ) @@ -2578,6 +2604,7 @@ def test_run_call_pipeline_service_create_with_tabular_dataset( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert job._gca_resource == make_training_pipeline( @@ -2641,6 +2668,7 @@ def test_run_call_pipeline_service_create_with_bigquery_destination( validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, timestamp_split_column_name=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME, + create_request_timeout=None, sync=sync, ) @@ -2734,6 +2762,7 @@ def test_run_call_pipeline_service_create_with_bigquery_destination( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert job._gca_resource == make_training_pipeline( @@ -2785,6 +2814,7 @@ def test_run_called_twice_raises( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -2801,6 +2831,7 @@ def test_run_called_twice_raises( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -2837,6 +2868,7 @@ def test_run_with_invalid_accelerator_type_raises( accelerator_type=_TEST_INVALID_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, model_display_name=_TEST_MODEL_DISPLAY_NAME, + 
create_request_timeout=None, sync=sync, ) @@ -2867,6 +2899,7 @@ def test_run_with_two_split_raises( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -2900,6 +2933,7 @@ def test_run_with_incomplete_model_info_raises_with_model_to_upload( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -2932,6 +2966,7 @@ def test_run_call_pipeline_service_create_with_no_dataset( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -2986,6 +3021,7 @@ def test_run_call_pipeline_service_create_with_no_dataset( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert job._gca_resource == make_training_pipeline( @@ -3029,6 +3065,7 @@ def test_run_call_pipeline_service_create_with_enable_web_access( accelerator_type=_TEST_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, enable_web_access=_TEST_ENABLE_WEB_ACCESS, + create_request_timeout=None, sync=sync, ) @@ -3068,6 +3105,7 @@ def test_run_call_pipeline_service_create_with_scheduling(self, sync, caplog): accelerator_count=_TEST_ACCELERATOR_COUNT, timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, + create_request_timeout=None, sync=sync, ) @@ -3112,6 +3150,7 @@ def test_run_returns_none_if_no_model_to_upload( machine_type=_TEST_MACHINE_TYPE, accelerator_type=_TEST_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, + create_request_timeout=None, sync=sync, ) @@ -3144,6 +3183,7 @@ def test_get_model_raises_if_no_model_to_upload( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -3181,6 +3221,7 @@ def test_run_raises_if_pipeline_fails( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -3257,6 +3298,7 @@ def test_run_call_pipeline_service_create_distributed_training( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -3351,6 +3393,7 @@ def test_run_call_pipeline_service_create_distributed_training( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert job._gca_resource == make_training_pipeline( @@ -3398,6 +3441,7 @@ def test_run_call_pipeline_service_create_distributed_training_with_reduction_se reduction_server_replica_count=_TEST_REDUCTION_SERVER_REPLICA_COUNT, reduction_server_machine_type=_TEST_REDUCTION_SERVER_MACHINE_TYPE, reduction_server_container_uri=_TEST_REDUCTION_SERVER_CONTAINER_URI, + create_request_timeout=None, sync=sync, ) @@ -3469,6 +3513,7 @@ def 
test_run_call_pipeline_service_create_distributed_training_with_reduction_se mock_pipeline_service_create_with_no_model_to_upload.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert job._gca_resource == make_training_pipeline_with_no_model_upload( @@ -3526,6 +3571,7 @@ def test_run_call_pipeline_service_create_with_nontabular_dataset( training_filter_split=_TEST_TRAINING_FILTER_SPLIT, validation_filter_split=_TEST_VALIDATION_FILTER_SPLIT, test_filter_split=_TEST_TEST_FILTER_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -3621,6 +3667,7 @@ def test_run_call_pipeline_service_create_with_nontabular_dataset( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert job._gca_resource == make_training_pipeline( @@ -3673,6 +3720,7 @@ def test_run_call_pipeline_service_create_with_nontabular_dataset_raises_if_anno accelerator_type=_TEST_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, model_display_name=_TEST_MODEL_DISPLAY_NAME, + create_request_timeout=None, ) @@ -4044,6 +4092,7 @@ def test_run_call_pipeline_service_create_with_tabular_dataset( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -4146,6 +4195,7 @@ def test_run_call_pipeline_service_create_with_tabular_dataset( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert job._gca_resource == make_training_pipeline( @@ -4207,6 +4257,7 @@ def test_run_call_pipeline_service_create_with_tabular_dataset_without_model_dis accelerator_type=_TEST_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME, + create_request_timeout=None, sync=sync, ) @@ -4298,6 +4349,7 @@ def test_run_call_pipeline_service_create_with_tabular_dataset_without_model_dis mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert job._gca_resource == make_training_pipeline( @@ -4360,6 +4412,7 @@ def test_run_call_pipeline_service_create_with_bigquery_destination( validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, timestamp_split_column_name=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME, + create_request_timeout=None, sync=sync, ) @@ -4454,6 +4507,7 @@ def test_run_call_pipeline_service_create_with_bigquery_destination( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert job._gca_resource == make_training_pipeline( @@ -4503,6 +4557,7 @@ def test_run_called_twice_raises( accelerator_type=_TEST_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, model_display_name=_TEST_MODEL_DISPLAY_NAME, + create_request_timeout=None, sync=sync, ) @@ -4516,6 +4571,7 @@ def test_run_called_twice_raises( accelerator_type=_TEST_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, model_display_name=_TEST_MODEL_DISPLAY_NAME, + create_request_timeout=None, sync=sync, ) @@ -4556,6 +4612,7 @@ def 
test_run_with_invalid_accelerator_type_raises( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -4588,6 +4645,7 @@ def test_run_with_two_split_raises( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -4622,6 +4680,7 @@ def test_run_with_incomplete_model_info_raises_with_model_to_upload( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -4655,6 +4714,7 @@ def test_run_call_pipeline_service_create_with_no_dataset( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -4710,6 +4770,7 @@ def test_run_call_pipeline_service_create_with_no_dataset( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert job._gca_resource == make_training_pipeline( @@ -4754,6 +4815,7 @@ def test_run_call_pipeline_service_create_with_enable_web_access( accelerator_type=_TEST_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, enable_web_access=_TEST_ENABLE_WEB_ACCESS, + create_request_timeout=None, sync=sync, ) @@ -4794,6 +4856,7 @@ def test_run_call_pipeline_service_create_with_scheduling(self, sync, caplog): accelerator_count=_TEST_ACCELERATOR_COUNT, timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, + create_request_timeout=None, sync=sync, ) @@ -4842,6 +4905,7 @@ def test_run_returns_none_if_no_model_to_upload( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -4875,6 +4939,7 @@ def test_get_model_raises_if_no_model_to_upload( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -4989,6 +5054,7 @@ def test_run_call_pipeline_service_create_distributed_training( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -5085,6 +5151,7 @@ def test_run_call_pipeline_service_create_distributed_training( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert job._gca_resource == make_training_pipeline( @@ -5133,6 +5200,7 @@ def test_run_call_pipeline_service_create_distributed_training_with_reduction_se reduction_server_replica_count=_TEST_REDUCTION_SERVER_REPLICA_COUNT, reduction_server_machine_type=_TEST_REDUCTION_SERVER_MACHINE_TYPE, reduction_server_container_uri=_TEST_REDUCTION_SERVER_CONTAINER_URI, + create_request_timeout=None, sync=sync, ) @@ -5206,6 +5274,7 @@ def test_run_call_pipeline_service_create_distributed_training_with_reduction_se 
mock_pipeline_service_create_with_no_model_to_upload.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert job._gca_resource == make_training_pipeline_with_no_model_upload( @@ -5262,6 +5331,7 @@ def test_run_call_pipeline_service_create_with_nontabular_dataset_without_model_ training_filter_split=_TEST_TRAINING_FILTER_SPLIT, validation_filter_split=_TEST_VALIDATION_FILTER_SPLIT, test_filter_split=_TEST_TEST_FILTER_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -5358,6 +5428,7 @@ def test_run_call_pipeline_service_create_with_nontabular_dataset_without_model_ mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert job._gca_resource == make_training_pipeline( From a4a852c78985d88d99345c8d127be0553713f8ae Mon Sep 17 00:00:00 2001 From: Sara Robinson Date: Thu, 24 Mar 2022 09:30:52 -0400 Subject: [PATCH 09/19] add timeout arg tests for training_jobs --- tests/unit/aiplatform/test_training_jobs.py | 480 ++++++++++++++++++++ 1 file changed, 480 insertions(+) diff --git a/tests/unit/aiplatform/test_training_jobs.py b/tests/unit/aiplatform/test_training_jobs.py index b809e6da65..e715cbdf3a 100644 --- a/tests/unit/aiplatform/test_training_jobs.py +++ b/tests/unit/aiplatform/test_training_jobs.py @@ -1069,6 +1069,166 @@ def test_custom_training_tabular_done( assert job.done() is True + @pytest.mark.parametrize("sync", [True, False]) + def test_run_call_pipeline_service_create_with_tabular_dataset_and_timeout( + self, + mock_pipeline_service_create, + mock_pipeline_service_get, + mock_python_package_to_gcs, + mock_tabular_dataset, + mock_model_service_get, + sync, + ): + aiplatform.init( + project=_TEST_PROJECT, + staging_bucket=_TEST_BUCKET_NAME, + credentials=_TEST_CREDENTIALS, + encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME, + ) + + job = training_jobs.CustomTrainingJob( + display_name=_TEST_DISPLAY_NAME, + labels=_TEST_LABELS, + script_path=_TEST_LOCAL_SCRIPT_FILE_NAME, + container_uri=_TEST_TRAINING_CONTAINER_IMAGE, + model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE, + model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE, + model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE, + model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI, + model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI, + model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI, + model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND, + model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS, + model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES, + model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS, + model_description=_TEST_MODEL_DESCRIPTION, + ) + + model_from_job = job.run( + dataset=mock_tabular_dataset, + base_output_dir=_TEST_BASE_OUTPUT_DIR, + service_account=_TEST_SERVICE_ACCOUNT, + network=_TEST_NETWORK, + args=_TEST_RUN_ARGS, + environment_variables=_TEST_ENVIRONMENT_VARIABLES, + machine_type=_TEST_MACHINE_TYPE, + accelerator_type=_TEST_ACCELERATOR_TYPE, + accelerator_count=_TEST_ACCELERATOR_COUNT, + model_display_name=_TEST_MODEL_DISPLAY_NAME, + model_labels=_TEST_MODEL_LABELS, + training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, + validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, + 
test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + timestamp_split_column_name=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME, + tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME, + create_request_timeout=180.0, + sync=sync, + ) + + if not sync: + model_from_job.wait() + + true_args = _TEST_RUN_ARGS + true_env = [ + {"name": key, "value": value} + for key, value in _TEST_ENVIRONMENT_VARIABLES.items() + ] + + true_worker_pool_spec = { + "replica_count": _TEST_REPLICA_COUNT, + "machine_spec": { + "machine_type": _TEST_MACHINE_TYPE, + "accelerator_type": _TEST_ACCELERATOR_TYPE, + "accelerator_count": _TEST_ACCELERATOR_COUNT, + }, + "disk_spec": { + "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT, + "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT, + }, + "python_package_spec": { + "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE, + "python_module": _TEST_MODULE_NAME, + "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH], + "args": true_args, + "env": true_env, + }, + } + + true_timestamp_split = gca_training_pipeline.TimestampSplit( + training_fraction=_TEST_TRAINING_FRACTION_SPLIT, + validation_fraction=_TEST_VALIDATION_FRACTION_SPLIT, + test_fraction=_TEST_TEST_FRACTION_SPLIT, + key=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME, + ) + + env = [ + gca_env_var.EnvVar(name=str(key), value=str(value)) + for key, value in _TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES.items() + ] + + ports = [ + gca_model.Port(container_port=port) + for port in _TEST_MODEL_SERVING_CONTAINER_PORTS + ] + + true_container_spec = gca_model.ModelContainerSpec( + image_uri=_TEST_SERVING_CONTAINER_IMAGE, + predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE, + health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE, + command=_TEST_MODEL_SERVING_CONTAINER_COMMAND, + args=_TEST_MODEL_SERVING_CONTAINER_ARGS, + env=env, + ports=ports, + ) + + true_managed_model = gca_model.Model( + display_name=_TEST_MODEL_DISPLAY_NAME, + labels=_TEST_MODEL_LABELS, + description=_TEST_MODEL_DESCRIPTION, + container_spec=true_container_spec, + predict_schemata=gca_model.PredictSchemata( + instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI, + parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI, + prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI, + ), + encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC, + ) + + true_input_data_config = gca_training_pipeline.InputDataConfig( + timestamp_split=true_timestamp_split, + dataset_id=mock_tabular_dataset.name, + gcs_destination=gca_io.GcsDestination( + output_uri_prefix=_TEST_BASE_OUTPUT_DIR + ), + ) + + true_training_pipeline = gca_training_pipeline.TrainingPipeline( + display_name=_TEST_DISPLAY_NAME, + training_task_definition=schema.training_job.definition.custom_task, + training_task_inputs=json_format.ParseDict( + { + "worker_pool_specs": [true_worker_pool_spec], + "base_output_directory": { + "output_uri_prefix": _TEST_BASE_OUTPUT_DIR + }, + "service_account": _TEST_SERVICE_ACCOUNT, + "network": _TEST_NETWORK, + "tensorboard": _TEST_TENSORBOARD_RESOURCE_NAME, + }, + struct_pb2.Value(), + ), + model_to_upload=true_managed_model, + input_data_config=true_input_data_config, + labels=_TEST_LABELS, + encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC, + ) + + mock_pipeline_service_create.assert_called_once_with( + parent=initializer.global_config.common_location_path(), + training_pipeline=true_training_pipeline, + timeout=180.0, + ) + @pytest.mark.parametrize("sync", [True, False]) def test_run_call_pipeline_service_create_with_bigquery_destination( self, @@ -2625,6 +2785,171 @@ def 
test_run_call_pipeline_service_create_with_tabular_dataset( assert job._has_logged_custom_job + @pytest.mark.parametrize("sync", [True, False]) + def test_run_call_pipeline_service_create_with_tabular_dataset_and_timeout( + self, + mock_pipeline_service_create, + mock_pipeline_service_get, + mock_tabular_dataset, + mock_model_service_get, + sync, + ): + aiplatform.init( + project=_TEST_PROJECT, + staging_bucket=_TEST_BUCKET_NAME, + encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME, + ) + + job = training_jobs.CustomContainerTrainingJob( + display_name=_TEST_DISPLAY_NAME, + labels=_TEST_LABELS, + container_uri=_TEST_TRAINING_CONTAINER_IMAGE, + command=_TEST_TRAINING_CONTAINER_CMD, + model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE, + model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE, + model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE, + model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI, + model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI, + model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI, + model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND, + model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS, + model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES, + model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS, + model_description=_TEST_MODEL_DESCRIPTION, + ) + + model_from_job = job.run( + dataset=mock_tabular_dataset, + base_output_dir=_TEST_BASE_OUTPUT_DIR, + args=_TEST_RUN_ARGS, + environment_variables=_TEST_ENVIRONMENT_VARIABLES, + machine_type=_TEST_MACHINE_TYPE, + accelerator_type=_TEST_ACCELERATOR_TYPE, + accelerator_count=_TEST_ACCELERATOR_COUNT, + model_display_name=_TEST_MODEL_DISPLAY_NAME, + model_labels=_TEST_MODEL_LABELS, + predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME, + service_account=_TEST_SERVICE_ACCOUNT, + tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME, + create_request_timeout=180.0, + sync=sync, + ) + + if not sync: + model_from_job.wait() + + true_args = _TEST_RUN_ARGS + true_env = [ + {"name": key, "value": value} + for key, value in _TEST_ENVIRONMENT_VARIABLES.items() + ] + + true_worker_pool_spec = { + "replica_count": _TEST_REPLICA_COUNT, + "machine_spec": { + "machine_type": _TEST_MACHINE_TYPE, + "accelerator_type": _TEST_ACCELERATOR_TYPE, + "accelerator_count": _TEST_ACCELERATOR_COUNT, + }, + "disk_spec": { + "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT, + "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT, + }, + "containerSpec": { + "imageUri": _TEST_TRAINING_CONTAINER_IMAGE, + "command": _TEST_TRAINING_CONTAINER_CMD, + "args": true_args, + "env": true_env, + }, + } + + env = [ + gca_env_var.EnvVar(name=str(key), value=str(value)) + for key, value in _TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES.items() + ] + + ports = [ + gca_model.Port(container_port=port) + for port in _TEST_MODEL_SERVING_CONTAINER_PORTS + ] + + true_container_spec = gca_model.ModelContainerSpec( + image_uri=_TEST_SERVING_CONTAINER_IMAGE, + predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE, + health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE, + command=_TEST_MODEL_SERVING_CONTAINER_COMMAND, + args=_TEST_MODEL_SERVING_CONTAINER_ARGS, + env=env, + ports=ports, + ) + + true_managed_model = gca_model.Model( + display_name=_TEST_MODEL_DISPLAY_NAME, + labels=_TEST_MODEL_LABELS, + description=_TEST_MODEL_DESCRIPTION, + container_spec=true_container_spec, + 
predict_schemata=gca_model.PredictSchemata( + instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI, + parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI, + prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI, + ), + encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC, + ) + + true_input_data_config = gca_training_pipeline.InputDataConfig( + predefined_split=gca_training_pipeline.PredefinedSplit( + key=_TEST_PREDEFINED_SPLIT_COLUMN_NAME + ), + dataset_id=mock_tabular_dataset.name, + gcs_destination=gca_io.GcsDestination( + output_uri_prefix=_TEST_BASE_OUTPUT_DIR + ), + ) + + true_training_pipeline = gca_training_pipeline.TrainingPipeline( + display_name=_TEST_DISPLAY_NAME, + labels=_TEST_LABELS, + training_task_definition=schema.training_job.definition.custom_task, + training_task_inputs=json_format.ParseDict( + { + "worker_pool_specs": [true_worker_pool_spec], + "base_output_directory": { + "output_uri_prefix": _TEST_BASE_OUTPUT_DIR + }, + "service_account": _TEST_SERVICE_ACCOUNT, + "tensorboard": _TEST_TENSORBOARD_RESOURCE_NAME, + }, + struct_pb2.Value(), + ), + model_to_upload=true_managed_model, + input_data_config=true_input_data_config, + encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC, + ) + + mock_pipeline_service_create.assert_called_once_with( + parent=initializer.global_config.common_location_path(), + training_pipeline=true_training_pipeline, + timeout=180.0, + ) + + # assert job._gca_resource == make_training_pipeline( + # gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED + # ) + + # mock_model_service_get.assert_called_once_with( + # name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY + # ) + + # assert model_from_job._gca_resource is mock_model_service_get.return_value + + # assert job.get_model()._gca_resource is mock_model_service_get.return_value + + # assert not job.has_failed + + # assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED + + # assert job._has_logged_custom_job + @pytest.mark.parametrize("sync", [True, False]) def test_run_call_pipeline_service_create_with_bigquery_destination( self, @@ -4214,6 +4539,161 @@ def test_run_call_pipeline_service_create_with_tabular_dataset( assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED + @pytest.mark.parametrize("sync", [True, False]) + def test_run_call_pipeline_service_create_with_tabular_dataset_with_timeout( + self, + mock_pipeline_service_create, + mock_pipeline_service_get, + mock_tabular_dataset, + mock_model_service_get, + sync, + ): + aiplatform.init( + project=_TEST_PROJECT, + staging_bucket=_TEST_BUCKET_NAME, + encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME, + ) + + job = training_jobs.CustomPythonPackageTrainingJob( + display_name=_TEST_DISPLAY_NAME, + labels=_TEST_LABELS, + python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH, + python_module_name=_TEST_PYTHON_MODULE_NAME, + container_uri=_TEST_TRAINING_CONTAINER_IMAGE, + model_serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE, + model_serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE, + model_serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE, + model_serving_container_command=_TEST_MODEL_SERVING_CONTAINER_COMMAND, + model_serving_container_args=_TEST_MODEL_SERVING_CONTAINER_ARGS, + model_serving_container_environment_variables=_TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES, + model_serving_container_ports=_TEST_MODEL_SERVING_CONTAINER_PORTS, + model_description=_TEST_MODEL_DESCRIPTION, + 
model_instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI, + model_parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI, + model_prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI, + ) + + model_from_job = job.run( + dataset=mock_tabular_dataset, + model_display_name=_TEST_MODEL_DISPLAY_NAME, + model_labels=_TEST_MODEL_LABELS, + base_output_dir=_TEST_BASE_OUTPUT_DIR, + service_account=_TEST_SERVICE_ACCOUNT, + network=_TEST_NETWORK, + args=_TEST_RUN_ARGS, + environment_variables=_TEST_ENVIRONMENT_VARIABLES, + machine_type=_TEST_MACHINE_TYPE, + accelerator_type=_TEST_ACCELERATOR_TYPE, + accelerator_count=_TEST_ACCELERATOR_COUNT, + training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, + validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, + test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + create_request_timeout=180.0, + sync=sync, + ) + + if not sync: + model_from_job.wait() + + true_args = _TEST_RUN_ARGS + true_env = [ + {"name": key, "value": value} + for key, value in _TEST_ENVIRONMENT_VARIABLES.items() + ] + + true_worker_pool_spec = { + "replica_count": _TEST_REPLICA_COUNT, + "machine_spec": { + "machine_type": _TEST_MACHINE_TYPE, + "accelerator_type": _TEST_ACCELERATOR_TYPE, + "accelerator_count": _TEST_ACCELERATOR_COUNT, + }, + "disk_spec": { + "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT, + "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT, + }, + "python_package_spec": { + "executor_image_uri": _TEST_TRAINING_CONTAINER_IMAGE, + "python_module": _TEST_PYTHON_MODULE_NAME, + "package_uris": [_TEST_OUTPUT_PYTHON_PACKAGE_PATH], + "args": true_args, + "env": true_env, + }, + } + + true_fraction_split = gca_training_pipeline.FractionSplit( + training_fraction=_TEST_TRAINING_FRACTION_SPLIT, + validation_fraction=_TEST_VALIDATION_FRACTION_SPLIT, + test_fraction=_TEST_TEST_FRACTION_SPLIT, + ) + + env = [ + gca_env_var.EnvVar(name=str(key), value=str(value)) + for key, value in _TEST_MODEL_SERVING_CONTAINER_ENVIRONMENT_VARIABLES.items() + ] + + ports = [ + gca_model.Port(container_port=port) + for port in _TEST_MODEL_SERVING_CONTAINER_PORTS + ] + + true_container_spec = gca_model.ModelContainerSpec( + image_uri=_TEST_SERVING_CONTAINER_IMAGE, + predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE, + health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE, + command=_TEST_MODEL_SERVING_CONTAINER_COMMAND, + args=_TEST_MODEL_SERVING_CONTAINER_ARGS, + env=env, + ports=ports, + ) + + true_managed_model = gca_model.Model( + display_name=_TEST_MODEL_DISPLAY_NAME, + labels=_TEST_MODEL_LABELS, + description=_TEST_MODEL_DESCRIPTION, + container_spec=true_container_spec, + predict_schemata=gca_model.PredictSchemata( + instance_schema_uri=_TEST_MODEL_INSTANCE_SCHEMA_URI, + parameters_schema_uri=_TEST_MODEL_PARAMETERS_SCHEMA_URI, + prediction_schema_uri=_TEST_MODEL_PREDICTION_SCHEMA_URI, + ), + encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC, + ) + + true_input_data_config = gca_training_pipeline.InputDataConfig( + fraction_split=true_fraction_split, + dataset_id=mock_tabular_dataset.name, + gcs_destination=gca_io.GcsDestination( + output_uri_prefix=_TEST_BASE_OUTPUT_DIR + ), + ) + + true_training_pipeline = gca_training_pipeline.TrainingPipeline( + display_name=_TEST_DISPLAY_NAME, + labels=_TEST_LABELS, + training_task_definition=schema.training_job.definition.custom_task, + training_task_inputs=json_format.ParseDict( + { + "worker_pool_specs": [true_worker_pool_spec], + "base_output_directory": { + "output_uri_prefix": _TEST_BASE_OUTPUT_DIR + }, + "service_account": _TEST_SERVICE_ACCOUNT, + 
"network": _TEST_NETWORK, + }, + struct_pb2.Value(), + ), + model_to_upload=true_managed_model, + input_data_config=true_input_data_config, + encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC, + ) + + mock_pipeline_service_create.assert_called_once_with( + parent=initializer.global_config.common_location_path(), + training_pipeline=true_training_pipeline, + timeout=180.0, + ) + @pytest.mark.parametrize("sync", [True, False]) def test_run_call_pipeline_service_create_with_tabular_dataset_without_model_display_name_nor_model_labels( self, From 85305987551ffab2ef18c03dc51176525b3b83fb Mon Sep 17 00:00:00 2001 From: Sara Robinson Date: Thu, 24 Mar 2022 09:40:55 -0400 Subject: [PATCH 10/19] update system tests with timeout arg --- tests/system/aiplatform/test_dataset.py | 5 +++-- tests/system/aiplatform/test_e2e_tabular.py | 2 ++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/system/aiplatform/test_dataset.py b/tests/system/aiplatform/test_dataset.py index d847d8f678..e79403acfa 100644 --- a/tests/system/aiplatform/test_dataset.py +++ b/tests/system/aiplatform/test_dataset.py @@ -16,7 +16,6 @@ # import os -from socket import timeout import uuid import pytest import importlib @@ -197,7 +196,7 @@ def test_get_new_dataset_and_import(self, dataset_gapic_client, shared_state): my_dataset.import_data( gcs_source=_TEST_TEXT_ENTITY_EXTRACTION_GCS_SOURCE, import_schema_uri=_TEST_TEXT_ENTITY_IMPORT_SCHEMA, - timeout=180.0, + timeout=None, ) data_items_post_import = dataset_gapic_client.list_data_items( @@ -218,6 +217,7 @@ def test_create_and_import_image_dataset(self, dataset_gapic_client, shared_stat display_name=f"temp_sdk_integration_create_and_import_dataset_{uuid.uuid4()}", gcs_source=_TEST_IMAGE_OBJECT_DETECTION_GCS_SOURCE, import_schema_uri=_TEST_IMAGE_OBJ_DET_IMPORT_SCHEMA, + timeout=None, ) shared_state["dataset_name"] = img_dataset.resource_name @@ -238,6 +238,7 @@ def test_create_tabular_dataset(self, dataset_gapic_client, shared_state): tabular_dataset = aiplatform.TabularDataset.create( display_name=f"temp_sdk_integration_create_and_import_dataset_{uuid.uuid4()}", gcs_source=[_TEST_TABULAR_CLASSIFICATION_GCS_SOURCE], + timeout=None, ) shared_state["dataset_name"] = tabular_dataset.resource_name diff --git a/tests/system/aiplatform/test_e2e_tabular.py b/tests/system/aiplatform/test_e2e_tabular.py index 74474af991..108752ded0 100644 --- a/tests/system/aiplatform/test_e2e_tabular.py +++ b/tests/system/aiplatform/test_e2e_tabular.py @@ -113,6 +113,7 @@ def test_end_to_end_tabular(self, shared_state): timeout=1234, restart_job_on_worker_restart=True, enable_web_access=True, + timeout=None, sync=False, ) @@ -120,6 +121,7 @@ def test_end_to_end_tabular(self, shared_state): dataset=ds, target_column="median_house_value", model_display_name=self._make_display_name("automl-housing-model"), + timeout=None, sync=False, ) From 8cc46b011e862ef13071fdc79019cbbf5bdd4cb7 Mon Sep 17 00:00:00 2001 From: Sara Robinson Date: Thu, 24 Mar 2022 12:06:18 -0400 Subject: [PATCH 11/19] add timeout arg tests and run linter --- google/cloud/aiplatform/datasets/dataset.py | 16 ++--- .../aiplatform/datasets/image_dataset.py | 4 +- .../aiplatform/datasets/tabular_dataset.py | 4 +- .../cloud/aiplatform/datasets/text_dataset.py | 4 +- .../aiplatform/datasets/video_dataset.py | 4 +- google/cloud/aiplatform/jobs.py | 33 ++++++++- google/cloud/aiplatform/models.py | 29 ++++---- google/cloud/aiplatform/pipeline_jobs.py | 9 ++- .../tensorboard/tensorboard_resource.py | 4 +- google/cloud/aiplatform/training_jobs.py 
| 58 ++++++++------- tests/system/aiplatform/test_e2e_tabular.py | 3 +- tests/unit/aiplatform/test_custom_job.py | 52 ++++++++++++-- tests/unit/aiplatform/test_datasets.py | 9 --- .../test_hyperparameter_tuning_job.py | 70 +++++++++++++++++++ tests/unit/aiplatform/test_jobs.py | 51 ++++++++++++++ tests/unit/aiplatform/test_models.py | 1 - tests/unit/aiplatform/test_pipeline_jobs.py | 1 - 17 files changed, 264 insertions(+), 88 deletions(-) diff --git a/google/cloud/aiplatform/datasets/dataset.py b/google/cloud/aiplatform/datasets/dataset.py index 629404cdc7..a8477cbdde 100644 --- a/google/cloud/aiplatform/datasets/dataset.py +++ b/google/cloud/aiplatform/datasets/dataset.py @@ -201,8 +201,8 @@ def create( Overrides encryption_spec_key_name set in aiplatform.init. create_request_timeout (float): - Optional. The timeout for initiating this create request in seconds. Note: - this does not set the timeout on the underlying create job, only on the time + Optional. The timeout for initiating this create request in seconds. Note: + this does not set the timeout on the underlying create job, only on the time to initiate the create request. sync (bool): Whether to execute this method synchronously. If False, this method @@ -317,8 +317,8 @@ def _create_and_import( If set, this Dataset and all sub-resources of this Dataset will be secured by this key. create_request_timeout (float): - Optional. The timeout for initiating this create request in seconds. Note: - this does not set the timeout on the underlying create job, only on the time + Optional. The timeout for initiating this create request in seconds. Note: + this does not set the timeout on the underlying create job, only on the time to initiate the create request. sync (bool): Whether to execute this method synchronously. If False, this method @@ -434,8 +434,8 @@ def _create( If set, this Dataset and all sub-resources of this Dataset will be secured by this key. create_request_timeout (float): - Optional. The timeout for initiating this create request in seconds. Note: - this does not set the timeout on the underlying create job, only on the time + Optional. The timeout for initiating this create request in seconds. Note: + this does not set the timeout on the underlying create job, only on the time to initiate the create request. Returns: operation (Operation): @@ -520,8 +520,8 @@ def import_data( ``import_schema_uri``, e.g. jsonl file. import_request_timeout (float): - Optional. The timeout for initiating this import request in seconds. Note: - this does not set the timeout on the underlying import job, only on the time + Optional. The timeout for initiating this import request in seconds. Note: + this does not set the timeout on the underlying import job, only on the time to initiate the import request. sync (bool): Whether to execute this method synchronously. If False, this method diff --git a/google/cloud/aiplatform/datasets/image_dataset.py b/google/cloud/aiplatform/datasets/image_dataset.py index 1d6325418b..8f307594eb 100644 --- a/google/cloud/aiplatform/datasets/image_dataset.py +++ b/google/cloud/aiplatform/datasets/image_dataset.py @@ -119,8 +119,8 @@ def create( Overrides encryption_spec_key_name set in aiplatform.init. create_request_timeout (float): - Optional. The timeout for initiating this create request in seconds. Note: - this does not set the timeout on the underlying create job, only on the time + Optional. The timeout for initiating this create request in seconds. 
Note: + this does not set the timeout on the underlying create job, only on the time to initiate the create request. sync (bool): Whether to execute this method synchronously. If False, this method diff --git a/google/cloud/aiplatform/datasets/tabular_dataset.py b/google/cloud/aiplatform/datasets/tabular_dataset.py index 6d535e4258..880921a9bf 100644 --- a/google/cloud/aiplatform/datasets/tabular_dataset.py +++ b/google/cloud/aiplatform/datasets/tabular_dataset.py @@ -100,8 +100,8 @@ def create( Overrides encryption_spec_key_name set in aiplatform.init. create_request_timeout (float): - Optional. The timeout for initiating this create request in seconds. Note: - this does not set the timeout on the underlying create job, only on the time + Optional. The timeout for initiating this create request in seconds. Note: + this does not set the timeout on the underlying create job, only on the time to initiate the create request. sync (bool): Whether to execute this method synchronously. If False, this method diff --git a/google/cloud/aiplatform/datasets/text_dataset.py b/google/cloud/aiplatform/datasets/text_dataset.py index 720370ab19..288e5459f7 100644 --- a/google/cloud/aiplatform/datasets/text_dataset.py +++ b/google/cloud/aiplatform/datasets/text_dataset.py @@ -126,8 +126,8 @@ def create( Overrides encryption_spec_key_name set in aiplatform.init. create_request_timeout (float): - Optional. The timeout for initiating this create request in seconds. Note: - this does not set the timeout on the underlying create job, only on the time + Optional. The timeout for initiating this create request in seconds. Note: + this does not set the timeout on the underlying create job, only on the time to initiate the create request. sync (bool): Whether to execute this method synchronously. If False, this method diff --git a/google/cloud/aiplatform/datasets/video_dataset.py b/google/cloud/aiplatform/datasets/video_dataset.py index 28f123715a..f7be347341 100644 --- a/google/cloud/aiplatform/datasets/video_dataset.py +++ b/google/cloud/aiplatform/datasets/video_dataset.py @@ -119,8 +119,8 @@ def create( Overrides encryption_spec_key_name set in aiplatform.init. create_request_timeout (float): - Optional. The timeout for initiating this create request in seconds. Note: - this does not set the timeout on the underlying create job, only on the time + Optional. The timeout for initiating this create request in seconds. Note: + this does not set the timeout on the underlying create job, only on the time to initiate the create request. sync (bool): Whether to execute this method synchronously. If False, this method diff --git a/google/cloud/aiplatform/jobs.py b/google/cloud/aiplatform/jobs.py index 9d53b13bee..295f0615b8 100644 --- a/google/cloud/aiplatform/jobs.py +++ b/google/cloud/aiplatform/jobs.py @@ -371,6 +371,7 @@ def create( location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, encryption_spec_key_name: Optional[str] = None, + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> "BatchPredictionJob": """Create a batch prediction job. @@ -521,6 +522,10 @@ def create( be encrypted with the provided encryption key. Overrides encryption_spec_key_name set in aiplatform.init. + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. sync (bool): Whether to execute this method synchronously. 
If False, this method will be executed in concurrent Future and any downstream object will @@ -658,6 +663,7 @@ def create( model_or_model_name=model_name, gca_batch_prediction_job=gapic_batch_prediction_job, generate_explanation=generate_explanation, + create_request_timeout=create_request_timeout, sync=sync, ) @@ -669,6 +675,7 @@ def _create( model_or_model_name: Union[str, "aiplatform.Model"], gca_batch_prediction_job: gca_bp_job_compat.BatchPredictionJob, generate_explanation: bool, + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> "BatchPredictionJob": """Create a batch prediction job. @@ -684,6 +691,10 @@ def _create( generate_explanation (bool): Required. Generate explanation along with the batch prediction results. + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on + the time to initiate the request. Returns: (jobs.BatchPredictionJob): Instantiated representation of the created batch prediction job. @@ -714,7 +725,9 @@ def _create( _LOGGER.log_create_with_lro(cls) gca_batch_prediction_job = api_client.create_batch_prediction_job( - parent=parent, batch_prediction_job=gca_batch_prediction_job + parent=parent, + batch_prediction_job=gca_batch_prediction_job, + timeout=create_request_timeout, ) empty_batch_prediction_job._gca_resource = gca_batch_prediction_job @@ -1372,6 +1385,7 @@ def run( restart_job_on_worker_restart: bool = False, enable_web_access: bool = False, tensorboard: Optional[str] = None, + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> None: """Run this configured CustomJob. @@ -1411,6 +1425,10 @@ def run( `service_account` is required with provided `tensorboard`. For more information on configuring your service account please visit: https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method will unblock and it will be executed in a concurrent Future. @@ -1438,7 +1456,9 @@ def run( _LOGGER.log_create_with_lro(self.__class__) self._gca_resource = self.api_client.create_custom_job( - parent=self._parent, custom_job=self._gca_resource + parent=self._parent, + custom_job=self._gca_resource, + timeout=create_request_timeout, ) _LOGGER.log_create_complete_with_getter( @@ -1753,6 +1773,7 @@ def run( restart_job_on_worker_restart: bool = False, enable_web_access: bool = False, tensorboard: Optional[str] = None, + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> None: """Run this configured CustomJob. @@ -1792,6 +1813,10 @@ def run( `service_account` is required with provided `tensorboard`. For more information on configuring your service account please visit: https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method will unblock and it will be executed in a concurrent Future. 
@@ -1819,7 +1844,9 @@ def run( _LOGGER.log_create_with_lro(self.__class__) self._gca_resource = self.api_client.create_hyperparameter_tuning_job( - parent=self._parent, hyperparameter_tuning_job=self._gca_resource + parent=self._parent, + hyperparameter_tuning_job=self._gca_resource, + timeout=create_request_timeout, ) _LOGGER.log_create_complete_with_getter( diff --git a/google/cloud/aiplatform/models.py b/google/cloud/aiplatform/models.py index ffe20675da..904d93ac13 100644 --- a/google/cloud/aiplatform/models.py +++ b/google/cloud/aiplatform/models.py @@ -15,7 +15,6 @@ # limitations under the License. # import pathlib -from socket import timeout import proto import re import shutil @@ -909,8 +908,8 @@ def _deploy_call( Optional. Strings which should be sent along with the request as metadata. deploy_request_timeout (float): - Optional. The timeout for initiating this deploy request in seconds. Note: - this does not set the timeout on the underlying deploy job, only on the time + Optional. The timeout for initiating this deploy request in seconds. Note: + this does not set the timeout on the underlying deploy job, only on the time to initiate the deploy request. sync (bool): Whether to execute this method synchronously. If False, this method @@ -1801,8 +1800,8 @@ def upload( Optional. Bucket to stage local model artifacts. Overrides staging_bucket set in aiplatform.init. upload_request_timeout (float): - Optional. The timeout for initiating this upload request in seconds. Note: - this does not set the timeout on the underlying upload job, only on the time + Optional. The timeout for initiating this upload request in seconds. Note: + this does not set the timeout on the underlying upload job, only on the time to initiate the upload request. Returns: model: Instantiated representation of the uploaded model resource. @@ -2021,8 +2020,8 @@ def deploy( Overrides encryption_spec_key_name set in aiplatform.init deploy_request_timeout (float): - Optional. The timeout for initiating this deploy request in seconds. Note: - this does not set the timeout on the underlying deploy job, only on the time + Optional. The timeout for initiating this deploy request in seconds. Note: + this does not set the timeout on the underlying deploy job, only on the time to initiate the deploy request. sync (bool): Whether to execute this method synchronously. If False, this method @@ -2162,8 +2161,8 @@ def _deploy( Overrides encryption_spec_key_name set in aiplatform.init deploy_request_timeout (float): - Optional. The timeout for initiating this deploy request in seconds. Note: - this does not set the timeout on the underlying deploy job, only on the time + Optional. The timeout for initiating this deploy request in seconds. Note: + this does not set the timeout on the underlying deploy job, only on the time to initiate the deploy request. sync (bool): Whether to execute this method synchronously. If False, this method @@ -2737,8 +2736,8 @@ def upload_xgboost_model_file( Optional. Bucket to stage local model artifacts. Overrides staging_bucket set in aiplatform.init. upload_request_timeout (float): - Optional. The timeout for initiating this upload request in seconds. Note: - this does not set the timeout on the underlying upload job, only on the time + Optional. The timeout for initiating this upload request in seconds. Note: + this does not set the timeout on the underlying upload job, only on the time to initiate the upload request. Returns: model: Instantiated representation of the uploaded model resource. 
@@ -2939,8 +2938,8 @@ def upload_scikit_learn_model_file( Optional. Bucket to stage local model artifacts. Overrides staging_bucket set in aiplatform.init. upload_request_timeout (float): - Optional. The timeout for initiating this upload request in seconds. Note: - this does not set the timeout on the underlying upload job, only on the time + Optional. The timeout for initiating this upload request in seconds. Note: + this does not set the timeout on the underlying upload job, only on the time to initiate the upload request. Returns: model: Instantiated representation of the uploaded model resource. @@ -3142,8 +3141,8 @@ def upload_tensorflow_saved_model( Optional. Bucket to stage local model artifacts. Overrides staging_bucket set in aiplatform.init. upload_request_timeout (float): - Optional. The timeout for initiating this upload request in seconds. Note: - this does not set the timeout on the underlying upload job, only on the time + Optional. The timeout for initiating this upload request in seconds. Note: + this does not set the timeout on the underlying upload job, only on the time to initiate the upload request. Returns: model: Instantiated representation of the uploaded model resource. diff --git a/google/cloud/aiplatform/pipeline_jobs.py b/google/cloud/aiplatform/pipeline_jobs.py index e2a96684e9..d2edc6d0be 100644 --- a/google/cloud/aiplatform/pipeline_jobs.py +++ b/google/cloud/aiplatform/pipeline_jobs.py @@ -17,7 +17,6 @@ import datetime import logging -from socket import timeout import time import re from typing import Any, Dict, List, Optional @@ -251,8 +250,8 @@ def run( Private services access must already be configured for the network. If left unspecified, the job is not peered with any network. create_request_timeout (float): - Optional. The timeout for initiating this job create request in seconds. Note: - this does not set the timeout on the underlying job create job, only on the time + Optional. The timeout for initiating this job create request in seconds. Note: + this does not set the timeout on the underlying job create job, only on the time to initiate the job create request. sync (bool): Optional. Whether to execute this method synchronously. If False, this method will unblock and it will be executed in a concurrent Future. @@ -284,8 +283,8 @@ def submit( Private services access must already be configured for the network. If left unspecified, the job is not peered with any network. create_request_timeout (float): - Optional. The timeout for initiating this job create request in seconds. Note: - this does not set the timeout on the underlying job create job, only on the time + Optional. The timeout for initiating this job create request in seconds. Note: + this does not set the timeout on the underlying job create job, only on the time to initiate the job create request. """ if service_account: diff --git a/google/cloud/aiplatform/tensorboard/tensorboard_resource.py b/google/cloud/aiplatform/tensorboard/tensorboard_resource.py index ef06d16178..2d39833b25 100644 --- a/google/cloud/aiplatform/tensorboard/tensorboard_resource.py +++ b/google/cloud/aiplatform/tensorboard/tensorboard_resource.py @@ -145,8 +145,8 @@ def create( Overrides encryption_spec_key_name set in aiplatform.init. create_request_timeout (float): - Optional. The timeout for initiating this create request in seconds. Note: - this does not set the timeout on the underlying create job, only on the time + Optional. The timeout for initiating this create request in seconds. 
Note: + this does not set the timeout on the underlying create job, only on the time to initiate the create request. Returns: diff --git a/google/cloud/aiplatform/training_jobs.py b/google/cloud/aiplatform/training_jobs.py index deb12f68d9..8a53a812d8 100644 --- a/google/cloud/aiplatform/training_jobs.py +++ b/google/cloud/aiplatform/training_jobs.py @@ -16,12 +16,10 @@ # import datetime -from socket import timeout import time from typing import Dict, List, Optional, Sequence, Tuple, Union import abc -from venv import create from google.auth import credentials as auth_credentials from google.cloud.aiplatform import base @@ -717,8 +715,8 @@ def _run_job( - AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation" - AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test" create_request_timeout (float): - Optional. The timeout for initiating this request in seconds. Note: - this does not set the timeout on the underlying training job, only on + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on the time to initiate the request. """ @@ -2072,8 +2070,8 @@ def run( For more information on configuring your service account please visit: https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training create_request_timeout (float): - Optional. The timeout for initiating this request in seconds. Note: - this does not set the timeout on the underlying training job, only on + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method @@ -2314,8 +2312,8 @@ def _run( reduction_server_container_uri (str): Optional. The Uri of the reduction server container image. create_request_timeout (float) - Optional. The timeout for initiating this request in seconds. Note: - this does not set the timeout on the underlying training job, only on the + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method @@ -2600,8 +2598,8 @@ def __init__( Bucket used to stage source and training artifacts. Overrides staging_bucket set in aiplatform.init. create_request_timeout (float): - Optional. The timeout for initiating this request in seconds. Note: - this does not set the timeout on the underlying training job, only on + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on the time to initiate the request. """ super().__init__( @@ -2908,8 +2906,8 @@ def run( For more information on configuring your service account please visit: https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training create_request_timeout (float): - Optional. The timeout for initiating this request in seconds. Note: - this does not set the timeout on the underlying training job, only on + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method @@ -3144,8 +3142,8 @@ def _run( reduction_server_container_uri (str): Optional. 
The Uri of the reduction server container image. create_request_timeout (float): - Optional. The timeout for initiating this request in seconds. Note: - this does not set the timeout on the underlying training job, only on + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method @@ -3730,8 +3728,8 @@ def _run( Applies only if [export_evaluated_data_items] is True and [export_evaluated_data_items_bigquery_destination_uri] is specified. create_request_timeout (float): - Optional. The timeout for initiating this request in seconds. Note: - this does not set the timeout on the underlying training job, only on + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method @@ -4164,8 +4162,8 @@ def run( additional_experiments (List[str]): Optional. Additional experiment flags for the time series forcasting training. create_request_timeout (float): - Optional. The timeout for initiating this request in seconds. Note: - this does not set the timeout on the underlying training job, only on + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method @@ -4407,8 +4405,8 @@ def _run( See https://goo.gl/xmQnxf for more information and examples of labels. create_request_timeout (float): - Optional. The timeout for initiating this request in seconds. Note: - this does not set the timeout on the underlying training job, only on + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method @@ -4973,8 +4971,8 @@ def _run( used, if further training does no longer brings significant improvement to the model. create_request_timeout (float): - Optional. The timeout for initiating this request in seconds. Note: - this does not set the timeout on the underlying training job, only on + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method @@ -5557,8 +5555,8 @@ def run( For more information on configuring your service account please visit: https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training create_request_timeout (float): - Optional. The timeout for initiating this request in seconds. Note: - this does not set the timeout on the underlying training job, only on + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method @@ -5775,8 +5773,8 @@ def _run( reduction_server_container_uri (str): Optional. The Uri of the reduction server container image. create_request_timeout (float): - Optional. 
The timeout for initiating this request in seconds. Note: - this does not set the timeout on the underlying training job, only on + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method @@ -6169,8 +6167,8 @@ def _run( See https://goo.gl/xmQnxf for more information and examples of labels. create_request_timeout (float): - Optional. The timeout for initiating this request in seconds. Note: - this does not set the timeout on the underlying training job, only on + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method @@ -6578,8 +6576,8 @@ def _run( See https://goo.gl/xmQnxf for more information and examples of labels. create_request_timeout (float): - Optional. The timeout for initiating this request in seconds. Note: - this does not set the timeout on the underlying training job, only on + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying training job, only on the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method diff --git a/tests/system/aiplatform/test_e2e_tabular.py b/tests/system/aiplatform/test_e2e_tabular.py index 108752ded0..e36ce5458a 100644 --- a/tests/system/aiplatform/test_e2e_tabular.py +++ b/tests/system/aiplatform/test_e2e_tabular.py @@ -113,7 +113,7 @@ def test_end_to_end_tabular(self, shared_state): timeout=1234, restart_job_on_worker_restart=True, enable_web_access=True, - timeout=None, + create_request_timeout=None, sync=False, ) @@ -121,7 +121,6 @@ def test_end_to_end_tabular(self, shared_state): dataset=ds, target_column="median_house_value", model_display_name=self._make_display_name("automl-housing-model"), - timeout=None, sync=False, ) diff --git a/tests/unit/aiplatform/test_custom_job.py b/tests/unit/aiplatform/test_custom_job.py index 04bb6b8dd2..36d794f8f5 100644 --- a/tests/unit/aiplatform/test_custom_job.py +++ b/tests/unit/aiplatform/test_custom_job.py @@ -293,6 +293,7 @@ def test_create_custom_job(self, create_custom_job_mock, get_custom_job_mock, sy network=_TEST_NETWORK, timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, + create_request_timeout=None, sync=sync, ) @@ -305,7 +306,7 @@ def test_create_custom_job(self, create_custom_job_mock, get_custom_job_mock, sy expected_custom_job = _get_custom_job_proto() create_custom_job_mock.assert_called_once_with( - parent=_TEST_PARENT, custom_job=expected_custom_job + parent=_TEST_PARENT, custom_job=expected_custom_job, timeout=None, ) assert job.job_spec == expected_custom_job.job_spec @@ -314,6 +315,46 @@ def test_create_custom_job(self, create_custom_job_mock, get_custom_job_mock, sy ) assert job.network == _TEST_NETWORK + @pytest.mark.parametrize("sync", [True, False]) + def test_create_custom_job_with_timeout( + self, create_custom_job_mock, get_custom_job_mock, sync + ): + + aiplatform.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + staging_bucket=_TEST_STAGING_BUCKET, + encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME, + ) + + job = aiplatform.CustomJob( + display_name=_TEST_DISPLAY_NAME, + worker_pool_specs=_TEST_WORKER_POOL_SPEC, 
+ base_output_dir=_TEST_BASE_OUTPUT_DIR, + labels=_TEST_LABELS, + ) + + job.run( + service_account=_TEST_SERVICE_ACCOUNT, + network=_TEST_NETWORK, + timeout=_TEST_TIMEOUT, + restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, + create_request_timeout=180.0, + sync=sync, + ) + + job.wait_for_resource_creation() + + assert job.resource_name == _TEST_CUSTOM_JOB_NAME + + job.wait() + + expected_custom_job = _get_custom_job_proto() + + create_custom_job_mock.assert_called_once_with( + parent=_TEST_PARENT, custom_job=expected_custom_job, timeout=180.0, + ) + @pytest.mark.parametrize("sync", [True, False]) def test_run_custom_job_with_fail_raises( self, create_custom_job_mock, get_custom_job_mock_with_fail, sync @@ -342,6 +383,7 @@ def test_run_custom_job_with_fail_raises( network=_TEST_NETWORK, timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, + create_request_timeout=None, sync=sync, ) @@ -354,7 +396,7 @@ def test_run_custom_job_with_fail_raises( expected_custom_job = _get_custom_job_proto() create_custom_job_mock.assert_called_once_with( - parent=_TEST_PARENT, custom_job=expected_custom_job + parent=_TEST_PARENT, custom_job=expected_custom_job, timeout=None, ) assert job.job_spec == expected_custom_job.job_spec @@ -517,6 +559,7 @@ def test_create_custom_job_with_enable_web_access( network=_TEST_NETWORK, timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, + create_request_timeout=None, sync=sync, ) @@ -531,7 +574,7 @@ def test_create_custom_job_with_enable_web_access( expected_custom_job = _get_custom_job_proto_with_enable_web_access() create_custom_job_mock_with_enable_web_access.assert_called_once_with( - parent=_TEST_PARENT, custom_job=expected_custom_job + parent=_TEST_PARENT, custom_job=expected_custom_job, timeout=None, ) assert job.job_spec == expected_custom_job.job_spec @@ -585,6 +628,7 @@ def test_create_custom_job_with_tensorboard( network=_TEST_NETWORK, timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, + create_request_timeout=None, sync=sync, ) @@ -594,7 +638,7 @@ def test_create_custom_job_with_tensorboard( expected_custom_job.job_spec.tensorboard = _TEST_TENSORBOARD_NAME create_custom_job_mock_with_tensorboard.assert_called_once_with( - parent=_TEST_PARENT, custom_job=expected_custom_job + parent=_TEST_PARENT, custom_job=expected_custom_job, timeout=None, ) expected_custom_job = _get_custom_job_proto() diff --git a/tests/unit/aiplatform/test_datasets.py b/tests/unit/aiplatform/test_datasets.py index 272f70b638..3101de1413 100644 --- a/tests/unit/aiplatform/test_datasets.py +++ b/tests/unit/aiplatform/test_datasets.py @@ -16,8 +16,6 @@ # import os -from socket import timeout -from time import time import pytest @@ -689,13 +687,6 @@ def test_create_dataset_tabular(self, create_dataset_mock): def test_create_dataset_tabular_with_timeout(self, create_dataset_mock): aiplatform.init(project=_TEST_PROJECT) - my_dataset = datasets._Dataset.create( - display_name=_TEST_DISPLAY_NAME, - metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR, - encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - create_request_timeout=180.0, - ) - expected_dataset = gca_dataset.Dataset( display_name=_TEST_DISPLAY_NAME, metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR, diff --git a/tests/unit/aiplatform/test_hyperparameter_tuning_job.py b/tests/unit/aiplatform/test_hyperparameter_tuning_job.py index 0ec9022df2..f43afe041a 100644 --- 
a/tests/unit/aiplatform/test_hyperparameter_tuning_job.py +++ b/tests/unit/aiplatform/test_hyperparameter_tuning_job.py @@ -397,6 +397,7 @@ def test_create_hyperparameter_tuning_job( network=_TEST_NETWORK, timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, + create_request_timeout=None, sync=sync, ) @@ -407,12 +408,75 @@ def test_create_hyperparameter_tuning_job( create_hyperparameter_tuning_job_mock.assert_called_once_with( parent=_TEST_PARENT, hyperparameter_tuning_job=expected_hyperparameter_tuning_job, + timeout=None, ) assert job.state == gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED assert job.network == _TEST_NETWORK assert job.trials == [] + @pytest.mark.parametrize("sync", [True, False]) + def test_create_hyperparameter_tuning_job_with_timeout( + self, + create_hyperparameter_tuning_job_mock, + get_hyperparameter_tuning_job_mock, + sync, + ): + + aiplatform.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + staging_bucket=_TEST_STAGING_BUCKET, + encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME, + ) + + custom_job = aiplatform.CustomJob( + display_name=test_custom_job._TEST_DISPLAY_NAME, + worker_pool_specs=test_custom_job._TEST_WORKER_POOL_SPEC, + base_output_dir=test_custom_job._TEST_BASE_OUTPUT_DIR, + ) + + job = aiplatform.HyperparameterTuningJob( + display_name=_TEST_DISPLAY_NAME, + custom_job=custom_job, + metric_spec={_TEST_METRIC_SPEC_KEY: _TEST_METRIC_SPEC_VALUE}, + parameter_spec={ + "lr": hpt.DoubleParameterSpec(min=0.001, max=0.1, scale="log"), + "units": hpt.IntegerParameterSpec(min=4, max=1028, scale="linear"), + "activation": hpt.CategoricalParameterSpec( + values=["relu", "sigmoid", "elu", "selu", "tanh"] + ), + "batch_size": hpt.DiscreteParameterSpec( + values=[16, 32], scale="linear" + ), + }, + parallel_trial_count=_TEST_PARALLEL_TRIAL_COUNT, + max_trial_count=_TEST_MAX_TRIAL_COUNT, + max_failed_trial_count=_TEST_MAX_FAILED_TRIAL_COUNT, + search_algorithm=_TEST_SEARCH_ALGORITHM, + measurement_selection=_TEST_MEASUREMENT_SELECTION, + labels=_TEST_LABELS, + ) + + job.run( + service_account=_TEST_SERVICE_ACCOUNT, + network=_TEST_NETWORK, + timeout=_TEST_TIMEOUT, + restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, + create_request_timeout=180.0, + sync=sync, + ) + + job.wait() + + expected_hyperparameter_tuning_job = _get_hyperparameter_tuning_job_proto() + + create_hyperparameter_tuning_job_mock.assert_called_once_with( + parent=_TEST_PARENT, + hyperparameter_tuning_job=expected_hyperparameter_tuning_job, + timeout=180.0, + ) + @pytest.mark.parametrize("sync", [True, False]) def test_run_hyperparameter_tuning_job_with_fail_raises( self, @@ -461,6 +525,7 @@ def test_run_hyperparameter_tuning_job_with_fail_raises( network=_TEST_NETWORK, timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, + create_request_timeout=None, sync=sync, ) @@ -471,6 +536,7 @@ def test_run_hyperparameter_tuning_job_with_fail_raises( create_hyperparameter_tuning_job_mock.assert_called_once_with( parent=_TEST_PARENT, hyperparameter_tuning_job=expected_hyperparameter_tuning_job, + timeout=None, ) assert job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_FAILED @@ -641,6 +707,7 @@ def test_create_hyperparameter_tuning_job_with_tensorboard( timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, tensorboard=test_custom_job._TEST_TENSORBOARD_NAME, + create_request_timeout=None, sync=sync, ) @@ -654,6 +721,7 @@ def 
test_create_hyperparameter_tuning_job_with_tensorboard( create_hyperparameter_tuning_job_mock_with_tensorboard.assert_called_once_with( parent=_TEST_PARENT, hyperparameter_tuning_job=expected_hyperparameter_tuning_job, + timeout=None, ) assert ( @@ -711,6 +779,7 @@ def test_create_hyperparameter_tuning_job_with_enable_web_access( timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, enable_web_access=test_custom_job._TEST_ENABLE_WEB_ACCESS, + create_request_timeout=None, sync=sync, ) @@ -725,6 +794,7 @@ def test_create_hyperparameter_tuning_job_with_enable_web_access( create_hyperparameter_tuning_job_mock_with_enable_web_access.assert_called_once_with( parent=_TEST_PARENT, hyperparameter_tuning_job=expected_hyperparameter_tuning_job, + timeout=None, ) assert job.state == gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED diff --git a/tests/unit/aiplatform/test_jobs.py b/tests/unit/aiplatform/test_jobs.py index 9642a006c3..c6d8dfe69a 100644 --- a/tests/unit/aiplatform/test_jobs.py +++ b/tests/unit/aiplatform/test_jobs.py @@ -485,6 +485,7 @@ def test_batch_predict_gcs_source_and_dest( job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME, gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE, gcs_destination_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX, + create_request_timeout=None, sync=sync, ) @@ -513,6 +514,52 @@ def test_batch_predict_gcs_source_and_dest( create_batch_prediction_job_mock.assert_called_once_with( parent=_TEST_PARENT, batch_prediction_job=expected_gapic_batch_prediction_job, + timeout=None, + ) + + @pytest.mark.parametrize("sync", [True, False]) + @pytest.mark.usefixtures("get_batch_prediction_job_mock") + def test_batch_predict_gcs_source_and_dest_with_timeout( + self, create_batch_prediction_job_mock, sync + ): + aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION) + + # Make SDK batch_predict method call + batch_prediction_job = jobs.BatchPredictionJob.create( + model_name=_TEST_MODEL_NAME, + job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME, + gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE, + gcs_destination_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX, + create_request_timeout=180.0, + sync=sync, + ) + + batch_prediction_job.wait_for_resource_creation() + + batch_prediction_job.wait() + + # Construct expected request + expected_gapic_batch_prediction_job = gca_batch_prediction_job_compat.BatchPredictionJob( + display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME, + model=_TEST_MODEL_NAME, + input_config=gca_batch_prediction_job_compat.BatchPredictionJob.InputConfig( + instances_format="jsonl", + gcs_source=gca_io_compat.GcsSource( + uris=[_TEST_BATCH_PREDICTION_GCS_SOURCE] + ), + ), + output_config=gca_batch_prediction_job_compat.BatchPredictionJob.OutputConfig( + gcs_destination=gca_io_compat.GcsDestination( + output_uri_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX + ), + predictions_format="jsonl", + ), + ) + + create_batch_prediction_job_mock.assert_called_once_with( + parent=_TEST_PARENT, + batch_prediction_job=expected_gapic_batch_prediction_job, + timeout=180.0, ) @pytest.mark.usefixtures("get_batch_prediction_job_mock") @@ -548,6 +595,7 @@ def test_batch_predict_gcs_source_bq_dest( job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME, gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE, bigquery_destination_prefix=_TEST_BATCH_PREDICTION_BQ_PREFIX, + create_request_timeout=None, sync=sync, ) @@ -581,6 +629,7 @@ def test_batch_predict_gcs_source_bq_dest( create_batch_prediction_job_mock.assert_called_once_with( 
parent=_TEST_PARENT, batch_prediction_job=expected_gapic_batch_prediction_job, + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -608,6 +657,7 @@ def test_batch_predict_with_all_args( explanation_parameters=_TEST_EXPLANATION_PARAMETERS, labels=_TEST_LABEL, credentials=creds, + create_request_timeout=None, sync=sync, ) @@ -651,6 +701,7 @@ def test_batch_predict_with_all_args( create_batch_prediction_job_with_explanations_mock.assert_called_once_with( parent=f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}", batch_prediction_job=expected_gapic_batch_prediction_job, + timeout=None, ) @pytest.mark.usefixtures("create_batch_prediction_job_mock_fail") diff --git a/tests/unit/aiplatform/test_models.py b/tests/unit/aiplatform/test_models.py index 9d71078666..0131b578c7 100644 --- a/tests/unit/aiplatform/test_models.py +++ b/tests/unit/aiplatform/test_models.py @@ -18,7 +18,6 @@ import importlib from concurrent import futures import pathlib -from socket import timeout import pytest from unittest import mock from unittest.mock import patch diff --git a/tests/unit/aiplatform/test_pipeline_jobs.py b/tests/unit/aiplatform/test_pipeline_jobs.py index a1fddc63a5..1d57e68470 100644 --- a/tests/unit/aiplatform/test_pipeline_jobs.py +++ b/tests/unit/aiplatform/test_pipeline_jobs.py @@ -15,7 +15,6 @@ # limitations under the License. # -from socket import timeout import pytest import json From 77994c3473b46ce1b1ad89bae9bf5853c89784c1 Mon Sep 17 00:00:00 2001 From: Sara Robinson Date: Thu, 24 Mar 2022 13:22:54 -0400 Subject: [PATCH 12/19] add timeout arg and tests to tensorboard --- .../tensorboard/tensorboard_resource.py | 12 +++++ google/cloud/aiplatform/training_jobs.py | 11 ++-- tests/unit/aiplatform/test_tensorboard.py | 53 +++++++++++++++++++ 3 files changed, 71 insertions(+), 5 deletions(-) diff --git a/google/cloud/aiplatform/tensorboard/tensorboard_resource.py b/google/cloud/aiplatform/tensorboard/tensorboard_resource.py index 2d39833b25..8f0e9dc7cf 100644 --- a/google/cloud/aiplatform/tensorboard/tensorboard_resource.py +++ b/google/cloud/aiplatform/tensorboard/tensorboard_resource.py @@ -367,6 +367,7 @@ def create( location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, request_metadata: Sequence[Tuple[str, str]] = (), + create_request_timeout: Optional[float] = None, ) -> "TensorboardExperiment": """Creates a new TensorboardExperiment. @@ -426,6 +427,10 @@ def create( credentials set in aiplatform.init. request_metadata (Sequence[Tuple[str, str]]): Optional. Strings which should be sent along with the request as metadata. + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. Returns: TensorboardExperiment: The TensorboardExperiment resource. """ @@ -458,6 +463,7 @@ def create( tensorboard_experiment=gapic_tensorboard_experiment, tensorboard_experiment_id=tensorboard_experiment_id, metadata=request_metadata, + timeout=create_request_timeout, ) _LOGGER.log_create_complete(cls, tensorboard_experiment, "tb experiment") @@ -619,6 +625,7 @@ def create( location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, request_metadata: Sequence[Tuple[str, str]] = (), + create_request_timeout: Optional[float] = None, ) -> "TensorboardRun": """Creates a new tensorboard. @@ -681,6 +688,10 @@ def create( credentials set in aiplatform.init. 
request_metadata (Sequence[Tuple[str, str]]): Optional. Strings which should be sent along with the request as metadata. + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. Returns: TensorboardExperiment: The TensorboardExperiment resource. """ @@ -716,6 +727,7 @@ def create( tensorboard_run=gapic_tensorboard_run, tensorboard_run_id=tensorboard_run_id, metadata=request_metadata, + timeout=create_request_timeout, ) _LOGGER.log_create_complete(cls, tensorboard_run, "tb_run") diff --git a/google/cloud/aiplatform/training_jobs.py b/google/cloud/aiplatform/training_jobs.py index 8a53a812d8..0fac54f29e 100644 --- a/google/cloud/aiplatform/training_jobs.py +++ b/google/cloud/aiplatform/training_jobs.py @@ -2421,7 +2421,6 @@ def __init__( training_encryption_spec_key_name: Optional[str] = None, model_encryption_spec_key_name: Optional[str] = None, staging_bucket: Optional[str] = None, - create_request_timeout: Optional[float] = None, ): """Constructs a Custom Container Training Job. @@ -2597,10 +2596,6 @@ def __init__( staging_bucket (str): Bucket used to stage source and training artifacts. Overrides staging_bucket set in aiplatform.init. - create_request_timeout (float): - Optional. The timeout for initiating this request in seconds. Note: - this does not set the timeout on the underlying training job, only on - the time to initiate the request. """ super().__init__( display_name=display_name, @@ -3410,6 +3405,7 @@ def run( export_evaluated_data_items_bigquery_destination_uri: Optional[str] = None, export_evaluated_data_items_override_destination: bool = False, additional_experiments: Optional[List[str]] = None, + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> models.Model: """Runs the training job and returns a model. @@ -3538,6 +3534,10 @@ def run( [export_evaluated_data_items_bigquery_destination_uri] is specified. additional_experiments (List[str]): Optional. Additional experiment flags for the automl tables training. + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. sync (bool): Whether to execute this method synchronously. 
If False, this method will be executed in concurrent Future and any downstream object will @@ -3579,6 +3579,7 @@ def run( export_evaluated_data_items=export_evaluated_data_items, export_evaluated_data_items_bigquery_destination_uri=export_evaluated_data_items_bigquery_destination_uri, export_evaluated_data_items_override_destination=export_evaluated_data_items_override_destination, + create_request_timeout=create_request_timeout, sync=sync, ) diff --git a/tests/unit/aiplatform/test_tensorboard.py b/tests/unit/aiplatform/test_tensorboard.py index dbe1054686..7e1c3e88be 100644 --- a/tests/unit/aiplatform/test_tensorboard.py +++ b/tests/unit/aiplatform/test_tensorboard.py @@ -496,6 +496,7 @@ def test_create_tensorboard_experiment( tensorboard_experiment_id=_TEST_TENSORBOARD_EXPERIMENT_ID, tensorboard_name=_TEST_NAME, display_name=_TEST_DISPLAY_NAME, + create_request_timeout=None, ) expected_tensorboard_experiment = gca_tensorboard_experiment.TensorboardExperiment( @@ -507,12 +508,38 @@ def test_create_tensorboard_experiment( tensorboard_experiment=expected_tensorboard_experiment, tensorboard_experiment_id=_TEST_TENSORBOARD_EXPERIMENT_ID, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) get_tensorboard_experiment_mock.assert_called_once_with( name=_TEST_TENSORBOARD_EXPERIMENT_NAME, retry=base._DEFAULT_RETRY ) + def test_create_tensorboard_experiment_with_timeout( + self, create_tensorboard_experiment_mock, get_tensorboard_experiment_mock + ): + + aiplatform.init(project=_TEST_PROJECT,) + + tensorboard.TensorboardExperiment.create( + tensorboard_experiment_id=_TEST_TENSORBOARD_EXPERIMENT_ID, + tensorboard_name=_TEST_NAME, + display_name=_TEST_DISPLAY_NAME, + create_request_timeout=180.0, + ) + + expected_tensorboard_experiment = gca_tensorboard_experiment.TensorboardExperiment( + display_name=_TEST_DISPLAY_NAME, + ) + + create_tensorboard_experiment_mock.assert_called_once_with( + parent=_TEST_NAME, + tensorboard_experiment=expected_tensorboard_experiment, + tensorboard_experiment_id=_TEST_TENSORBOARD_EXPERIMENT_ID, + metadata=_TEST_REQUEST_METADATA, + timeout=180.0, + ) + @pytest.mark.usefixtures("get_tensorboard_experiment_mock") def test_delete_tensorboard_experiement(self, delete_tensorboard_experiment_mock): aiplatform.init(project=_TEST_PROJECT) @@ -589,6 +616,7 @@ def test_create_tensorboard_run( tensorboard.TensorboardRun.create( tensorboard_run_id=_TEST_TENSORBOARD_RUN_ID, tensorboard_experiment_name=_TEST_TENSORBOARD_EXPERIMENT_NAME, + create_request_timeout=None, ) expected_tensorboard_run = gca_tensorboard_run.TensorboardRun( @@ -600,12 +628,37 @@ def test_create_tensorboard_run( tensorboard_run=expected_tensorboard_run, tensorboard_run_id=_TEST_TENSORBOARD_RUN_ID, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) get_tensorboard_run_mock.assert_called_once_with( name=_TEST_TENSORBOARD_RUN_NAME, retry=base._DEFAULT_RETRY ) + def test_create_tensorboard_run_with_timeout( + self, create_tensorboard_run_mock, get_tensorboard_run_mock + ): + + aiplatform.init(project=_TEST_PROJECT,) + + tensorboard.TensorboardRun.create( + tensorboard_run_id=_TEST_TENSORBOARD_RUN_ID, + tensorboard_experiment_name=_TEST_TENSORBOARD_EXPERIMENT_NAME, + create_request_timeout=180.0, + ) + + expected_tensorboard_run = gca_tensorboard_run.TensorboardRun( + display_name=_TEST_TENSORBOARD_RUN_ID, + ) + + create_tensorboard_run_mock.assert_called_once_with( + parent=_TEST_TENSORBOARD_EXPERIMENT_NAME, + tensorboard_run=expected_tensorboard_run, + tensorboard_run_id=_TEST_TENSORBOARD_RUN_ID, + 
metadata=_TEST_REQUEST_METADATA, + timeout=180.0, + ) + @pytest.mark.usefixtures("get_tensorboard_run_mock") def test_delete_tensorboard_run(self, delete_tensorboard_run_mock): aiplatform.init(project=_TEST_PROJECT) From e72c07aa681dcba18b7ef706f6c5dfd53e2c6161 Mon Sep 17 00:00:00 2001 From: Sara Robinson Date: Thu, 24 Mar 2022 15:34:33 -0400 Subject: [PATCH 13/19] add timeout arg and tests to featurestore --- .../aiplatform/featurestore/entity_type.py | 59 ++++- .../cloud/aiplatform/featurestore/feature.py | 20 +- .../aiplatform/featurestore/featurestore.py | 66 ++++- .../tensorboard/tensorboard_resource.py | 2 +- tests/unit/aiplatform/test_featurestores.py | 243 +++++++++++++++++- 5 files changed, 372 insertions(+), 18 deletions(-) diff --git a/google/cloud/aiplatform/featurestore/entity_type.py b/google/cloud/aiplatform/featurestore/entity_type.py index 648c6928be..aada181051 100644 --- a/google/cloud/aiplatform/featurestore/entity_type.py +++ b/google/cloud/aiplatform/featurestore/entity_type.py @@ -183,6 +183,7 @@ def update( description: Optional[str] = None, labels: Optional[Dict[str, str]] = None, request_metadata: Sequence[Tuple[str, str]] = (), + update_request_timeout: Optional[float] = None, ) -> "EntityType": """Updates an existing managed entityType resource. @@ -215,6 +216,10 @@ def update( "aiplatform.googleapis.com/" and are immutable. request_metadata (Sequence[Tuple[str, str]]): Required. Strings which should be sent along with the request as metadata. + update_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. Returns: EntityType - The updated entityType resource object. """ @@ -242,6 +247,7 @@ def update( entity_type=gapic_entity_type, update_mask=update_mask, metadata=request_metadata, + timeout=update_request_timeout, ) _LOGGER.log_action_started_against_resource_with_lro( @@ -466,6 +472,7 @@ def create( location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, request_metadata: Optional[Sequence[Tuple[str, str]]] = (), + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> "EntityType": """Creates an EntityType resource in a Featurestore. @@ -524,6 +531,10 @@ def create( credentials set in aiplatform.init. request_metadata (Sequence[Tuple[str, str]]): Optional. Strings which should be sent along with the request as metadata. + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. sync (bool): Optional. Whether to execute this creation synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -566,6 +577,7 @@ def create( entity_type=gapic_entity_type, entity_type_id=entity_type_id, metadata=request_metadata, + timeout=create_request_timeout, ) _LOGGER.log_create_with_lro(cls, created_entity_type_lro) @@ -590,6 +602,7 @@ def create_feature( description: Optional[str] = None, labels: Optional[Dict[str, str]] = None, request_metadata: Optional[Sequence[Tuple[str, str]]] = (), + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> "featurestore.Feature": """Creates a Feature resource in this EntityType. @@ -635,6 +648,10 @@ def create_feature( "aiplatform.googleapis.com/" and are immutable. request_metadata (Sequence[Tuple[str, str]]): Optional. 
Strings which should be sent along with the request as metadata.
+            create_request_timeout (float):
+                Optional. The timeout for initiating this request in seconds. Note:
+                this does not set the timeout on the underlying job, only on
+                the time to initiate the request.
             sync (bool):
                 Optional. Whether to execute this creation synchronously. If False, this method
                 will be executed in concurrent Future and any downstream object will
@@ -652,6 +669,7 @@ def create_feature(
             description=description,
             labels=labels,
             request_metadata=request_metadata,
+            create_request_timeout=create_request_timeout,
             sync=sync,
         )
@@ -900,6 +918,7 @@ def _import_feature_values(
         self,
         import_feature_values_request: gca_featurestore_service.ImportFeatureValuesRequest,
         request_metadata: Optional[Sequence[Tuple[str, str]]] = (),
+        ingest_request_timeout: Optional[float] = None,
     ) -> "EntityType":
         """Imports Feature values into the Featurestore from a source storage.
@@ -908,6 +927,10 @@ def _import_feature_values(
                 Required. Request message for importing feature values.
             request_metadata (Sequence[Tuple[str, str]]):
                 Optional. Strings which should be sent along with the request as metadata.
+            ingest_request_timeout (float):
+                Optional. The timeout for initiating this request in seconds. Note:
+                this does not set the timeout on the underlying job, only on
+                the time to initiate the request.

         Returns:
             EntityType - The entityType resource object with imported feature values.
@@ -917,7 +940,9 @@ def _import_feature_values(
         )

         import_lro = self.api_client.import_feature_values(
-            request=import_feature_values_request, metadata=request_metadata,
+            request=import_feature_values_request,
+            metadata=request_metadata,
+            timeout=ingest_request_timeout,
         )

         _LOGGER.log_action_started_against_resource_with_lro(
@@ -943,6 +968,7 @@ def ingest_from_bq(
         disable_online_serving: Optional[bool] = None,
         worker_count: Optional[int] = None,
         request_metadata: Optional[Sequence[Tuple[str, str]]] = (),
+        ingest_request_timeout: Optional[float] = None,
         sync: bool = True,
     ) -> "EntityType":
         """Ingest feature values from BigQuery.
@@ -1001,6 +1027,10 @@ def ingest_from_bq(
                 performance.
             request_metadata (Sequence[Tuple[str, str]]):
                 Optional. Strings which should be sent along with the request as metadata.
+            ingest_request_timeout (float):
+                Optional. The timeout for initiating this request in seconds. Note:
+                this does not set the timeout on the underlying job, only on
+                the time to initiate the request.
             sync (bool):
                 Optional. Whether to execute this import synchronously. If False, this method
                 will be executed in concurrent Future and any downstream object will
@@ -1027,6 +1061,7 @@ def ingest_from_bq(
         return self._import_feature_values(
             import_feature_values_request=import_feature_values_request,
             request_metadata=request_metadata,
+            ingest_request_timeout=ingest_request_timeout,
         )

     @base.optional_sync(return_input_arg="self")
@@ -1041,6 +1076,7 @@ def ingest_from_gcs(
         disable_online_serving: Optional[bool] = None,
         worker_count: Optional[int] = None,
         request_metadata: Optional[Sequence[Tuple[str, str]]] = (),
+        ingest_request_timeout: Optional[float] = None,
         sync: bool = True,
     ) -> "EntityType":
         """Ingest feature values from GCS.
@@ -1107,6 +1143,10 @@ def ingest_from_gcs(
                 performance.
             request_metadata (Sequence[Tuple[str, str]]):
                 Optional.
Strings which should be sent along with the request as metadata. + ingest_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. sync (bool): Optional. Whether to execute this import synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -1150,6 +1190,7 @@ def ingest_from_gcs( return self._import_feature_values( import_feature_values_request=import_feature_values_request, request_metadata=request_metadata, + ingest_request_timeout=ingest_request_timeout, ) def ingest_from_df( @@ -1160,6 +1201,7 @@ def ingest_from_df( feature_source_fields: Optional[Dict[str, str]] = None, entity_id_field: Optional[str] = None, request_metadata: Optional[Sequence[Tuple[str, str]]] = (), + ingest_request_timeout: Optional[float] = None, ) -> "EntityType": """Ingest feature values from DataFrame. @@ -1220,6 +1262,10 @@ def ingest_from_df( IDs are extracted from the column named ``entity_id``. request_metadata (Sequence[Tuple[str, str]]): Optional. Strings which should be sent along with the request as metadata. + ingest_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. Returns: EntityType - The entityType resource object with feature values imported. @@ -1296,6 +1342,7 @@ def ingest_from_df( feature_source_fields=feature_source_fields, entity_id_field=entity_id_field, request_metadata=request_metadata, + ingest_request_timeout=ingest_request_timeout, ) finally: @@ -1358,6 +1405,7 @@ def read( entity_ids: Union[str, List[str]], feature_ids: Union[str, List[str]] = "*", request_metadata: Optional[Sequence[Tuple[str, str]]] = (), + read_request_timeout: Optional[float] = None, ) -> "pd.DataFrame": # noqa: F821 - skip check for undefined name 'pd' """Reads feature values for given feature IDs of given entity IDs in this EntityType. @@ -1370,6 +1418,10 @@ def read( for reading feature values. Default to "*", where value of all features will be read. request_metadata (Sequence[Tuple[str, str]]): Optional. Strings which should be sent along with the request as metadata. + read_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. 
Returns: pd.DataFrame: entities' feature values in DataFrame @@ -1389,7 +1441,9 @@ def read( feature_selector=feature_selector, ) read_feature_values_response = self._featurestore_online_client.read_feature_values( - request=read_feature_values_request, metadata=request_metadata + request=read_feature_values_request, + metadata=request_metadata, + timeout=read_request_timeout, ) header = read_feature_values_response.header entity_views = [read_feature_values_response.entity_view] @@ -1404,6 +1458,7 @@ def read( for response in self._featurestore_online_client.streaming_read_feature_values( request=streaming_read_feature_values_request, metadata=request_metadata, + timeout=read_request_timeout, ) ] header = streaming_read_feature_values_responses[0].header diff --git a/google/cloud/aiplatform/featurestore/feature.py b/google/cloud/aiplatform/featurestore/feature.py index 1564abfd62..073f94e502 100644 --- a/google/cloud/aiplatform/featurestore/feature.py +++ b/google/cloud/aiplatform/featurestore/feature.py @@ -15,6 +15,7 @@ # limitations under the License. # +from socket import timeout from typing import Dict, List, Optional, Sequence, Tuple from google.auth import credentials as auth_credentials @@ -174,6 +175,7 @@ def update( description: Optional[str] = None, labels: Optional[Dict[str, str]] = None, request_metadata: Optional[Sequence[Tuple[str, str]]] = (), + update_request_timeout: Optional[float] = None, ) -> "Feature": """Updates an existing managed feature resource. @@ -207,6 +209,10 @@ def update( "aiplatform.googleapis.com/" and are immutable. request_metadata (Sequence[Tuple[str, str]]): Optional. Strings which should be sent along with the request as metadata. + update_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. Returns: Feature - The updated feature resource object. @@ -232,7 +238,10 @@ def update( ) update_feature_lro = self.api_client.update_feature( - feature=gapic_feature, update_mask=update_mask, metadata=request_metadata, + feature=gapic_feature, + update_mask=update_mask, + metadata=request_metadata, + timeout=update_request_timeout, ) _LOGGER.log_action_started_against_resource_with_lro( @@ -500,6 +509,7 @@ def create( location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, request_metadata: Optional[Sequence[Tuple[str, str]]] = (), + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> "Feature": """Creates a Feature resource in an EntityType. @@ -567,6 +577,10 @@ def create( credentials set in aiplatform.init. request_metadata (Sequence[Tuple[str, str]]): Optional. Strings which should be sent along with the request as metadata. + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. sync (bool): Optional. Whether to execute this creation synchronously. 
If False, this method will be executed in concurrent Future and any downstream object will @@ -609,7 +623,9 @@ def create( ) created_feature_lro = api_client.create_feature( - request=create_feature_request, metadata=request_metadata, + request=create_feature_request, + metadata=request_metadata, + timeout=create_request_timeout, ) _LOGGER.log_create_with_lro(cls, created_feature_lro) diff --git a/google/cloud/aiplatform/featurestore/featurestore.py b/google/cloud/aiplatform/featurestore/featurestore.py index 2d4f0bcfc3..45018645c5 100644 --- a/google/cloud/aiplatform/featurestore/featurestore.py +++ b/google/cloud/aiplatform/featurestore/featurestore.py @@ -138,6 +138,7 @@ def update( self, labels: Optional[Dict[str, str]] = None, request_metadata: Optional[Sequence[Tuple[str, str]]] = (), + update_request_timeout: Optional[float] = None, ) -> "Featurestore": """Updates an existing managed featurestore resource. @@ -167,18 +168,27 @@ def update( "aiplatform.googleapis.com/" and are immutable. request_metadata (Sequence[Tuple[str, str]]): Optional. Strings which should be sent along with the request as metadata. + update_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. Returns: Featurestore - The updated featurestore resource object. """ - return self._update(labels=labels, request_metadata=request_metadata) + return self._update( + labels=labels, + request_metadata=request_metadata, + update_request_timeout=update_request_timeout, + ) # TODO(b/206818784): Add enable_online_store and disable_online_store methods def update_online_store( self, fixed_node_count: int, request_metadata: Optional[Sequence[Tuple[str, str]]] = (), + update_request_timeout: Optional[float] = None, ) -> "Featurestore": """Updates the online store of an existing managed featurestore resource. @@ -196,12 +206,18 @@ def update_online_store( Required. Config for online serving resources, can only update the node count to >= 1. request_metadata (Sequence[Tuple[str, str]]): Optional. Strings which should be sent along with the request as metadata. + update_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. Returns: Featurestore - The updated featurestore resource object. """ return self._update( - fixed_node_count=fixed_node_count, request_metadata=request_metadata + fixed_node_count=fixed_node_count, + request_metadata=request_metadata, + update_request_timeout=update_request_timeout, ) def _update( @@ -209,6 +225,7 @@ def _update( labels: Optional[Dict[str, str]] = None, fixed_node_count: Optional[int] = None, request_metadata: Optional[Sequence[Tuple[str, str]]] = (), + update_request_timeout: Optional[float] = None, ) -> "Featurestore": """Updates an existing managed featurestore resource. @@ -231,6 +248,10 @@ def _update( Optional. Config for online serving resources, can only update the node count to >= 1. request_metadata (Sequence[Tuple[str, str]]): Optional. Strings which should be sent along with the request as metadata. + update_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. Returns: Featurestore - The updated featurestore resource object. 
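For illustration, a minimal usage sketch of the update timeouts added in this file (a sketch only, not part of the patch; the project, location, and featurestore ID are placeholder assumptions):

    from google.cloud import aiplatform

    aiplatform.init(project="my-project", location="us-central1")

    fs = aiplatform.Featurestore("my_featurestore")
    # The *_request_timeout arguments bound only the initial RPC; the
    # long-running update operation itself is not limited by them.
    fs.update(labels={"team": "ml"}, update_request_timeout=60.0)
    fs.update_online_store(fixed_node_count=2, update_request_timeout=60.0)
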
@@ -263,6 +284,7 @@ def _update( featurestore=gapic_featurestore, update_mask=update_mask, metadata=request_metadata, + timeout=update_request_timeout, ) _LOGGER.log_action_started_against_resource_with_lro( @@ -399,6 +421,7 @@ def create( credentials: Optional[auth_credentials.Credentials] = None, request_metadata: Optional[Sequence[Tuple[str, str]]] = (), encryption_spec_key_name: Optional[str] = None, + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> "Featurestore": """Creates a Featurestore resource. @@ -457,6 +480,10 @@ def create( spec for data storage. If set, both of the online and offline data storage will be secured by this key. + create_request_timeout (float): + Optional. The timeout for initiating this create request in seconds. Note: + this does not set the timeout on the underlying job, only on the time + to initiate the create request. sync (bool): Optional. Whether to execute this creation synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -490,6 +517,7 @@ def create( featurestore=gapic_featurestore, featurestore_id=featurestore_id, metadata=request_metadata, + timeout=create_request_timeout, ) _LOGGER.log_create_with_lro(cls, created_featurestore_lro) @@ -513,6 +541,7 @@ def create_entity_type( description: Optional[str] = None, labels: Optional[Dict[str, str]] = None, request_metadata: Optional[Sequence[Tuple[str, str]]] = (), + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> "featurestore.EntityType": """Creates an EntityType resource in this Featurestore. @@ -554,6 +583,10 @@ def create_entity_type( "aiplatform.googleapis.com/" and are immutable. request_metadata (Sequence[Tuple[str, str]]): Optional. Strings which should be sent along with the request as metadata. + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. sync (bool): Optional. Whether to execute this creation synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -570,6 +603,7 @@ def create_entity_type( description=description, labels=labels, request_metadata=request_metadata, + create_request_timeout=create_request_timeout, sync=sync, ) @@ -577,6 +611,7 @@ def _batch_read_feature_values( self, batch_read_feature_values_request: gca_featurestore_service.BatchReadFeatureValuesRequest, request_metadata: Optional[Sequence[Tuple[str, str]]] = (), + serve_request_timeout: Optional[float] = None, ) -> "Featurestore": """Batch read Feature values from the Featurestore to a destination storage. @@ -585,6 +620,10 @@ def _batch_read_feature_values( Required. Request of batch read feature values. request_metadata (Sequence[Tuple[str, str]]): Optional. Strings which should be sent along with the request as metadata. + serve_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. Returns: Featurestore: The featurestore resource object batch read feature values from. 
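Likewise, a hedged sketch of the create-side timeouts introduced above (resource IDs are placeholders; this snippet is illustrative, not part of the patch):

    from google.cloud import aiplatform

    aiplatform.init(project="my-project", location="us-central1")

    fs = aiplatform.Featurestore.create(
        featurestore_id="my_featurestore",
        online_store_fixed_node_count=1,
        # Bounds only the CreateFeaturestore request, not the returned LRO.
        create_request_timeout=300.0,
    )

    users = fs.create_entity_type(
        entity_type_id="users",
        description="Per-user features",
        create_request_timeout=180.0,
    )
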
@@ -595,7 +634,9 @@ def _batch_read_feature_values( ) batch_read_lro = self.api_client.batch_read_feature_values( - request=batch_read_feature_values_request, metadata=request_metadata, + request=batch_read_feature_values_request, + metadata=request_metadata, + timeout=serve_request_timeout, ) _LOGGER.log_action_started_against_resource_with_lro( @@ -775,6 +816,7 @@ def batch_serve_to_bq( pass_through_fields: Optional[List[str]] = None, feature_destination_fields: Optional[Dict[str, str]] = None, request_metadata: Optional[Sequence[Tuple[str, str]]] = (), + serve_request_timeout: Optional[float] = None, sync: bool = True, ) -> "Featurestore": """ Batch serves feature values to BigQuery destination @@ -847,7 +889,10 @@ def batch_serve_to_bq( 'projects/123/locations/us-central1/featurestores/fs_id/entityTypes/et_id1/features/f_id11': 'foo', 'projects/123/locations/us-central1/featurestores/fs_id/entityTypes/et_id2/features/f_id22': 'bar', } - + serve_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. Returns: Featurestore: The featurestore resource object batch read feature values from. @@ -871,6 +916,7 @@ def batch_serve_to_bq( return self._batch_read_feature_values( batch_read_feature_values_request=batch_read_feature_values_request, request_metadata=request_metadata, + serve_request_timeout=serve_request_timeout, ) @base.optional_sync(return_input_arg="self") @@ -883,6 +929,7 @@ def batch_serve_to_gcs( pass_through_fields: Optional[List[str]] = None, feature_destination_fields: Optional[Dict[str, str]] = None, request_metadata: Optional[Sequence[Tuple[str, str]]] = (), + serve_request_timeout: Optional[float] = None, sync: bool = True, ) -> "Featurestore": """ Batch serves feature values to GCS destination @@ -976,6 +1023,10 @@ def batch_serve_to_gcs( 'projects/123/locations/us-central1/featurestores/fs_id/entityTypes/et_id1/features/f_id11': 'foo', 'projects/123/locations/us-central1/featurestores/fs_id/entityTypes/et_id2/features/f_id22': 'bar', } + serve_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. Returns: Featurestore: The featurestore resource object batch read feature values from. @@ -1016,6 +1067,7 @@ def batch_serve_to_gcs( return self._batch_read_feature_values( batch_read_feature_values_request=batch_read_feature_values_request, request_metadata=request_metadata, + serve_request_timeout=serve_request_timeout, ) def batch_serve_to_df( @@ -1025,6 +1077,7 @@ def batch_serve_to_df( pass_through_fields: Optional[List[str]] = None, feature_destination_fields: Optional[Dict[str, str]] = None, request_metadata: Optional[Sequence[Tuple[str, str]]] = (), + serve_request_timeout: Optional[float] = None, ) -> "pd.DataFrame": # noqa: F821 - skip check for undefined name 'pd' """ Batch serves feature values to pandas DataFrame @@ -1106,6 +1159,10 @@ def batch_serve_to_df( 'projects/123/locations/us-central1/featurestores/fs_id/entityTypes/et_id1/features/f_id11': 'foo', 'projects/123/locations/us-central1/featurestores/fs_id/entityTypes/et_id2/features/f_id22': 'bar', } + serve_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. 
Returns: pd.DataFrame: The pandas DataFrame containing feature values from batch serving. @@ -1179,6 +1236,7 @@ def batch_serve_to_df( pass_through_fields=pass_through_fields, feature_destination_fields=feature_destination_fields, request_metadata=request_metadata, + serve_request_timeout=serve_request_timeout, ) bigquery_storage_read_client = bigquery_storage.BigQueryReadClient( diff --git a/google/cloud/aiplatform/tensorboard/tensorboard_resource.py b/google/cloud/aiplatform/tensorboard/tensorboard_resource.py index 8f0e9dc7cf..0c8c14072b 100644 --- a/google/cloud/aiplatform/tensorboard/tensorboard_resource.py +++ b/google/cloud/aiplatform/tensorboard/tensorboard_resource.py @@ -146,7 +146,7 @@ def create( Overrides encryption_spec_key_name set in aiplatform.init. create_request_timeout (float): Optional. The timeout for initiating this create request in seconds. Note: - this does not set the timeout on the underlying create job, only on the time + this does not set the timeout on the underlying job, only on the time to initiate the create request. Returns: diff --git a/tests/unit/aiplatform/test_featurestores.py b/tests/unit/aiplatform/test_featurestores.py index 364fab3b3f..1d25a5a024 100644 --- a/tests/unit/aiplatform/test_featurestores.py +++ b/tests/unit/aiplatform/test_featurestores.py @@ -860,7 +860,32 @@ def test_update_featurestore(self, update_featurestore_mock): my_featurestore = aiplatform.Featurestore( featurestore_name=_TEST_FEATURESTORE_ID ) - my_featurestore.update(labels=_TEST_LABELS_UPDATE) + my_featurestore.update( + labels=_TEST_LABELS_UPDATE, update_request_timeout=None, + ) + + expected_featurestore = gca_featurestore.Featurestore( + name=_TEST_FEATURESTORE_NAME, + labels=_TEST_LABELS_UPDATE, + online_serving_config=gca_featurestore.Featurestore.OnlineServingConfig(), + ) + update_featurestore_mock.assert_called_once_with( + featurestore=expected_featurestore, + update_mask=field_mask_pb2.FieldMask(paths=["labels"]), + metadata=_TEST_REQUEST_METADATA, + timeout=None, + ) + + @pytest.mark.usefixtures("get_featurestore_mock") + def test_update_featurestore_with_timeout(self, update_featurestore_mock): + aiplatform.init(project=_TEST_PROJECT) + + my_featurestore = aiplatform.Featurestore( + featurestore_name=_TEST_FEATURESTORE_ID + ) + my_featurestore.update( + labels=_TEST_LABELS_UPDATE, update_request_timeout=180.0, + ) expected_featurestore = gca_featurestore.Featurestore( name=_TEST_FEATURESTORE_NAME, @@ -871,6 +896,7 @@ def test_update_featurestore(self, update_featurestore_mock): featurestore=expected_featurestore, update_mask=field_mask_pb2.FieldMask(paths=["labels"]), metadata=_TEST_REQUEST_METADATA, + timeout=180.0, ) @pytest.mark.usefixtures("get_featurestore_mock") @@ -881,7 +907,8 @@ def test_update_featurestore_online(self, update_featurestore_mock): featurestore_name=_TEST_FEATURESTORE_ID ) my_featurestore.update_online_store( - fixed_node_count=_TEST_ONLINE_SERVING_CONFIG_UPDATE + fixed_node_count=_TEST_ONLINE_SERVING_CONFIG_UPDATE, + update_request_timeout=None, ) expected_featurestore = gca_featurestore.Featurestore( @@ -896,6 +923,7 @@ def test_update_featurestore_online(self, update_featurestore_mock): paths=["online_serving_config.fixed_node_count"] ), metadata=_TEST_REQUEST_METADATA, + timeout=None, ) def test_list_featurestores(self, list_featurestores_mock): @@ -1000,6 +1028,37 @@ def test_create_entity_type(self, create_entity_type_mock, sync): entity_type_id=_TEST_ENTITY_TYPE_ID, description=_TEST_DESCRIPTION, labels=_TEST_LABELS, + 
create_request_timeout=None, + sync=sync, + ) + + if not sync: + my_entity_type.wait() + + expected_entity_type = gca_entity_type.EntityType( + labels=_TEST_LABELS, description=_TEST_DESCRIPTION, + ) + create_entity_type_mock.assert_called_once_with( + parent=_TEST_FEATURESTORE_NAME, + entity_type=expected_entity_type, + entity_type_id=_TEST_ENTITY_TYPE_ID, + metadata=_TEST_REQUEST_METADATA, + timeout=None, + ) + + @pytest.mark.usefixtures("get_featurestore_mock", "get_entity_type_mock") + @pytest.mark.parametrize("sync", [True, False]) + def test_create_entity_type_with_timeout(self, create_entity_type_mock, sync): + aiplatform.init(project=_TEST_PROJECT) + + my_featurestore = aiplatform.Featurestore( + featurestore_name=_TEST_FEATURESTORE_NAME + ) + my_entity_type = my_featurestore.create_entity_type( + entity_type_id=_TEST_ENTITY_TYPE_ID, + description=_TEST_DESCRIPTION, + labels=_TEST_LABELS, + create_request_timeout=180.0, sync=sync, ) @@ -1014,6 +1073,7 @@ def test_create_entity_type(self, create_entity_type_mock, sync): entity_type=expected_entity_type, entity_type_id=_TEST_ENTITY_TYPE_ID, metadata=_TEST_REQUEST_METADATA, + timeout=180.0, ) @pytest.mark.usefixtures("get_featurestore_mock") @@ -1026,6 +1086,7 @@ def test_create_featurestore(self, create_featurestore_mock, sync): online_store_fixed_node_count=_TEST_ONLINE_SERVING_CONFIG, labels=_TEST_LABELS, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + create_request_timeout=None, ) if not sync: @@ -1043,6 +1104,38 @@ def test_create_featurestore(self, create_featurestore_mock, sync): featurestore=expected_featurestore, featurestore_id=_TEST_FEATURESTORE_ID, metadata=_TEST_REQUEST_METADATA, + timeout=None, + ) + + @pytest.mark.usefixtures("get_featurestore_mock") + @pytest.mark.parametrize("sync", [True, False]) + def test_create_featurestore_with_timeout(self, create_featurestore_mock, sync): + aiplatform.init(project=_TEST_PROJECT) + + my_featurestore = aiplatform.Featurestore.create( + featurestore_id=_TEST_FEATURESTORE_ID, + online_store_fixed_node_count=_TEST_ONLINE_SERVING_CONFIG, + labels=_TEST_LABELS, + encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + create_request_timeout=180.0, + ) + + if not sync: + my_featurestore.wait() + + expected_featurestore = gca_featurestore.Featurestore( + labels=_TEST_LABELS, + online_serving_config=gca_featurestore.Featurestore.OnlineServingConfig( + fixed_node_count=_TEST_ONLINE_SERVING_CONFIG + ), + encryption_spec=_TEST_ENCRYPTION_SPEC, + ) + create_featurestore_mock.assert_called_once_with( + parent=_TEST_PARENT, + featurestore=expected_featurestore, + featurestore_id=_TEST_FEATURESTORE_ID, + metadata=_TEST_REQUEST_METADATA, + timeout=180.0, ) @pytest.mark.usefixtures("get_featurestore_mock") @@ -1221,6 +1314,7 @@ def test_batch_serve_to_bq(self, batch_read_feature_values_mock, sync): bq_destination_output_uri=_TEST_BQ_DESTINATION_URI, serving_feature_ids=_TEST_SERVING_FEATURE_IDS, read_instances_uri=_TEST_BQ_SOURCE_URI, + serve_request_timeout=None, sync=sync, ) @@ -1230,6 +1324,52 @@ def test_batch_serve_to_bq(self, batch_read_feature_values_mock, sync): batch_read_feature_values_mock.assert_called_once_with( request=expected_batch_read_feature_values_request, metadata=_TEST_REQUEST_METADATA, + timeout=None, + ) + + @pytest.mark.parametrize("sync", [True, False]) + @pytest.mark.usefixtures("get_featurestore_mock") + def test_batch_serve_to_bq_with_timeout(self, batch_read_feature_values_mock, sync): + aiplatform.init(project=_TEST_PROJECT) + my_featurestore = 
aiplatform.Featurestore( + featurestore_name=_TEST_FEATURESTORE_NAME + ) + + expected_entity_type_specs = [ + _get_entity_type_spec_proto_with_feature_ids( + entity_type_id="my_entity_type_id_1", + feature_ids=["my_feature_id_1_1", "my_feature_id_1_2"], + ), + _get_entity_type_spec_proto_with_feature_ids( + entity_type_id="my_entity_type_id_2", + feature_ids=["my_feature_id_2_1", "my_feature_id_2_2"], + ), + ] + + expected_batch_read_feature_values_request = gca_featurestore_service.BatchReadFeatureValuesRequest( + featurestore=my_featurestore.resource_name, + destination=gca_featurestore_service.FeatureValueDestination( + bigquery_destination=_TEST_BQ_DESTINATION, + ), + entity_type_specs=expected_entity_type_specs, + bigquery_read_instances=_TEST_BQ_SOURCE, + ) + + my_featurestore.batch_serve_to_bq( + bq_destination_output_uri=_TEST_BQ_DESTINATION_URI, + serving_feature_ids=_TEST_SERVING_FEATURE_IDS, + read_instances_uri=_TEST_BQ_SOURCE_URI, + serve_request_timeout=180.0, + sync=sync, + ) + + if not sync: + my_featurestore.wait() + + batch_read_feature_values_mock.assert_called_once_with( + request=expected_batch_read_feature_values_request, + metadata=_TEST_REQUEST_METADATA, + timeout=180.0, ) @pytest.mark.parametrize("sync", [True, False]) @@ -1265,6 +1405,7 @@ def test_batch_serve_to_gcs(self, batch_read_feature_values_mock, sync): gcs_destination_type=_TEST_GCS_DESTINATION_TYPE_TFRECORD, serving_feature_ids=_TEST_SERVING_FEATURE_IDS, read_instances_uri=_TEST_GCS_CSV_SOURCE_URI, + serve_request_timeout=None, sync=sync, ) @@ -1274,6 +1415,7 @@ def test_batch_serve_to_gcs(self, batch_read_feature_values_mock, sync): batch_read_feature_values_mock.assert_called_once_with( request=expected_batch_read_feature_values_request, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) @pytest.mark.usefixtures("get_featurestore_mock") @@ -1354,11 +1496,13 @@ def test_batch_serve_to_df(self, batch_read_feature_values_mock): my_featurestore.batch_serve_to_df( serving_feature_ids=_TEST_SERVING_FEATURE_IDS, read_instances_df=read_instances_df, + serve_request_timeout=None, ) batch_read_feature_values_mock.assert_called_once_with( request=expected_batch_read_feature_values_request, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) @@ -1419,7 +1563,9 @@ def test_update_entity_type(self, update_entity_type_mock): aiplatform.init(project=_TEST_PROJECT) my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME) - my_entity_type.update(labels=_TEST_LABELS_UPDATE) + my_entity_type.update( + labels=_TEST_LABELS_UPDATE, update_request_timeout=None, + ) expected_entity_type = gca_entity_type.EntityType( name=_TEST_ENTITY_TYPE_NAME, labels=_TEST_LABELS_UPDATE, @@ -1428,6 +1574,7 @@ def test_update_entity_type(self, update_entity_type_mock): entity_type=expected_entity_type, update_mask=field_mask_pb2.FieldMask(paths=["labels"]), metadata=_TEST_REQUEST_METADATA, + timeout=None, ) @pytest.mark.parametrize( @@ -1493,6 +1640,7 @@ def test_create_feature(self, create_feature_mock, sync): value_type=_TEST_FEATURE_VALUE_TYPE_STR, description=_TEST_DESCRIPTION, labels=_TEST_LABELS, + create_request_timeout=None, ) if not sync: @@ -1510,7 +1658,7 @@ def test_create_feature(self, create_feature_mock, sync): ) create_feature_mock.assert_called_once_with( - request=expected_request, metadata=_TEST_REQUEST_METADATA, + request=expected_request, metadata=_TEST_REQUEST_METADATA, timeout=None, ) @pytest.mark.usefixtures("get_entity_type_mock") @@ -1523,6 +1671,7 @@ def test_create_entity_type(self, 
create_entity_type_mock, sync): featurestore_name=_TEST_FEATURESTORE_NAME, description=_TEST_DESCRIPTION, labels=_TEST_LABELS, + create_request_timeout=None, ) if not sync: @@ -1536,6 +1685,7 @@ def test_create_entity_type(self, create_entity_type_mock, sync): entity_type=expected_entity_type, entity_type_id=_TEST_ENTITY_TYPE_ID, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) @pytest.mark.usefixtures("get_entity_type_mock") @@ -1642,6 +1792,7 @@ def test_ingest_from_bq(self, import_feature_values_mock, sync): feature_time=_TEST_FEATURE_TIME_FIELD, bq_source_uri=_TEST_BQ_SOURCE_URI, feature_source_fields=_TEST_IMPORTING_FEATURE_SOURCE_FIELDS, + ingest_request_timeout=None, sync=sync, ) @@ -1659,7 +1810,43 @@ def test_ingest_from_bq(self, import_feature_values_mock, sync): feature_time_field=_TEST_FEATURE_TIME_FIELD, ) import_feature_values_mock.assert_called_once_with( - request=true_import_feature_values_request, metadata=_TEST_REQUEST_METADATA, + request=true_import_feature_values_request, + metadata=_TEST_REQUEST_METADATA, + timeout=None, + ) + + @pytest.mark.usefixtures("get_entity_type_mock") + @pytest.mark.parametrize("sync", [True, False]) + def test_ingest_from_bq_with_timeout(self, import_feature_values_mock, sync): + aiplatform.init(project=_TEST_PROJECT) + + my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME) + my_entity_type.ingest_from_bq( + feature_ids=_TEST_IMPORTING_FEATURE_IDS, + feature_time=_TEST_FEATURE_TIME_FIELD, + bq_source_uri=_TEST_BQ_SOURCE_URI, + feature_source_fields=_TEST_IMPORTING_FEATURE_SOURCE_FIELDS, + ingest_request_timeout=180.0, + sync=sync, + ) + + if not sync: + my_entity_type.wait() + + true_import_feature_values_request = gca_featurestore_service.ImportFeatureValuesRequest( + entity_type=_TEST_ENTITY_TYPE_NAME, + feature_specs=[ + gca_featurestore_service.ImportFeatureValuesRequest.FeatureSpec( + id="my_feature_id_1", source_field="my_feature_id_1_source_field" + ), + ], + bigquery_source=_TEST_BQ_SOURCE, + feature_time_field=_TEST_FEATURE_TIME_FIELD, + ) + import_feature_values_mock.assert_called_once_with( + request=true_import_feature_values_request, + metadata=_TEST_REQUEST_METADATA, + timeout=180.0, ) @pytest.mark.usefixtures("get_entity_type_mock") @@ -1673,6 +1860,7 @@ def test_ingest_from_gcs(self, import_feature_values_mock, sync): feature_time=_TEST_FEATURE_TIME, gcs_source_uris=_TEST_GCS_AVRO_SOURCE_URIS, gcs_source_type=_TEST_GCS_SOURCE_TYPE_AVRO, + ingest_request_timeout=None, sync=sync, ) @@ -1690,7 +1878,9 @@ def test_ingest_from_gcs(self, import_feature_values_mock, sync): feature_time=utils.get_timestamp_proto(_TEST_FEATURE_TIME), ) import_feature_values_mock.assert_called_once_with( - request=true_import_feature_values_request, metadata=_TEST_REQUEST_METADATA, + request=true_import_feature_values_request, + metadata=_TEST_REQUEST_METADATA, + timeout=None, ) @pytest.mark.usefixtures("get_entity_type_mock") @@ -1732,6 +1922,7 @@ def test_ingest_from_df_using_column( feature_time=_TEST_FEATURE_TIME_FIELD, df_source=df_source, feature_source_fields=_TEST_IMPORTING_FEATURE_SOURCE_FIELDS, + ingest_request_timeout=None, ) expected_temp_bq_dataset_name = f"temp_{_TEST_FEATURESTORE_ID}_{uuid.uuid4()}".replace( "-", "_" @@ -1765,6 +1956,7 @@ def test_ingest_from_df_using_column( import_feature_values_mock.assert_called_once_with( request=expected_import_feature_values_request, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) @pytest.mark.usefixtures( @@ -1793,6 +1985,7 @@ def 
test_ingest_from_df_using_datetime( feature_time=_TEST_FEATURE_TIME_DATETIME, df_source=df_source, feature_source_fields=_TEST_IMPORTING_FEATURE_SOURCE_FIELDS, + ingest_request_timeout=None, ) expected_temp_bq_dataset_name = f"temp_{_TEST_FEATURESTORE_ID}_{uuid.uuid4()}".replace( @@ -1830,6 +2023,7 @@ def test_ingest_from_df_using_datetime( import_feature_values_mock.assert_called_once_with( request=expected_import_feature_values_request, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) @pytest.mark.parametrize( @@ -1867,16 +2061,39 @@ def test_read_single_entity(self, read_feature_values_mock): id_matcher=gca_feature_selector.IdMatcher(ids=["*"]) ), ) - result = my_entity_type.read(entity_ids=_TEST_READ_ENTITY_ID) + result = my_entity_type.read( + entity_ids=_TEST_READ_ENTITY_ID, read_request_timeout=None, + ) read_feature_values_mock.assert_called_once_with( request=expected_read_feature_values_request, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) assert type(result) == pd.DataFrame assert len(result) == 1 assert result.entity_id[0] == _TEST_READ_ENTITY_ID assert result.get(_TEST_FEATURE_ID)[0] == _TEST_FEATURE_VALUE + @pytest.mark.usefixtures("get_entity_type_mock", "get_feature_mock") + def test_read_single_entity_with_timeout(self, read_feature_values_mock): + aiplatform.init(project=_TEST_PROJECT) + my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME) + expected_read_feature_values_request = gca_featurestore_online_service.ReadFeatureValuesRequest( + entity_type=my_entity_type.resource_name, + entity_id=_TEST_READ_ENTITY_ID, + feature_selector=gca_feature_selector.FeatureSelector( + id_matcher=gca_feature_selector.IdMatcher(ids=["*"]) + ), + ) + result = my_entity_type.read( + entity_ids=_TEST_READ_ENTITY_ID, read_request_timeout=180.0, + ) + read_feature_values_mock.assert_called_once_with( + request=expected_read_feature_values_request, + metadata=_TEST_REQUEST_METADATA, + timeout=180.0, + ) + @pytest.mark.usefixtures("get_entity_type_mock", "get_feature_mock") def test_read_multiple_entities(self, streaming_read_feature_values_mock): aiplatform.init(project=_TEST_PROJECT) @@ -1889,11 +2106,14 @@ def test_read_multiple_entities(self, streaming_read_feature_values_mock): ), ) result = my_entity_type.read( - entity_ids=_TEST_READ_ENTITY_IDS, feature_ids=_TEST_FEATURE_ID + entity_ids=_TEST_READ_ENTITY_IDS, + feature_ids=_TEST_FEATURE_ID, + read_request_timeout=None, ) streaming_read_feature_values_mock.assert_called_once_with( request=expected_streaming_read_feature_values_request, metadata=_TEST_REQUEST_METADATA, + timeout=None, ) assert type(result) == pd.DataFrame assert len(result) == 1 @@ -2377,7 +2597,9 @@ def test_update_feature(self, update_feature_mock): aiplatform.init(project=_TEST_PROJECT) my_feature = aiplatform.Feature(feature_name=_TEST_FEATURE_NAME) - my_feature.update(labels=_TEST_LABELS_UPDATE) + my_feature.update( + labels=_TEST_LABELS_UPDATE, update_request_timeout=None, + ) expected_feature = gca_feature.Feature( name=_TEST_FEATURE_NAME, labels=_TEST_LABELS_UPDATE, @@ -2386,6 +2608,7 @@ def test_update_feature(self, update_feature_mock): feature=expected_feature, update_mask=field_mask_pb2.FieldMask(paths=["labels"]), metadata=_TEST_REQUEST_METADATA, + timeout=None, ) @pytest.mark.parametrize( @@ -2434,6 +2657,7 @@ def test_create_feature(self, create_feature_mock, sync): featurestore_id=_TEST_FEATURESTORE_ID, description=_TEST_DESCRIPTION, labels=_TEST_LABELS, + create_request_timeout=None, ) if not sync: @@ -2451,6 +2675,7 @@ 
def test_create_feature(self, create_feature_mock, sync): feature_id=_TEST_FEATURE_ID, ), metadata=_TEST_REQUEST_METADATA, + timeout=None, ) From 3e5dd0067b34c05749928666f589902f17df94c3 Mon Sep 17 00:00:00 2001 From: Sara Robinson Date: Fri, 25 Mar 2022 11:36:48 -0400 Subject: [PATCH 14/19] fix failing tests --- google/cloud/aiplatform/models.py | 35 +++++- google/cloud/aiplatform/training_jobs.py | 18 +++ .../test_automl_forecasting_training_jobs.py | 86 +++++++++++++ .../test_automl_image_training_jobs.py | 80 ++++++++++++ .../test_automl_text_training_jobs.py | 84 +++++++++++++ .../test_automl_video_training_jobs.py | 81 ++++++++++++ tests/unit/aiplatform/test_datasets.py | 18 --- tests/unit/aiplatform/test_end_to_end.py | 23 +++- tests/unit/aiplatform/test_endpoints.py | 119 ++++++++++++++++-- tests/unit/aiplatform/test_models.py | 8 ++ 10 files changed, 516 insertions(+), 36 deletions(-) diff --git a/google/cloud/aiplatform/models.py b/google/cloud/aiplatform/models.py index 904d93ac13..e019d787e3 100644 --- a/google/cloud/aiplatform/models.py +++ b/google/cloud/aiplatform/models.py @@ -15,6 +15,7 @@ # limitations under the License. # import pathlib +from venv import create import proto import re import shutil @@ -204,6 +205,7 @@ def create( location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, encryption_spec_key_name: Optional[str] = None, + create_request_timeout: Optional[float] = None, sync=True, ) -> "Endpoint": """Creates a new endpoint. @@ -248,6 +250,10 @@ def create( If set, this Endpoint and all sub-resources of this Endpoint will be secured by this key. Overrides encryption_spec_key_name set in aiplatform.init. + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -278,6 +284,7 @@ def create( encryption_spec=initializer.global_config.get_encryption_spec( encryption_spec_key_name=encryption_spec_key_name ), + create_request_timeout=create_request_timeout, sync=sync, ) @@ -294,6 +301,7 @@ def _create( metadata: Optional[Sequence[Tuple[str, str]]] = (), credentials: Optional[auth_credentials.Credentials] = None, encryption_spec: Optional[gca_encryption_spec.EncryptionSpec] = None, + create_request_timeout: Optional[float] = None, sync=True, ) -> "Endpoint": """Creates a new endpoint by calling the API client. @@ -336,6 +344,10 @@ def _create( resource is created. If set, this Dataset and all sub-resources of this Dataset will be secured by this key. + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. sync (bool): Whether to create this endpoint synchronously. 
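For callers, the new argument threads through unchanged from Endpoint.create into the underlying create_endpoint call; a minimal usage sketch, assuming a placeholder project and display name (values below are illustrative, not taken from this change):

    from google.cloud import aiplatform

    aiplatform.init(project="my-project", location="us-central1")  # placeholder project

    # Bounds only the initial CreateEndpoint RPC, in seconds; the returned
    # long-running operation is not limited by this value.
    endpoint = aiplatform.Endpoint.create(
        display_name="my-endpoint",
        create_request_timeout=60.0,
    )
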
Returns: @@ -355,7 +367,10 @@ def _create( ) operation_future = api_client.create_endpoint( - parent=parent, endpoint=gapic_endpoint, metadata=metadata + parent=parent, + endpoint=gapic_endpoint, + metadata=metadata, + timeout=create_request_timeout, ) _LOGGER.log_create_with_lro(cls, operation_future) @@ -598,6 +613,7 @@ def deploy( explanation_metadata: Optional[explain.ExplanationMetadata] = None, explanation_parameters: Optional[explain.ExplanationParameters] = None, metadata: Optional[Sequence[Tuple[str, str]]] = (), + deploy_request_timeout: Optional[float] = None, sync=True, ) -> None: """Deploys a Model to the Endpoint. @@ -666,6 +682,10 @@ def deploy( metadata (Sequence[Tuple[str, str]]): Optional. Strings which should be sent along with the request as metadata. + deploy_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -698,6 +718,7 @@ def deploy( explanation_metadata=explanation_metadata, explanation_parameters=explanation_parameters, metadata=metadata, + deploy_request_timeout=deploy_request_timeout, sync=sync, ) @@ -717,6 +738,7 @@ def _deploy( explanation_metadata: Optional[explain.ExplanationMetadata] = None, explanation_parameters: Optional[explain.ExplanationParameters] = None, metadata: Optional[Sequence[Tuple[str, str]]] = (), + deploy_request_timeout: Optional[float] = None, sync=True, ) -> None: """Deploys a Model to the Endpoint. @@ -785,6 +807,10 @@ def _deploy( metadata (Sequence[Tuple[str, str]]): Optional. Strings which should be sent along with the request as metadata. + deploy_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -814,6 +840,7 @@ def _deploy( explanation_metadata=explanation_metadata, explanation_parameters=explanation_parameters, metadata=metadata, + deploy_request_timeout=deploy_request_timeout, ) _LOGGER.log_action_completed_against_resource("model", "deployed", self) @@ -2232,6 +2259,7 @@ def batch_predict( labels: Optional[Dict[str, str]] = None, credentials: Optional[auth_credentials.Credentials] = None, encryption_spec_key_name: Optional[str] = None, + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> jobs.BatchPredictionJob: """Creates a batch prediction job using this Model and outputs @@ -2386,6 +2414,10 @@ def batch_predict( If set, this Model and all sub-resources of this Model will be secured by this key. Overrides encryption_spec_key_name set in aiplatform.init. + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. Returns: (jobs.BatchPredictionJob): Instantiated representation of the created batch prediction job. 
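A rough usage sketch of the same argument on Model.batch_predict, assuming a placeholder model resource name and GCS paths:

    from google.cloud import aiplatform

    aiplatform.init(project="my-project", location="us-central1")  # placeholder project

    model = aiplatform.Model("projects/my-project/locations/us-central1/models/123")  # placeholder
    batch_job = model.batch_predict(
        job_display_name="my-batch-prediction",
        gcs_source="gs://my-bucket/instances.jsonl",          # placeholder input
        gcs_destination_prefix="gs://my-bucket/predictions",  # placeholder output
        create_request_timeout=60.0,  # seconds; bounds only the CreateBatchPredictionJob call
    )
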
@@ -2414,6 +2446,7 @@ def batch_predict( location=self.location, credentials=credentials or self.credentials, encryption_spec_key_name=encryption_spec_key_name, + create_request_timeout=create_request_timeout, sync=sync, ) diff --git a/google/cloud/aiplatform/training_jobs.py b/google/cloud/aiplatform/training_jobs.py index b4c3b65408..68f984ff29 100644 --- a/google/cloud/aiplatform/training_jobs.py +++ b/google/cloud/aiplatform/training_jobs.py @@ -4716,6 +4716,7 @@ def run( model_display_name: Optional[str] = None, model_labels: Optional[Dict[str, str]] = None, disable_early_stopping: bool = False, + create_request_timeout: Optional[float] = False, sync: bool = True, ) -> models.Model: """Runs the AutoML Image training job and returns a model. @@ -4818,6 +4819,10 @@ def run( that training might stop before the entire training budget has been used, if further training does no longer brings significant improvement to the model. + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. sync: bool = True Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -4854,6 +4859,7 @@ def run( model_display_name=model_display_name, model_labels=model_labels, disable_early_stopping=disable_early_stopping, + create_request_timeout=create_request_timeout, sync=sync, ) @@ -5996,6 +6002,7 @@ def run( test_filter_split: Optional[str] = None, model_display_name: Optional[str] = None, model_labels: Optional[Dict[str, str]] = None, + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> models.Model: """Runs the AutoML Video training job and returns a model. @@ -6060,6 +6067,10 @@ def run( are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. sync: bool = True Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will @@ -6091,6 +6102,7 @@ def run( test_filter_split=test_filter_split, model_display_name=model_display_name, model_labels=model_labels, + create_request_timeout=create_request_timeout, sync=sync, ) @@ -6378,6 +6390,7 @@ def run( test_filter_split: Optional[str] = None, model_display_name: Optional[str] = None, model_labels: Optional[Dict[str, str]] = None, + create_request_timeout: Optional[float] = None, sync: bool = True, ) -> models.Model: """Runs the training job and returns a model. @@ -6454,6 +6467,10 @@ def run( are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + create_request_timeout (float): + Optional. The timeout for initiating this request in seconds. Note: + this does not set the timeout on the underlying job, only on + the time to initiate the request. sync (bool): Whether to execute this method synchronously. 
If False, this method will be executed in concurrent Future and any downstream object will @@ -6486,6 +6503,7 @@ def run( test_filter_split=test_filter_split, model_display_name=model_display_name, model_labels=model_labels, + create_request_timeout=create_request_timeout, sync=sync, ) diff --git a/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py b/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py index 3ca54a8ad6..bed6a07661 100644 --- a/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py @@ -272,6 +272,7 @@ def test_run_call_pipeline_service_create( quantiles=_TEST_TRAINING_QUANTILES, validation_options=_TEST_TRAINING_VALIDATION_OPTIONS, additional_experiments=_TEST_ADDITIONAL_EXPERIMENTS, + create_request_timeout=None, sync=sync, ) @@ -301,6 +302,7 @@ def test_run_call_pipeline_service_create( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert job._gca_resource is mock_pipeline_service_get.return_value @@ -317,6 +319,80 @@ def test_run_call_pipeline_service_create( assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED + @pytest.mark.parametrize("sync", [True, False]) + def test_run_call_pipeline_service_create_with_timeout( + self, + mock_pipeline_service_create, + mock_pipeline_service_get, + mock_dataset_time_series, + mock_model_service_get, + sync, + ): + aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME) + + job = AutoMLForecastingTrainingJob( + display_name=_TEST_DISPLAY_NAME, + optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME, + column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS, + labels=_TEST_LABELS, + ) + + model_from_job = job.run( + dataset=mock_dataset_time_series, + target_column=_TEST_TRAINING_TARGET_COLUMN, + time_column=_TEST_TRAINING_TIME_COLUMN, + time_series_identifier_column=_TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN, + unavailable_at_forecast_columns=_TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS, + available_at_forecast_columns=_TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS, + forecast_horizon=_TEST_TRAINING_FORECAST_HORIZON, + data_granularity_unit=_TEST_TRAINING_DATA_GRANULARITY_UNIT, + data_granularity_count=_TEST_TRAINING_DATA_GRANULARITY_COUNT, + model_display_name=_TEST_MODEL_DISPLAY_NAME, + model_labels=_TEST_MODEL_LABELS, + predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME, + weight_column=_TEST_TRAINING_WEIGHT_COLUMN, + time_series_attribute_columns=_TEST_TRAINING_TIME_SERIES_ATTRIBUTE_COLUMNS, + context_window=_TEST_TRAINING_CONTEXT_WINDOW, + budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, + export_evaluated_data_items=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS, + export_evaluated_data_items_bigquery_destination_uri=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI, + export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION, + quantiles=_TEST_TRAINING_QUANTILES, + validation_options=_TEST_TRAINING_VALIDATION_OPTIONS, + additional_experiments=_TEST_ADDITIONAL_EXPERIMENTS, + create_request_timeout=180.0, + sync=sync, + ) + + if not sync: + model_from_job.wait() + + true_managed_model = gca_model.Model( + display_name=_TEST_MODEL_DISPLAY_NAME, labels=_TEST_MODEL_LABELS + ) + + true_input_data_config = gca_training_pipeline.InputDataConfig( + 
predefined_split=gca_training_pipeline.PredefinedSplit( + key=_TEST_PREDEFINED_SPLIT_COLUMN_NAME + ), + dataset_id=mock_dataset_time_series.name, + ) + + true_training_pipeline = gca_training_pipeline.TrainingPipeline( + display_name=_TEST_DISPLAY_NAME, + labels=_TEST_LABELS, + training_task_definition=schema.training_job.definition.automl_forecasting, + training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_ADDITIONAL_EXPERIMENTS, + model_to_upload=true_managed_model, + input_data_config=true_input_data_config, + ) + + mock_pipeline_service_create.assert_called_once_with( + parent=initializer.global_config.common_location_path(), + training_pipeline=true_training_pipeline, + timeout=180.0, + ) + @pytest.mark.usefixtures("mock_pipeline_service_get") @pytest.mark.parametrize("sync", [True, False]) def test_run_call_pipeline_if_no_model_display_name_nor_model_labels( @@ -354,6 +430,7 @@ def test_run_call_pipeline_if_no_model_display_name_nor_model_labels( export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION, quantiles=_TEST_TRAINING_QUANTILES, validation_options=_TEST_TRAINING_VALIDATION_OPTIONS, + create_request_timeout=None, sync=sync, ) @@ -381,6 +458,7 @@ def test_run_call_pipeline_if_no_model_display_name_nor_model_labels( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) @pytest.mark.usefixtures("mock_pipeline_service_get") @@ -421,6 +499,7 @@ def test_run_call_pipeline_if_set_additional_experiments( export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION, quantiles=_TEST_TRAINING_QUANTILES, validation_options=_TEST_TRAINING_VALIDATION_OPTIONS, + create_request_timeout=None, sync=sync, ) @@ -445,6 +524,7 @@ def test_run_call_pipeline_if_set_additional_experiments( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) @pytest.mark.usefixtures( @@ -623,6 +703,7 @@ def test_splits_fraction( export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION, quantiles=_TEST_TRAINING_QUANTILES, validation_options=_TEST_TRAINING_VALIDATION_OPTIONS, + create_request_timeout=None, sync=sync, ) @@ -657,6 +738,7 @@ def test_splits_fraction( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -705,6 +787,7 @@ def test_splits_predefined( export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION, quantiles=_TEST_TRAINING_QUANTILES, validation_options=_TEST_TRAINING_VALIDATION_OPTIONS, + create_request_timeout=None, sync=sync, ) @@ -736,6 +819,7 @@ def test_splits_predefined( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -783,6 +867,7 @@ def test_splits_default( export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION, quantiles=_TEST_TRAINING_QUANTILES, validation_options=_TEST_TRAINING_VALIDATION_OPTIONS, + create_request_timeout=None, sync=sync, ) @@ -810,4 
+895,5 @@ def test_splits_default( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) diff --git a/tests/unit/aiplatform/test_automl_image_training_jobs.py b/tests/unit/aiplatform/test_automl_image_training_jobs.py index 338edf7adc..72ec80a0d6 100644 --- a/tests/unit/aiplatform/test_automl_image_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_image_training_jobs.py @@ -271,6 +271,7 @@ def test_run_call_pipeline_service_create( test_filter_split=_TEST_FILTER_SPLIT_TEST, budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, + create_request_timeout=None, sync=sync, ) @@ -307,6 +308,7 @@ def test_run_call_pipeline_service_create( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) mock_model_service_get.assert_called_once_with( @@ -318,6 +320,76 @@ def test_run_call_pipeline_service_create( assert not job.has_failed assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED + @pytest.mark.parametrize("sync", [True, False]) + def test_run_call_pipeline_service_create_with_timeout( + self, + mock_pipeline_service_create, + mock_pipeline_service_get, + mock_dataset_image, + mock_model_service_get, + mock_model, + sync, + ): + """Create and run an AutoML ICN training job, verify calls and return value""" + + aiplatform.init( + project=_TEST_PROJECT, + encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME, + ) + + job = training_jobs.AutoMLImageTrainingJob( + display_name=_TEST_DISPLAY_NAME, base_model=mock_model, labels=_TEST_LABELS, + ) + + model_from_job = job.run( + dataset=mock_dataset_image, + model_display_name=_TEST_MODEL_DISPLAY_NAME, + model_labels=_TEST_MODEL_LABELS, + training_filter_split=_TEST_FILTER_SPLIT_TRAINING, + validation_filter_split=_TEST_FILTER_SPLIT_VALIDATION, + test_filter_split=_TEST_FILTER_SPLIT_TEST, + budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, + disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, + create_request_timeout=180.0, + sync=sync, + ) + + if not sync: + model_from_job.wait() + + true_filter_split = gca_training_pipeline.FilterSplit( + training_filter=_TEST_FILTER_SPLIT_TRAINING, + validation_filter=_TEST_FILTER_SPLIT_VALIDATION, + test_filter=_TEST_FILTER_SPLIT_TEST, + ) + + true_managed_model = gca_model.Model( + display_name=_TEST_MODEL_DISPLAY_NAME, + labels=mock_model._gca_resource.labels, + description=mock_model._gca_resource.description, + encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC, + ) + + true_input_data_config = gca_training_pipeline.InputDataConfig( + filter_split=true_filter_split, dataset_id=mock_dataset_image.name, + ) + + true_training_pipeline = gca_training_pipeline.TrainingPipeline( + display_name=_TEST_DISPLAY_NAME, + labels=_TEST_LABELS, + training_task_definition=schema.training_job.definition.automl_image_classification, + training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_BASE_MODEL, + model_to_upload=true_managed_model, + input_data_config=true_input_data_config, + encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC, + ) + + mock_pipeline_service_create.assert_called_once_with( + parent=initializer.global_config.common_location_path(), + training_pipeline=true_training_pipeline, + timeout=180.0, + ) + @pytest.mark.usefixtures("mock_pipeline_service_get") 
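The training-job variants follow the same pattern; a minimal sketch for the image job, assuming an existing ImageDataset and placeholder names:

    from google.cloud import aiplatform

    aiplatform.init(project="my-project", location="us-central1")  # placeholder project

    dataset = aiplatform.ImageDataset(
        "projects/my-project/locations/us-central1/datasets/456"  # placeholder dataset
    )

    job = aiplatform.AutoMLImageTrainingJob(display_name="my-icn-job")
    model = job.run(
        dataset=dataset,
        model_display_name="my-icn-model",
        create_request_timeout=180.0,  # seconds; applies to the CreateTrainingPipeline call only
    )
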
@pytest.mark.parametrize("sync", [True, False]) def test_run_call_pipeline_if_no_model_display_name_nor_model_labels( @@ -340,6 +412,7 @@ def test_run_call_pipeline_if_no_model_display_name_nor_model_labels( dataset=mock_dataset_image, budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, + create_request_timeout=None, ) if not sync: @@ -369,6 +442,7 @@ def test_run_call_pipeline_if_no_model_display_name_nor_model_labels( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) @pytest.mark.usefixtures( @@ -497,6 +571,7 @@ def test_splits_fraction( test_fraction_split=_TEST_FRACTION_SPLIT_TEST, budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, + create_request_timeout=None, sync=sync, ) @@ -531,6 +606,7 @@ def test_splits_fraction( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -565,6 +641,7 @@ def test_splits_filter( test_filter_split=_TEST_FILTER_SPLIT_TEST, budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, + create_request_timeout=None, sync=sync, ) @@ -599,6 +676,7 @@ def test_splits_filter( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -630,6 +708,7 @@ def test_splits_default( model_display_name=_TEST_MODEL_DISPLAY_NAME, budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, + create_request_timeout=None, sync=sync, ) @@ -658,6 +737,7 @@ def test_splits_default( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) def test_splits_filter_incomplete( diff --git a/tests/unit/aiplatform/test_automl_text_training_jobs.py b/tests/unit/aiplatform/test_automl_text_training_jobs.py index 76d6f789b4..806c041dc7 100644 --- a/tests/unit/aiplatform/test_automl_text_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_text_training_jobs.py @@ -275,6 +275,7 @@ def test_init_aiplatform_with_encryption_key_name_and_create_training_job( model_from_job = job.run( dataset=mock_dataset_text, model_display_name=_TEST_MODEL_DISPLAY_NAME, + create_request_timeout=None, sync=sync, ) @@ -302,6 +303,7 @@ def test_init_aiplatform_with_encryption_key_name_and_create_training_job( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -333,6 +335,7 @@ def test_run_call_pipeline_service_create_classification( training_filter_split=_TEST_FILTER_SPLIT_TRAINING, validation_filter_split=_TEST_FILTER_SPLIT_VALIDATION, test_filter_split=_TEST_FILTER_SPLIT_TEST, + create_request_timeout=None, sync=sync, ) @@ -368,6 +371,7 @@ def test_run_call_pipeline_service_create_classification( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), 
training_pipeline=true_training_pipeline, + timeout=None, ) mock_model_service_get.assert_called_once_with( @@ -379,6 +383,74 @@ def test_run_call_pipeline_service_create_classification( assert not job.has_failed assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED + @pytest.mark.parametrize("sync", [True, False]) + def test_run_call_pipeline_service_create_classification_with_timeout( + self, + mock_pipeline_service_create, + mock_pipeline_service_get, + mock_dataset_text, + mock_model_service_get, + sync, + ): + """Create and run an AutoML Text Classification training job, verify calls and return value""" + + aiplatform.init(project=_TEST_PROJECT) + + job = training_jobs.AutoMLTextTrainingJob( + display_name=_TEST_DISPLAY_NAME, + labels=_TEST_LABELS, + prediction_type=_TEST_PREDICTION_TYPE_CLASSIFICATION, + multi_label=_TEST_CLASSIFICATION_MULTILABEL, + training_encryption_spec_key_name=_TEST_PIPELINE_ENCRYPTION_KEY_NAME, + model_encryption_spec_key_name=_TEST_MODEL_ENCRYPTION_KEY_NAME, + ) + + model_from_job = job.run( + dataset=mock_dataset_text, + model_display_name=_TEST_MODEL_DISPLAY_NAME, + model_labels=_TEST_MODEL_LABELS, + training_filter_split=_TEST_FILTER_SPLIT_TRAINING, + validation_filter_split=_TEST_FILTER_SPLIT_VALIDATION, + test_filter_split=_TEST_FILTER_SPLIT_TEST, + create_request_timeout=180.0, + sync=sync, + ) + + if not sync: + model_from_job.wait() + + true_filter_split = gca_training_pipeline.FilterSplit( + training_filter=_TEST_FILTER_SPLIT_TRAINING, + validation_filter=_TEST_FILTER_SPLIT_VALIDATION, + test_filter=_TEST_FILTER_SPLIT_TEST, + ) + + true_managed_model = gca_model.Model( + display_name=_TEST_MODEL_DISPLAY_NAME, + labels=_TEST_MODEL_LABELS, + encryption_spec=_TEST_MODEL_ENCRYPTION_SPEC, + ) + + true_input_data_config = gca_training_pipeline.InputDataConfig( + filter_split=true_filter_split, dataset_id=mock_dataset_text.name, + ) + + true_training_pipeline = gca_training_pipeline.TrainingPipeline( + display_name=_TEST_DISPLAY_NAME, + labels=_TEST_LABELS, + training_task_definition=schema.training_job.definition.automl_text_classification, + training_task_inputs=_TEST_TRAINING_TASK_INPUTS_CLASSIFICATION, + model_to_upload=true_managed_model, + input_data_config=true_input_data_config, + encryption_spec=_TEST_PIPELINE_ENCRYPTION_SPEC, + ) + + mock_pipeline_service_create.assert_called_once_with( + parent=initializer.global_config.common_location_path(), + training_pipeline=true_training_pipeline, + timeout=180.0, + ) + @pytest.mark.parametrize("sync", [True, False]) def test_run_call_pipeline_service_create_extraction( self, @@ -405,6 +477,7 @@ def test_run_call_pipeline_service_create_extraction( training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING, validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION, test_fraction_split=_TEST_FRACTION_SPLIT_TEST, + create_request_timeout=None, sync=sync, ) @@ -437,6 +510,7 @@ def test_run_call_pipeline_service_create_extraction( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) mock_model_service_get.assert_called_once_with( @@ -475,6 +549,7 @@ def test_run_call_pipeline_service_create_sentiment( training_filter_split=_TEST_FILTER_SPLIT_TRAINING, validation_filter_split=_TEST_FILTER_SPLIT_VALIDATION, test_filter_split=_TEST_FILTER_SPLIT_TEST, + create_request_timeout=None, sync=sync, ) @@ -507,6 +582,7 @@ def test_run_call_pipeline_service_create_sentiment( 
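The argument also composes with sync=False; a hedged sketch for the text job (resource names are placeholders), where the timeout bounds only the request that starts the pipeline, not the training itself:

    from google.cloud import aiplatform

    aiplatform.init(project="my-project", location="us-central1")  # placeholder project

    dataset = aiplatform.TextDataset(
        "projects/my-project/locations/us-central1/datasets/789"  # placeholder dataset
    )

    job = aiplatform.AutoMLTextTrainingJob(
        display_name="my-text-job",
        prediction_type="classification",
        multi_label=False,
    )
    model = job.run(
        dataset=dataset,
        model_display_name="my-text-model",
        create_request_timeout=180.0,  # only bounds the CreateTrainingPipeline request
        sync=False,                    # return immediately; training continues server-side
    )
    model.wait()  # block until the model is ready, unaffected by the request timeout
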
mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) mock_model_service_get.assert_called_once_with( @@ -540,6 +616,7 @@ def test_run_call_pipeline_if_no_model_display_name_nor_model_labels( model_from_job = job.run( dataset=mock_dataset_text, model_display_name=None, # Omit model_display_name + create_request_timeout=None, sync=sync, ) @@ -567,6 +644,7 @@ def test_run_call_pipeline_if_no_model_display_name_nor_model_labels( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) @pytest.mark.usefixtures( @@ -693,6 +771,7 @@ def test_splits_fraction( training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING, validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION, test_fraction_split=_TEST_FRACTION_SPLIT_TEST, + create_request_timeout=None, sync=sync, ) @@ -727,6 +806,7 @@ def test_splits_fraction( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -761,6 +841,7 @@ def test_splits_filter( training_filter_split=_TEST_FILTER_SPLIT_TRAINING, validation_filter_split=_TEST_FILTER_SPLIT_VALIDATION, test_filter_split=_TEST_FILTER_SPLIT_TEST, + create_request_timeout=None, sync=sync, ) @@ -795,6 +876,7 @@ def test_splits_filter( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -826,6 +908,7 @@ def test_splits_default( model_from_job = job.run( dataset=mock_dataset_text, model_display_name=_TEST_MODEL_DISPLAY_NAME, + create_request_timeout=None, sync=sync, ) @@ -854,4 +937,5 @@ def test_splits_default( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) diff --git a/tests/unit/aiplatform/test_automl_video_training_jobs.py b/tests/unit/aiplatform/test_automl_video_training_jobs.py index 58ea230313..485cc8199e 100644 --- a/tests/unit/aiplatform/test_automl_video_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_video_training_jobs.py @@ -241,6 +241,7 @@ def test_init_aiplatform_with_encryption_key_name_and_create_training_job( model_from_job = job.run( dataset=mock_dataset_video, model_display_name=_TEST_MODEL_DISPLAY_NAME, + create_request_timeout=None, sync=sync, ) @@ -269,6 +270,7 @@ def test_init_aiplatform_with_encryption_key_name_and_create_training_job( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) mock_model_service_get.assert_called_once_with( @@ -311,6 +313,7 @@ def test_splits_fraction( model_display_name=_TEST_MODEL_DISPLAY_NAME, training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING, test_fraction_split=_TEST_FRACTION_SPLIT_TEST, + create_request_timeout=None, sync=sync, ) @@ -345,6 +348,7 @@ def test_splits_fraction( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -378,6 +382,7 @@ def test_splits_filter( 
model_display_name=_TEST_MODEL_DISPLAY_NAME, training_filter_split=_TEST_FILTER_SPLIT_TRAINING, test_filter_split=_TEST_FILTER_SPLIT_TEST, + create_request_timeout=None, sync=sync, ) @@ -412,6 +417,7 @@ def test_splits_filter( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -443,6 +449,7 @@ def test_splits_default( model_from_job = job.run( dataset=mock_dataset_video, model_display_name=_TEST_MODEL_DISPLAY_NAME, + create_request_timeout=None, sync=sync, ) @@ -471,6 +478,7 @@ def test_splits_default( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -502,6 +510,7 @@ def test_run_call_pipeline_service_create( model_labels=_TEST_MODEL_LABELS, training_filter_split=_TEST_FILTER_SPLIT_TRAINING, test_filter_split=_TEST_FILTER_SPLIT_TEST, + create_request_timeout=None, sync=sync, ) @@ -538,6 +547,7 @@ def test_run_call_pipeline_service_create( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) mock_model_service_get.assert_called_once_with( @@ -549,6 +559,75 @@ def test_run_call_pipeline_service_create( assert not job.has_failed assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED + @pytest.mark.parametrize("sync", [True, False]) + def test_run_call_pipeline_service_create_with_timeout( + self, + mock_pipeline_service_create, + mock_pipeline_service_get, + mock_dataset_video, + mock_model_service_get, + mock_model, + sync, + ): + """Create and run an AutoML ICN training job, verify calls and return value""" + + aiplatform.init(project=_TEST_PROJECT) + + job = training_jobs.AutoMLVideoTrainingJob( + display_name=_TEST_DISPLAY_NAME, + labels=_TEST_LABELS, + prediction_type=_TEST_PREDICTION_TYPE_VCN, + model_type=_TEST_MODEL_TYPE_CLOUD, + training_encryption_spec_key_name=_TEST_PIPELINE_ENCRYPTION_KEY_NAME, + model_encryption_spec_key_name=_TEST_MODEL_ENCRYPTION_KEY_NAME, + ) + + model_from_job = job.run( + dataset=mock_dataset_video, + model_display_name=_TEST_MODEL_DISPLAY_NAME, + model_labels=_TEST_MODEL_LABELS, + training_filter_split=_TEST_FILTER_SPLIT_TRAINING, + test_filter_split=_TEST_FILTER_SPLIT_TEST, + create_request_timeout=180.0, + sync=sync, + ) + + if not sync: + model_from_job.wait() + + true_filter_split = gca_training_pipeline.FilterSplit( + training_filter=_TEST_FILTER_SPLIT_TRAINING, + validation_filter=_TEST_FILTER_SPLIT_VALIDATION, + test_filter=_TEST_FILTER_SPLIT_TEST, + ) + + true_managed_model = gca_model.Model( + display_name=_TEST_MODEL_DISPLAY_NAME, + labels=_TEST_MODEL_LABELS, + description=mock_model._gca_resource.description, + encryption_spec=_TEST_MODEL_ENCRYPTION_SPEC, + ) + + true_input_data_config = gca_training_pipeline.InputDataConfig( + filter_split=true_filter_split, dataset_id=mock_dataset_video.name, + ) + + true_training_pipeline = gca_training_pipeline.TrainingPipeline( + display_name=_TEST_DISPLAY_NAME, + labels=_TEST_LABELS, + training_task_definition=schema.training_job.definition.automl_video_classification, + training_task_inputs=_TEST_TRAINING_TASK_INPUTS, + model_to_upload=true_managed_model, + input_data_config=true_input_data_config, + 
encryption_spec=_TEST_PIPELINE_ENCRYPTION_SPEC, + ) + + mock_pipeline_service_create.assert_called_once_with( + parent=initializer.global_config.common_location_path(), + training_pipeline=true_training_pipeline, + timeout=180.0, + ) + @pytest.mark.usefixtures("mock_pipeline_service_get") @pytest.mark.parametrize("sync", [True, False]) def test_run_call_pipeline_if_no_model_display_name_nor_model_labels( @@ -571,6 +650,7 @@ def test_run_call_pipeline_if_no_model_display_name_nor_model_labels( dataset=mock_dataset_video, training_fraction_split=_TEST_ALTERNATE_FRACTION_SPLIT_TRAINING, test_fraction_split=_TEST_ALTERNATE_FRACTION_SPLIT_TEST, + create_request_timeout=None, ) if not sync: @@ -603,6 +683,7 @@ def test_run_call_pipeline_if_no_model_display_name_nor_model_labels( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) @pytest.mark.usefixtures( diff --git a/tests/unit/aiplatform/test_datasets.py b/tests/unit/aiplatform/test_datasets.py index 3101de1413..79e4973926 100644 --- a/tests/unit/aiplatform/test_datasets.py +++ b/tests/unit/aiplatform/test_datasets.py @@ -683,24 +683,6 @@ def test_create_dataset_tabular(self, create_dataset_mock): timeout=None, ) - @pytest.mark.usefixtures("get_dataset_mock") - def test_create_dataset_tabular_with_timeout(self, create_dataset_mock): - aiplatform.init(project=_TEST_PROJECT) - - expected_dataset = gca_dataset.Dataset( - display_name=_TEST_DISPLAY_NAME, - metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR, - metadata=_TEST_NONTABULAR_DATASET_METADATA, - encryption_spec=_TEST_ENCRYPTION_SPEC, - ) - - create_dataset_mock.assert_called_once_with( - parent=_TEST_PARENT, - dataset=expected_dataset, - metadata=_TEST_REQUEST_METADATA, - timeout=180.0, - ) - @pytest.mark.usefixtures("get_dataset_mock") @pytest.mark.parametrize("sync", [True, False]) def test_create_and_import_dataset( diff --git a/tests/unit/aiplatform/test_end_to_end.py b/tests/unit/aiplatform/test_end_to_end.py index 8c9c796263..13dd8c44e1 100644 --- a/tests/unit/aiplatform/test_end_to_end.py +++ b/tests/unit/aiplatform/test_end_to_end.py @@ -135,17 +135,21 @@ def test_dataset_create_to_model_predict( training_fraction_split=test_training_jobs._TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=test_training_jobs._TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=test_training_jobs._TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) created_endpoint = models.Endpoint.create( display_name=test_endpoints._TEST_DISPLAY_NAME, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + create_request_timeout=None, sync=sync, ) my_endpoint = model_from_job.deploy( - encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, sync=sync + encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + deploy_request_timeout=None, + sync=sync, ) endpoint_deploy_return = created_endpoint.deploy(model_from_job, sync=sync) @@ -279,6 +283,7 @@ def test_dataset_create_to_model_predict( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert job._gca_resource == make_training_pipeline( @@ -322,13 +327,16 @@ def test_dataset_create_to_model_predict_with_pipeline_fail( ) my_dataset = aiplatform.ImageDataset.create( - display_name=test_datasets._TEST_DISPLAY_NAME, sync=sync, + display_name=test_datasets._TEST_DISPLAY_NAME, + create_request_timeout=None, + 
sync=sync, ) my_dataset.import_data( gcs_source=test_datasets._TEST_SOURCE_URI_GCS, import_schema_uri=test_datasets._TEST_IMPORT_SCHEMA_URI, data_item_labels=test_datasets._TEST_DATA_LABEL_ITEMS, + import_request_timeout=None, sync=sync, ) @@ -342,7 +350,9 @@ def test_dataset_create_to_model_predict_with_pipeline_fail( ) created_endpoint = models.Endpoint.create( - display_name=test_endpoints._TEST_DISPLAY_NAME, sync=sync, + display_name=test_endpoints._TEST_DISPLAY_NAME, + create_request_timeout=None, + sync=sync, ) model_from_job = job.run( @@ -357,6 +367,7 @@ def test_dataset_create_to_model_predict_with_pipeline_fail( training_fraction_split=test_training_jobs._TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=test_training_jobs._TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=test_training_jobs._TEST_TEST_FRACTION_SPLIT, + create_request_timeout=None, sync=sync, ) @@ -386,10 +397,13 @@ def test_dataset_create_to_model_predict_with_pipeline_fail( parent=test_datasets._TEST_PARENT, dataset=expected_dataset, metadata=test_datasets._TEST_REQUEST_METADATA, + timeout=None, ) import_data_mock.assert_called_once_with( - name=test_datasets._TEST_NAME, import_configs=[expected_import_config] + name=test_datasets._TEST_NAME, + import_configs=[expected_import_config], + timeout=None, ) expected_dataset.name = test_datasets._TEST_NAME @@ -468,6 +482,7 @@ def test_dataset_create_to_model_predict_with_pipeline_fail( mock_pipeline_service_create_and_get_with_fail[0].assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert ( diff --git a/tests/unit/aiplatform/test_endpoints.py b/tests/unit/aiplatform/test_endpoints.py index 2adfc23e01..db55b1a9fd 100644 --- a/tests/unit/aiplatform/test_endpoints.py +++ b/tests/unit/aiplatform/test_endpoints.py @@ -519,7 +519,9 @@ def test_init_aiplatform_with_encryption_key_name_and_create_endpoint( location=_TEST_LOCATION, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, ) - my_endpoint = models.Endpoint.create(display_name=_TEST_DISPLAY_NAME, sync=sync) + my_endpoint = models.Endpoint.create( + display_name=_TEST_DISPLAY_NAME, sync=sync, create_request_timeout=None, + ) if not sync: my_endpoint.wait() @@ -528,7 +530,7 @@ def test_init_aiplatform_with_encryption_key_name_and_create_endpoint( display_name=_TEST_DISPLAY_NAME, encryption_spec=_TEST_ENCRYPTION_SPEC ) create_endpoint_mock.assert_called_once_with( - parent=_TEST_PARENT, endpoint=expected_endpoint, metadata=(), + parent=_TEST_PARENT, endpoint=expected_endpoint, metadata=(), timeout=None, ) expected_endpoint.name = _TEST_ENDPOINT_NAME @@ -540,6 +542,7 @@ def test_create(self, create_endpoint_mock, sync): my_endpoint = models.Endpoint.create( display_name=_TEST_DISPLAY_NAME, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + create_request_timeout=None, sync=sync, ) @@ -550,12 +553,32 @@ def test_create(self, create_endpoint_mock, sync): display_name=_TEST_DISPLAY_NAME, encryption_spec=_TEST_ENCRYPTION_SPEC ) create_endpoint_mock.assert_called_once_with( - parent=_TEST_PARENT, endpoint=expected_endpoint, metadata=(), + parent=_TEST_PARENT, endpoint=expected_endpoint, metadata=(), timeout=None, ) expected_endpoint.name = _TEST_ENDPOINT_NAME assert my_endpoint._gca_resource == expected_endpoint + @pytest.mark.usefixtures("get_endpoint_mock") + @pytest.mark.parametrize("sync", [True, False]) + def test_create_with_timeout(self, create_endpoint_mock, sync): + my_endpoint = models.Endpoint.create( + 
display_name=_TEST_DISPLAY_NAME, + encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + create_request_timeout=180.0, + sync=sync, + ) + + if not sync: + my_endpoint.wait() + + expected_endpoint = gca_endpoint.Endpoint( + display_name=_TEST_DISPLAY_NAME, encryption_spec=_TEST_ENCRYPTION_SPEC + ) + create_endpoint_mock.assert_called_once_with( + parent=_TEST_PARENT, endpoint=expected_endpoint, metadata=(), timeout=180.0, + ) + @pytest.mark.usefixtures("get_empty_endpoint_mock") def test_accessing_properties_with_no_resource_raises(self,): """Ensure a descriptive RuntimeError is raised when the @@ -578,7 +601,10 @@ def test_accessing_properties_with_no_resource_raises(self,): @pytest.mark.parametrize("sync", [True, False]) def test_create_with_description(self, create_endpoint_mock, sync): my_endpoint = models.Endpoint.create( - display_name=_TEST_DISPLAY_NAME, description=_TEST_DESCRIPTION, sync=sync + display_name=_TEST_DISPLAY_NAME, + description=_TEST_DESCRIPTION, + create_request_timeout=None, + sync=sync, ) if not sync: my_endpoint.wait() @@ -587,14 +613,17 @@ def test_create_with_description(self, create_endpoint_mock, sync): display_name=_TEST_DISPLAY_NAME, description=_TEST_DESCRIPTION, ) create_endpoint_mock.assert_called_once_with( - parent=_TEST_PARENT, endpoint=expected_endpoint, metadata=(), + parent=_TEST_PARENT, endpoint=expected_endpoint, metadata=(), timeout=None, ) @pytest.mark.usefixtures("get_endpoint_mock") @pytest.mark.parametrize("sync", [True, False]) def test_create_with_labels(self, create_endpoint_mock, sync): my_endpoint = models.Endpoint.create( - display_name=_TEST_DISPLAY_NAME, labels=_TEST_LABELS, sync=sync + display_name=_TEST_DISPLAY_NAME, + labels=_TEST_LABELS, + create_request_timeout=None, + sync=sync, ) if not sync: my_endpoint.wait() @@ -603,7 +632,7 @@ def test_create_with_labels(self, create_endpoint_mock, sync): display_name=_TEST_DISPLAY_NAME, labels=_TEST_LABELS, ) create_endpoint_mock.assert_called_once_with( - parent=_TEST_PARENT, endpoint=expected_endpoint, metadata=(), + parent=_TEST_PARENT, endpoint=expected_endpoint, metadata=(), timeout=None, ) @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock") @@ -614,7 +643,40 @@ def test_deploy(self, deploy_model_mock, sync): test_model._gca_resource.supported_deployment_resources_types.append( aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES ) - test_endpoint.deploy(test_model, sync=sync) + test_endpoint.deploy( + test_model, sync=sync, deploy_request_timeout=None, + ) + + if not sync: + test_endpoint.wait() + + automatic_resources = gca_machine_resources.AutomaticResources( + min_replica_count=1, max_replica_count=1, + ) + deployed_model = gca_endpoint.DeployedModel( + automatic_resources=automatic_resources, + model=test_model.resource_name, + display_name=None, + ) + deploy_model_mock.assert_called_once_with( + endpoint=test_endpoint.resource_name, + deployed_model=deployed_model, + traffic_split={"0": 100}, + metadata=(), + timeout=None, + ) + + @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock") + @pytest.mark.parametrize("sync", [True, False]) + def test_deploy_with_timeout(self, deploy_model_mock, sync): + test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME) + test_model = models.Model(_TEST_ID) + test_model._gca_resource.supported_deployment_resources_types.append( + aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES + ) + test_endpoint.deploy( + test_model, sync=sync, deploy_request_timeout=180.0, + ) if not sync: test_endpoint.wait() 
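A caller-side sketch of deploy_request_timeout, assuming an existing endpoint and model (resource names are placeholders):

    from google.cloud import aiplatform

    aiplatform.init(project="my-project", location="us-central1")  # placeholder project

    endpoint = aiplatform.Endpoint("projects/my-project/locations/us-central1/endpoints/123")  # placeholder
    model = aiplatform.Model("projects/my-project/locations/us-central1/models/456")           # placeholder

    endpoint.deploy(
        model=model,
        deployed_model_display_name="my-deployed-model",
        traffic_percentage=100,
        deploy_request_timeout=600.0,  # seconds; bounds only the DeployModel request
    )
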
@@ -632,6 +694,7 @@ def test_deploy(self, deploy_model_mock, sync): deployed_model=deployed_model, traffic_split={"0": 100}, metadata=(), + timeout=180.0, ) @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock") @@ -643,7 +706,10 @@ def test_deploy_with_display_name(self, deploy_model_mock, sync): aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES ) test_endpoint.deploy( - model=test_model, deployed_model_display_name=_TEST_DISPLAY_NAME, sync=sync + model=test_model, + deployed_model_display_name=_TEST_DISPLAY_NAME, + sync=sync, + deploy_request_timeout=None, ) if not sync: @@ -662,6 +728,7 @@ def test_deploy_with_display_name(self, deploy_model_mock, sync): deployed_model=deployed_model, traffic_split={"0": 100}, metadata=(), + timeout=None, ) @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock") @@ -750,7 +817,12 @@ def test_deploy_with_traffic_percent(self, deploy_model_mock, sync): test_model._gca_resource.supported_deployment_resources_types.append( aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES ) - test_endpoint.deploy(model=test_model, traffic_percentage=70, sync=sync) + test_endpoint.deploy( + model=test_model, + traffic_percentage=70, + sync=sync, + deploy_request_timeout=None, + ) if not sync: test_endpoint.wait() automatic_resources = gca_machine_resources.AutomaticResources( @@ -766,6 +838,7 @@ def test_deploy_with_traffic_percent(self, deploy_model_mock, sync): deployed_model=deployed_model, traffic_split={"model1": 30, "0": 70}, metadata=(), + timeout=None, ) @pytest.mark.usefixtures("get_model_mock") @@ -786,7 +859,10 @@ def test_deploy_with_traffic_split(self, deploy_model_mock, sync): aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES ) test_endpoint.deploy( - model=test_model, traffic_split={"model1": 30, "0": 70}, sync=sync + model=test_model, + traffic_split={"model1": 30, "0": 70}, + sync=sync, + deploy_request_timeout=None, ) if not sync: @@ -804,6 +880,7 @@ def test_deploy_with_traffic_split(self, deploy_model_mock, sync): deployed_model=deployed_model, traffic_split={"model1": 30, "0": 70}, metadata=(), + timeout=None, ) @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock") @@ -820,6 +897,7 @@ def test_deploy_with_dedicated_resources(self, deploy_model_mock, sync): accelerator_type=_TEST_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, service_account=_TEST_SERVICE_ACCOUNT, + deploy_request_timeout=None, sync=sync, ) @@ -847,6 +925,7 @@ def test_deploy_with_dedicated_resources(self, deploy_model_mock, sync): deployed_model=expected_deployed_model, traffic_split={"0": 100}, metadata=(), + timeout=None, ) @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock") @@ -864,6 +943,7 @@ def test_deploy_with_explanations(self, deploy_model_with_explanations_mock, syn accelerator_count=_TEST_ACCELERATOR_COUNT, explanation_metadata=_TEST_EXPLANATION_METADATA, explanation_parameters=_TEST_EXPLANATION_PARAMETERS, + deploy_request_timeout=None, sync=sync, ) @@ -894,6 +974,7 @@ def test_deploy_with_explanations(self, deploy_model_with_explanations_mock, syn deployed_model=expected_deployed_model, traffic_split={"0": 100}, metadata=(), + timeout=None, ) @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock") @@ -904,7 +985,12 @@ def test_deploy_with_min_replica_count(self, deploy_model_mock, sync): test_model._gca_resource.supported_deployment_resources_types.append( aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES ) - test_endpoint.deploy(model=test_model, 
min_replica_count=2, sync=sync) + test_endpoint.deploy( + model=test_model, + min_replica_count=2, + sync=sync, + deploy_request_timeout=None, + ) if not sync: test_endpoint.wait() @@ -921,6 +1007,7 @@ def test_deploy_with_min_replica_count(self, deploy_model_mock, sync): deployed_model=deployed_model, traffic_split={"0": 100}, metadata=(), + timeout=None, ) @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock") @@ -931,7 +1018,12 @@ def test_deploy_with_max_replica_count(self, deploy_model_mock, sync): test_model._gca_resource.supported_deployment_resources_types.append( aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES ) - test_endpoint.deploy(model=test_model, max_replica_count=2, sync=sync) + test_endpoint.deploy( + model=test_model, + max_replica_count=2, + sync=sync, + deploy_request_timeout=None, + ) if not sync: test_endpoint.wait() automatic_resources = gca_machine_resources.AutomaticResources( @@ -947,6 +1039,7 @@ def test_deploy_with_max_replica_count(self, deploy_model_mock, sync): deployed_model=deployed_model, traffic_split={"0": 100}, metadata=(), + timeout=None, ) @pytest.mark.parametrize( diff --git a/tests/unit/aiplatform/test_models.py b/tests/unit/aiplatform/test_models.py index 0131b578c7..e18cfaccde 100644 --- a/tests/unit/aiplatform/test_models.py +++ b/tests/unit/aiplatform/test_models.py @@ -1082,6 +1082,7 @@ def test_init_aiplatform_with_encryption_key_name_and_batch_predict_gcs_source_a job_display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME, gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE, gcs_destination_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX, + create_request_timeout=None, sync=sync, ) @@ -1110,6 +1111,7 @@ def test_init_aiplatform_with_encryption_key_name_and_batch_predict_gcs_source_a create_batch_prediction_job_mock.assert_called_once_with( parent=_TEST_PARENT, batch_prediction_job=expected_gapic_batch_prediction_job, + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -1125,6 +1127,7 @@ def test_batch_predict_gcs_source_and_dest( job_display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME, gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE, gcs_destination_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX, + create_request_timeout=None, sync=sync, ) @@ -1152,6 +1155,7 @@ def test_batch_predict_gcs_source_and_dest( create_batch_prediction_job_mock.assert_called_once_with( parent=_TEST_PARENT, batch_prediction_job=expected_gapic_batch_prediction_job, + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -1167,6 +1171,7 @@ def test_batch_predict_gcs_source_bq_dest( job_display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME, gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE, bigquery_destination_prefix=_TEST_BATCH_PREDICTION_BQ_PREFIX, + create_request_timeout=None, sync=sync, ) @@ -1194,6 +1199,7 @@ def test_batch_predict_gcs_source_bq_dest( create_batch_prediction_job_mock.assert_called_once_with( parent=_TEST_PARENT, batch_prediction_job=expected_gapic_batch_prediction_job, + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -1220,6 +1226,7 @@ def test_batch_predict_with_all_args(self, create_batch_prediction_job_mock, syn labels=_TEST_LABEL, credentials=creds, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + create_request_timeout=None, sync=sync, ) @@ -1263,6 +1270,7 @@ def test_batch_predict_with_all_args(self, create_batch_prediction_job_mock, syn create_batch_prediction_job_mock.assert_called_once_with( parent=f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}", 
batch_prediction_job=expected_gapic_batch_prediction_job, + timeout=None, ) @pytest.mark.usefixtures("get_model_mock", "get_batch_prediction_job_mock") From 2f8d67f801d5bf5df1dba64a74719fda1d2db5c5 Mon Sep 17 00:00:00 2001 From: Sara Robinson Date: Fri, 25 Mar 2022 14:53:39 -0400 Subject: [PATCH 15/19] update system tests with timeout arg --- tests/system/aiplatform/test_dataset.py | 6 +++--- tests/system/aiplatform/test_tensorboard.py | 4 +++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/tests/system/aiplatform/test_dataset.py b/tests/system/aiplatform/test_dataset.py index e79403acfa..d8d8bd53e3 100644 --- a/tests/system/aiplatform/test_dataset.py +++ b/tests/system/aiplatform/test_dataset.py @@ -196,7 +196,7 @@ def test_get_new_dataset_and_import(self, dataset_gapic_client, shared_state): my_dataset.import_data( gcs_source=_TEST_TEXT_ENTITY_EXTRACTION_GCS_SOURCE, import_schema_uri=_TEST_TEXT_ENTITY_IMPORT_SCHEMA, - timeout=None, + import_request_timeout=None, ) data_items_post_import = dataset_gapic_client.list_data_items( @@ -217,7 +217,7 @@ def test_create_and_import_image_dataset(self, dataset_gapic_client, shared_stat display_name=f"temp_sdk_integration_create_and_import_dataset_{uuid.uuid4()}", gcs_source=_TEST_IMAGE_OBJECT_DETECTION_GCS_SOURCE, import_schema_uri=_TEST_IMAGE_OBJ_DET_IMPORT_SCHEMA, - timeout=None, + create_request_timeout=None, ) shared_state["dataset_name"] = img_dataset.resource_name @@ -238,7 +238,7 @@ def test_create_tabular_dataset(self, dataset_gapic_client, shared_state): tabular_dataset = aiplatform.TabularDataset.create( display_name=f"temp_sdk_integration_create_and_import_dataset_{uuid.uuid4()}", gcs_source=[_TEST_TABULAR_CLASSIFICATION_GCS_SOURCE], - timeout=None, + create_request_timeout=None, ) shared_state["dataset_name"] = tabular_dataset.resource_name diff --git a/tests/system/aiplatform/test_tensorboard.py b/tests/system/aiplatform/test_tensorboard.py index 487e9f0d89..539698a5f4 100644 --- a/tests/system/aiplatform/test_tensorboard.py +++ b/tests/system/aiplatform/test_tensorboard.py @@ -31,7 +31,7 @@ def test_create_and_get_tensorboard(self, shared_state): display_name = self._make_display_name("tensorboard") - tb = aiplatform.Tensorboard.create(display_name=display_name, timeout=180.0,) + tb = aiplatform.Tensorboard.create(display_name=display_name, create_request_timeout=None,) shared_state["resources"] = [tb] @@ -49,6 +49,7 @@ def test_create_and_get_tensorboard(self, shared_state): display_name=self._make_display_name("tensorboard_experiment"), description="Vertex SDK Integration test.", labels={"test": "labels"}, + create_request_timeout=None, ) shared_state["resources"].append(tb_experiment) @@ -70,6 +71,7 @@ def test_create_and_get_tensorboard(self, shared_state): tensorboard_experiment_name=tb_experiment.resource_name, description="Vertex SDK Integration test run", labels={"test": "labels"}, + create_request_timeout=None, ) shared_state["resources"].append(tb_run) From 3d211dd70fc9c829e5c5893d425cb3764fd7a39c Mon Sep 17 00:00:00 2001 From: Sara Robinson Date: Fri, 25 Mar 2022 16:49:46 -0400 Subject: [PATCH 16/19] fix broken tests and run linter --- .../cloud/aiplatform/featurestore/feature.py | 1 - google/cloud/aiplatform/models.py | 1 - tests/system/aiplatform/test_tensorboard.py | 4 +- .../test_automl_tabular_training_jobs.py | 91 +++++++++++++++++++ tests/unit/aiplatform/test_featurestores.py | 2 +- 5 files changed, 95 insertions(+), 4 deletions(-) diff --git a/google/cloud/aiplatform/featurestore/feature.py 
b/google/cloud/aiplatform/featurestore/feature.py index 073f94e502..4e9701dfb1 100644 --- a/google/cloud/aiplatform/featurestore/feature.py +++ b/google/cloud/aiplatform/featurestore/feature.py @@ -15,7 +15,6 @@ # limitations under the License. # -from socket import timeout from typing import Dict, List, Optional, Sequence, Tuple from google.auth import credentials as auth_credentials diff --git a/google/cloud/aiplatform/models.py b/google/cloud/aiplatform/models.py index e019d787e3..da6230d5b2 100644 --- a/google/cloud/aiplatform/models.py +++ b/google/cloud/aiplatform/models.py @@ -15,7 +15,6 @@ # limitations under the License. # import pathlib -from venv import create import proto import re import shutil diff --git a/tests/system/aiplatform/test_tensorboard.py b/tests/system/aiplatform/test_tensorboard.py index 539698a5f4..ab587c0617 100644 --- a/tests/system/aiplatform/test_tensorboard.py +++ b/tests/system/aiplatform/test_tensorboard.py @@ -31,7 +31,9 @@ def test_create_and_get_tensorboard(self, shared_state): display_name = self._make_display_name("tensorboard") - tb = aiplatform.Tensorboard.create(display_name=display_name, create_request_timeout=None,) + tb = aiplatform.Tensorboard.create( + display_name=display_name, create_request_timeout=None, + ) shared_state["resources"] = [tb] diff --git a/tests/unit/aiplatform/test_automl_tabular_training_jobs.py b/tests/unit/aiplatform/test_automl_tabular_training_jobs.py index 7dc3c64acf..0b71aaef21 100644 --- a/tests/unit/aiplatform/test_automl_tabular_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_tabular_training_jobs.py @@ -331,6 +331,7 @@ def test_run_call_pipeline_service_create( budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, additional_experiments=_TEST_ADDITIONAL_EXPERIMENTS, + create_request_timeout=None, sync=sync, ) @@ -364,6 +365,7 @@ def test_run_call_pipeline_service_create( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert job._gca_resource is mock_pipeline_service_get.return_value @@ -380,6 +382,75 @@ def test_run_call_pipeline_service_create( assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED + @pytest.mark.parametrize("sync", [True, False]) + def test_run_call_pipeline_service_create_with_timeout( + self, + mock_pipeline_service_create, + mock_pipeline_service_get, + mock_dataset_tabular, + mock_model_service_get, + sync, + ): + aiplatform.init( + project=_TEST_PROJECT, + staging_bucket=_TEST_BUCKET_NAME, + encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME, + ) + + job = training_jobs.AutoMLTabularTrainingJob( + display_name=_TEST_DISPLAY_NAME, + labels=_TEST_LABELS, + optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME, + optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE, + column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS, + optimization_objective_recall_value=None, + optimization_objective_precision_value=None, + ) + + model_from_job = job.run( + dataset=mock_dataset_tabular, + target_column=_TEST_TRAINING_TARGET_COLUMN, + model_display_name=_TEST_MODEL_DISPLAY_NAME, + model_labels=_TEST_MODEL_LABELS, + weight_column=_TEST_TRAINING_WEIGHT_COLUMN, + budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, + disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, + 
additional_experiments=_TEST_ADDITIONAL_EXPERIMENTS, + create_request_timeout=180.0, + sync=sync, + ) + + job.wait_for_resource_creation() + + if not sync: + model_from_job.wait() + + true_managed_model = gca_model.Model( + display_name=_TEST_MODEL_DISPLAY_NAME, + labels=_TEST_MODEL_LABELS, + encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC, + ) + + true_input_data_config = gca_training_pipeline.InputDataConfig( + dataset_id=mock_dataset_tabular.name, + ) + + true_training_pipeline = gca_training_pipeline.TrainingPipeline( + display_name=_TEST_DISPLAY_NAME, + labels=_TEST_LABELS, + training_task_definition=schema.training_job.definition.automl_tabular, + training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_ADDITIONAL_EXPERIMENTS, + model_to_upload=true_managed_model, + input_data_config=true_input_data_config, + encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC, + ) + + mock_pipeline_service_create.assert_called_once_with( + parent=initializer.global_config.common_location_path(), + training_pipeline=true_training_pipeline, + timeout=180.0, + ) + @pytest.mark.parametrize("sync", [True, False]) def test_run_call_pipeline_service_create_with_export_eval_data_items( self, @@ -414,6 +485,7 @@ def test_run_call_pipeline_service_create_with_export_eval_data_items( export_evaluated_data_items=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS, export_evaluated_data_items_bigquery_destination_uri=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI, export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION, + create_request_timeout=None, sync=sync, ) @@ -445,6 +517,7 @@ def test_run_call_pipeline_service_create_with_export_eval_data_items( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) assert job._gca_resource is mock_pipeline_service_get.return_value @@ -490,6 +563,7 @@ def test_run_call_pipeline_if_no_model_display_name_nor_model_labels( weight_column=_TEST_TRAINING_WEIGHT_COLUMN, budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, + create_request_timeout=None, ) job.wait_for_resource_creation() @@ -523,6 +597,7 @@ def test_run_call_pipeline_if_no_model_display_name_nor_model_labels( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -557,6 +632,7 @@ def test_run_call_pipeline_service_create_if_no_column_transformations( weight_column=_TEST_TRAINING_WEIGHT_COLUMN, budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, + create_request_timeout=None, sync=sync, ) @@ -588,6 +664,7 @@ def test_run_call_pipeline_service_create_if_no_column_transformations( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -624,6 +701,7 @@ def test_run_call_pipeline_service_create_if_set_additional_experiments( weight_column=_TEST_TRAINING_WEIGHT_COLUMN, budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, + create_request_timeout=None, sync=sync, ) @@ -655,6 +733,7 @@ def 
test_run_call_pipeline_service_create_if_set_additional_experiments( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -691,6 +770,7 @@ def test_run_call_pipeline_service_create_with_column_specs( weight_column=_TEST_TRAINING_WEIGHT_COLUMN, budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, + create_request_timeout=None, sync=sync, ) @@ -714,6 +794,7 @@ def test_run_call_pipeline_service_create_with_column_specs( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -789,6 +870,7 @@ def test_run_call_pipeline_service_create_with_column_specs_not_auto( weight_column=_TEST_TRAINING_WEIGHT_COLUMN, budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, + create_request_timeout=None, sync=sync, ) @@ -812,6 +894,7 @@ def test_run_call_pipeline_service_create_with_column_specs_not_auto( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) @pytest.mark.usefixtures( @@ -1097,6 +1180,7 @@ def test_splits_fraction( validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION, test_fraction_split=_TEST_FRACTION_SPLIT_TEST, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, + create_request_timeout=None, sync=sync, ) @@ -1130,6 +1214,7 @@ def test_splits_fraction( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -1170,6 +1255,7 @@ def test_splits_timestamp( test_fraction_split=_TEST_FRACTION_SPLIT_TEST, timestamp_split_column_name=_TEST_SPLIT_TIMESTAMP_COLUMN_NAME, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, + create_request_timeout=None, sync=sync, ) @@ -1204,6 +1290,7 @@ def test_splits_timestamp( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -1241,6 +1328,7 @@ def test_splits_predefined( model_display_name=_TEST_MODEL_DISPLAY_NAME, predefined_split_column_name=_TEST_SPLIT_PREDEFINED_COLUMN_NAME, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, + create_request_timeout=None, sync=sync, ) @@ -1272,6 +1360,7 @@ def test_splits_predefined( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -1308,6 +1397,7 @@ def test_splits_default( weight_column=_TEST_TRAINING_WEIGHT_COLUMN, model_display_name=_TEST_MODEL_DISPLAY_NAME, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, + create_request_timeout=None, sync=sync, ) @@ -1335,4 +1425,5 @@ def test_splits_default( mock_pipeline_service_create.assert_called_once_with( parent=initializer.global_config.common_location_path(), training_pipeline=true_training_pipeline, + timeout=None, ) diff --git 
a/tests/unit/aiplatform/test_featurestores.py b/tests/unit/aiplatform/test_featurestores.py
index 1d25a5a024..aa8e6ef6ab 100644
--- a/tests/unit/aiplatform/test_featurestores.py
+++ b/tests/unit/aiplatform/test_featurestores.py
@@ -2085,7 +2085,7 @@ def test_read_single_entity_with_timeout(self, read_feature_values_mock):
                 id_matcher=gca_feature_selector.IdMatcher(ids=["*"])
             ),
         )
-        result = my_entity_type.read(
+        my_entity_type.read(
             entity_ids=_TEST_READ_ENTITY_ID, read_request_timeout=180.0,
         )
         read_feature_values_mock.assert_called_once_with(

From 0b30635120a509db7f8c5a40a80355dca3abdcbb Mon Sep 17 00:00:00 2001
From: Sara Robinson
Date: Thu, 31 Mar 2022 13:04:41 -0400
Subject: [PATCH 17/19] update handling of import_request_timeout arg

---
 google/cloud/aiplatform/datasets/dataset.py | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/google/cloud/aiplatform/datasets/dataset.py b/google/cloud/aiplatform/datasets/dataset.py
index 7c63ad7079..a056df9a4c 100644
--- a/google/cloud/aiplatform/datasets/dataset.py
+++ b/google/cloud/aiplatform/datasets/dataset.py
@@ -263,6 +263,7 @@ def _create_and_import(
         encryption_spec: Optional[gca_encryption_spec.EncryptionSpec] = None,
         sync: bool = True,
         create_request_timeout: Optional[float] = None,
+        import_request_timeout: Optional[float] = None,
     ) -> "_Dataset":
         """Creates a new dataset and optionally imports data into dataset when
         source and import_schema_uri are passed.
@@ -320,6 +321,8 @@ def _create_and_import(
                 be immediately returned and synced when the Future has completed.
             create_request_timeout (float):
                 Optional. The timeout for the create request in seconds.
+            import_request_timeout (float):
+                Optional. The timeout for the import request in seconds.

         Returns:
             dataset (Dataset):
@@ -352,13 +355,16 @@ def _create_and_import(
         )

         # Import if import datasource is DatasourceImportable
-        # import_request_timeout is None since user is issuing a single request with create and import
         if isinstance(datasource, _datasources.DatasourceImportable):
-            dataset_obj._import_and_wait(datasource, import_request_timeout=None)
+            dataset_obj._import_and_wait(datasource, import_request_timeout=import_request_timeout)

         return dataset_obj

-    def _import_and_wait(self, datasource, import_request_timeout):
+    def _import_and_wait(
+        self,
+        datasource,
+        import_request_timeout: Optional[float] = None,
+    ):
         _LOGGER.log_action_start_against_resource(
             "Importing",
             "data",
@@ -463,6 +469,8 @@ def _import(
         Args:
             datasource (_datasources.DatasourceImportable):
                 Required. Datasource for importing data to an existing dataset for Vertex AI.
+            import_request_timeout (float):
+                Optional. The timeout for the import request in seconds.

         Returns:
             operation (Operation):

From bd4df2eec49088c145f5be95b9d3de317777e85b Mon Sep 17 00:00:00 2001
From: Sara Robinson
Date: Fri, 1 Apr 2022 09:15:17 -0400
Subject: [PATCH 18/19] update timeout arg in tests and run linter

---
 google/cloud/aiplatform/datasets/dataset.py   |   4 +-
 .../aiplatform/featurestore/entity_type.py    |  10 +-
 tests/system/aiplatform/test_e2e_tabular.py   |   4 +-
 tests/system/aiplatform/test_tensorboard.py   |   3 +-
 .../test_automl_forecasting_training_jobs.py  |  12 +-
 .../test_automl_image_training_jobs.py        |  15 ++-
 .../test_automl_tabular_training_jobs.py      |  22 ++--
 .../test_automl_text_training_jobs.py         |  17 +--
 .../test_automl_video_training_jobs.py        |  13 ++-
 tests/unit/aiplatform/test_custom_job.py      |  28 +++--
 tests/unit/aiplatform/test_datasets.py        |  94 +++++++++++-----
 tests/unit/aiplatform/test_end_to_end.py      |  16 +--
 tests/unit/aiplatform/test_endpoints.py       |  52 +++++----
 tests/unit/aiplatform/test_featurestores.py   |  93 ++++++++-------
 .../test_hyperparameter_tuning_job.py         |   8 +-
 tests/unit/aiplatform/test_jobs.py            |   8 +-
 tests/unit/aiplatform/test_models.py          |  34 +++---
 tests/unit/aiplatform/test_pipeline_jobs.py   |   9 +-
 tests/unit/aiplatform/test_tensorboard.py     |  24 ++--
 tests/unit/aiplatform/test_training_jobs.py   | 106 +++++++++---------
 20 files changed, 328 insertions(+), 244 deletions(-)

diff --git a/google/cloud/aiplatform/datasets/dataset.py b/google/cloud/aiplatform/datasets/dataset.py
index a056df9a4c..757af1922d 100644
--- a/google/cloud/aiplatform/datasets/dataset.py
+++ b/google/cloud/aiplatform/datasets/dataset.py
@@ -356,7 +356,9 @@ def _create_and_import(

         # Import if import datasource is DatasourceImportable
         if isinstance(datasource, _datasources.DatasourceImportable):
-            dataset_obj._import_and_wait(datasource, import_request_timeout=import_request_timeout)
+            dataset_obj._import_and_wait(
+                datasource, import_request_timeout=import_request_timeout
+            )

         return dataset_obj

diff --git a/google/cloud/aiplatform/featurestore/entity_type.py b/google/cloud/aiplatform/featurestore/entity_type.py
index 00b3b424d8..edd0c7433b 100644
--- a/google/cloud/aiplatform/featurestore/entity_type.py
+++ b/google/cloud/aiplatform/featurestore/entity_type.py
@@ -1447,10 +1447,12 @@ def read(
                     feature_selector=feature_selector,
                 )
             )
-        read_feature_values_response = self._featurestore_online_client.read_feature_values(
-            request=read_feature_values_request,
-            metadata=request_metadata,
-            timeout=read_request_timeout,
+        read_feature_values_response = (
+            self._featurestore_online_client.read_feature_values(
+                request=read_feature_values_request,
+                metadata=request_metadata,
+                timeout=read_request_timeout,
+            )
         )
         header = read_feature_values_response.header
         entity_views = [read_feature_values_response.entity_view]
diff --git a/tests/system/aiplatform/test_e2e_tabular.py b/tests/system/aiplatform/test_e2e_tabular.py
index e36ce5458a..31b9bb9769 100644
--- a/tests/system/aiplatform/test_e2e_tabular.py
+++ b/tests/system/aiplatform/test_e2e_tabular.py
@@ -82,8 +82,8 @@ def test_end_to_end_tabular(self, shared_state):
         ds = aiplatform.TabularDataset.create(
             display_name=self._make_display_name("dataset"),
             gcs_source=[dataset_gcs_source],
-            create_request_timeout=180.0,
-            sync=False,
+            sync=False,
+            create_request_timeout=180.0,
         )

         shared_state["resources"].extend([ds])
@@ -113,8 +113,8 @@ def test_end_to_end_tabular(self, shared_state):
             timeout=1234,
             restart_job_on_worker_restart=True,
             enable_web_access=True,
-            create_request_timeout=None,
-            sync=False,
+            sync=False,
+            create_request_timeout=None,
         )

         automl_model = automl_job.run(
diff --git a/tests/system/aiplatform/test_tensorboard.py b/tests/system/aiplatform/test_tensorboard.py index 0cfcd2422c..501205122f 100644 --- a/tests/system/aiplatform/test_tensorboard.py +++ b/tests/system/aiplatform/test_tensorboard.py @@ -33,7 +33,8 @@ def test_create_and_get_tensorboard(self, shared_state): display_name = self._make_display_name("tensorboard") tb = aiplatform.Tensorboard.create( - display_name=display_name, create_request_timeout=None, + display_name=display_name, + create_request_timeout=None, ) shared_state["resources"] = [tb] diff --git a/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py b/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py index 99241d9573..26224bea2a 100644 --- a/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py @@ -279,8 +279,8 @@ def test_run_call_pipeline_service_create( quantiles=_TEST_TRAINING_QUANTILES, validation_options=_TEST_TRAINING_VALIDATION_OPTIONS, additional_experiments=_TEST_ADDITIONAL_EXPERIMENTS, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -367,8 +367,8 @@ def test_run_call_pipeline_service_create_with_timeout( quantiles=_TEST_TRAINING_QUANTILES, validation_options=_TEST_TRAINING_VALIDATION_OPTIONS, additional_experiments=_TEST_ADDITIONAL_EXPERIMENTS, - create_request_timeout=180.0, sync=sync, + create_request_timeout=180.0, ) if not sync: @@ -437,8 +437,8 @@ def test_run_call_pipeline_if_no_model_display_name_nor_model_labels( export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION, quantiles=_TEST_TRAINING_QUANTILES, validation_options=_TEST_TRAINING_VALIDATION_OPTIONS, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -507,8 +507,8 @@ def test_run_call_pipeline_if_set_additional_experiments( export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION, quantiles=_TEST_TRAINING_QUANTILES, validation_options=_TEST_TRAINING_VALIDATION_OPTIONS, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -797,8 +797,8 @@ def test_splits_predefined( export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION, quantiles=_TEST_TRAINING_QUANTILES, validation_options=_TEST_TRAINING_VALIDATION_OPTIONS, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -878,8 +878,8 @@ def test_splits_default( export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION, quantiles=_TEST_TRAINING_QUANTILES, validation_options=_TEST_TRAINING_VALIDATION_OPTIONS, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: diff --git a/tests/unit/aiplatform/test_automl_image_training_jobs.py b/tests/unit/aiplatform/test_automl_image_training_jobs.py index 84d95e0523..bb9c1c2de3 100644 --- a/tests/unit/aiplatform/test_automl_image_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_image_training_jobs.py @@ -282,8 +282,8 @@ def test_run_call_pipeline_service_create( test_filter_split=_TEST_FILTER_SPLIT_TEST, budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -350,7 +350,9 @@ def 
test_run_call_pipeline_service_create_with_timeout( ) job = training_jobs.AutoMLImageTrainingJob( - display_name=_TEST_DISPLAY_NAME, base_model=mock_model, labels=_TEST_LABELS, + display_name=_TEST_DISPLAY_NAME, + base_model=mock_model, + labels=_TEST_LABELS, ) model_from_job = job.run( @@ -362,8 +364,8 @@ def test_run_call_pipeline_service_create_with_timeout( test_filter_split=_TEST_FILTER_SPLIT_TEST, budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, - create_request_timeout=180.0, sync=sync, + create_request_timeout=180.0, ) if not sync: @@ -383,7 +385,8 @@ def test_run_call_pipeline_service_create_with_timeout( ) true_input_data_config = gca_training_pipeline.InputDataConfig( - filter_split=true_filter_split, dataset_id=mock_dataset_image.name, + filter_split=true_filter_split, + dataset_id=mock_dataset_image.name, ) true_training_pipeline = gca_training_pipeline.TrainingPipeline( @@ -593,8 +596,8 @@ def test_splits_fraction( test_fraction_split=_TEST_FRACTION_SPLIT_TEST, budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -732,8 +735,8 @@ def test_splits_default( model_display_name=_TEST_MODEL_DISPLAY_NAME, budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: diff --git a/tests/unit/aiplatform/test_automl_tabular_training_jobs.py b/tests/unit/aiplatform/test_automl_tabular_training_jobs.py index a5ceae9516..b15dbd6a0e 100644 --- a/tests/unit/aiplatform/test_automl_tabular_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_tabular_training_jobs.py @@ -340,8 +340,8 @@ def test_run_call_pipeline_service_create( budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, additional_experiments=_TEST_ADDITIONAL_EXPERIMENTS, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) job.wait_for_resource_creation() @@ -425,8 +425,8 @@ def test_run_call_pipeline_service_create_with_timeout( budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, additional_experiments=_TEST_ADDITIONAL_EXPERIMENTS, - create_request_timeout=180.0, sync=sync, + create_request_timeout=180.0, ) job.wait_for_resource_creation() @@ -494,8 +494,8 @@ def test_run_call_pipeline_service_create_with_export_eval_data_items( export_evaluated_data_items=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS, export_evaluated_data_items_bigquery_destination_uri=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI, export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) job.wait_for_resource_creation() @@ -641,8 +641,8 @@ def test_run_call_pipeline_service_create_if_no_column_transformations( weight_column=_TEST_TRAINING_WEIGHT_COLUMN, budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) job.wait_for_resource_creation() @@ -710,8 +710,8 @@ def 
test_run_call_pipeline_service_create_if_set_additional_experiments( weight_column=_TEST_TRAINING_WEIGHT_COLUMN, budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) job.wait_for_resource_creation() @@ -779,8 +779,8 @@ def test_run_call_pipeline_service_create_with_column_specs( weight_column=_TEST_TRAINING_WEIGHT_COLUMN, budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -883,8 +883,8 @@ def test_run_call_pipeline_service_create_with_column_specs_not_auto( weight_column=_TEST_TRAINING_WEIGHT_COLUMN, budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -1193,8 +1193,8 @@ def test_splits_fraction( validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION, test_fraction_split=_TEST_FRACTION_SPLIT_TEST, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -1269,8 +1269,8 @@ def test_splits_timestamp( test_fraction_split=_TEST_FRACTION_SPLIT_TEST, timestamp_split_column_name=_TEST_SPLIT_TIMESTAMP_COLUMN_NAME, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -1343,8 +1343,8 @@ def test_splits_predefined( model_display_name=_TEST_MODEL_DISPLAY_NAME, predefined_split_column_name=_TEST_SPLIT_PREDEFINED_COLUMN_NAME, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -1413,8 +1413,8 @@ def test_splits_default( weight_column=_TEST_TRAINING_WEIGHT_COLUMN, model_display_name=_TEST_MODEL_DISPLAY_NAME, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: diff --git a/tests/unit/aiplatform/test_automl_text_training_jobs.py b/tests/unit/aiplatform/test_automl_text_training_jobs.py index 816378e5a7..0e08954df7 100644 --- a/tests/unit/aiplatform/test_automl_text_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_text_training_jobs.py @@ -286,8 +286,8 @@ def test_init_aiplatform_with_encryption_key_name_and_create_training_job( model_from_job = job.run( dataset=mock_dataset_text, model_display_name=_TEST_MODEL_DISPLAY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -346,8 +346,8 @@ def test_run_call_pipeline_service_create_classification( training_filter_split=_TEST_FILTER_SPLIT_TRAINING, validation_filter_split=_TEST_FILTER_SPLIT_VALIDATION, test_filter_split=_TEST_FILTER_SPLIT_TEST, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -424,8 +424,8 @@ def test_run_call_pipeline_service_create_classification_with_timeout( training_filter_split=_TEST_FILTER_SPLIT_TRAINING, validation_filter_split=_TEST_FILTER_SPLIT_VALIDATION, test_filter_split=_TEST_FILTER_SPLIT_TEST, - create_request_timeout=180.0, sync=sync, + create_request_timeout=180.0, ) if not sync: @@ -444,7 +444,8 @@ def test_run_call_pipeline_service_create_classification_with_timeout( ) 
true_input_data_config = gca_training_pipeline.InputDataConfig( - filter_split=true_filter_split, dataset_id=mock_dataset_text.name, + filter_split=true_filter_split, + dataset_id=mock_dataset_text.name, ) true_training_pipeline = gca_training_pipeline.TrainingPipeline( @@ -489,8 +490,8 @@ def test_run_call_pipeline_service_create_extraction( training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING, validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION, test_fraction_split=_TEST_FRACTION_SPLIT_TEST, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -563,8 +564,8 @@ def test_run_call_pipeline_service_create_sentiment( training_filter_split=_TEST_FILTER_SPLIT_TRAINING, validation_filter_split=_TEST_FILTER_SPLIT_VALIDATION, test_filter_split=_TEST_FILTER_SPLIT_TEST, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -789,8 +790,8 @@ def test_splits_fraction( training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING, validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION, test_fraction_split=_TEST_FRACTION_SPLIT_TEST, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -928,8 +929,8 @@ def test_splits_default( model_from_job = job.run( dataset=mock_dataset_text, model_display_name=_TEST_MODEL_DISPLAY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: diff --git a/tests/unit/aiplatform/test_automl_video_training_jobs.py b/tests/unit/aiplatform/test_automl_video_training_jobs.py index b46955b375..592640d860 100644 --- a/tests/unit/aiplatform/test_automl_video_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_video_training_jobs.py @@ -252,8 +252,8 @@ def test_init_aiplatform_with_encryption_key_name_and_create_training_job( model_from_job = job.run( dataset=mock_dataset_video, model_display_name=_TEST_MODEL_DISPLAY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -394,8 +394,8 @@ def test_splits_filter( model_display_name=_TEST_MODEL_DISPLAY_NAME, training_filter_split=_TEST_FILTER_SPLIT_TRAINING, test_filter_split=_TEST_FILTER_SPLIT_TEST, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -462,8 +462,8 @@ def test_splits_default( model_from_job = job.run( dataset=mock_dataset_video, model_display_name=_TEST_MODEL_DISPLAY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -523,8 +523,8 @@ def test_run_call_pipeline_service_create( model_labels=_TEST_MODEL_LABELS, training_filter_split=_TEST_FILTER_SPLIT_TRAINING, test_filter_split=_TEST_FILTER_SPLIT_TEST, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -602,8 +602,8 @@ def test_run_call_pipeline_service_create_with_timeout( model_labels=_TEST_MODEL_LABELS, training_filter_split=_TEST_FILTER_SPLIT_TRAINING, test_filter_split=_TEST_FILTER_SPLIT_TEST, - create_request_timeout=180.0, sync=sync, + create_request_timeout=180.0, ) if not sync: @@ -623,7 +623,8 @@ def test_run_call_pipeline_service_create_with_timeout( ) true_input_data_config = gca_training_pipeline.InputDataConfig( - filter_split=true_filter_split, dataset_id=mock_dataset_video.name, + filter_split=true_filter_split, + dataset_id=mock_dataset_video.name, ) true_training_pipeline = gca_training_pipeline.TrainingPipeline( diff --git a/tests/unit/aiplatform/test_custom_job.py b/tests/unit/aiplatform/test_custom_job.py index 252f6845a6..d9173b06f7 
100644 --- a/tests/unit/aiplatform/test_custom_job.py +++ b/tests/unit/aiplatform/test_custom_job.py @@ -295,8 +295,8 @@ def test_create_custom_job(self, create_custom_job_mock, get_custom_job_mock, sy network=_TEST_NETWORK, timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) job.wait_for_resource_creation() @@ -308,7 +308,9 @@ def test_create_custom_job(self, create_custom_job_mock, get_custom_job_mock, sy expected_custom_job = _get_custom_job_proto() create_custom_job_mock.assert_called_once_with( - parent=_TEST_PARENT, custom_job=expected_custom_job, timeout=None, + parent=_TEST_PARENT, + custom_job=expected_custom_job, + timeout=None, ) assert job.job_spec == expected_custom_job.job_spec @@ -341,8 +343,8 @@ def test_create_custom_job_with_timeout( network=_TEST_NETWORK, timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, - create_request_timeout=180.0, sync=sync, + create_request_timeout=180.0, ) job.wait_for_resource_creation() @@ -354,7 +356,9 @@ def test_create_custom_job_with_timeout( expected_custom_job = _get_custom_job_proto() create_custom_job_mock.assert_called_once_with( - parent=_TEST_PARENT, custom_job=expected_custom_job, timeout=180.0, + parent=_TEST_PARENT, + custom_job=expected_custom_job, + timeout=180.0, ) @pytest.mark.parametrize("sync", [True, False]) @@ -385,8 +389,8 @@ def test_run_custom_job_with_fail_raises( network=_TEST_NETWORK, timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) job.wait() @@ -398,7 +402,9 @@ def test_run_custom_job_with_fail_raises( expected_custom_job = _get_custom_job_proto() create_custom_job_mock.assert_called_once_with( - parent=_TEST_PARENT, custom_job=expected_custom_job, timeout=None, + parent=_TEST_PARENT, + custom_job=expected_custom_job, + timeout=None, ) assert job.job_spec == expected_custom_job.job_spec @@ -561,8 +567,8 @@ def test_create_custom_job_with_enable_web_access( network=_TEST_NETWORK, timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) job.wait_for_resource_creation() @@ -576,7 +582,9 @@ def test_create_custom_job_with_enable_web_access( expected_custom_job = _get_custom_job_proto_with_enable_web_access() create_custom_job_mock_with_enable_web_access.assert_called_once_with( - parent=_TEST_PARENT, custom_job=expected_custom_job, timeout=None, + parent=_TEST_PARENT, + custom_job=expected_custom_job, + timeout=None, ) assert job.job_spec == expected_custom_job.job_spec @@ -640,7 +648,9 @@ def test_create_custom_job_with_tensorboard( expected_custom_job.job_spec.tensorboard = _TEST_TENSORBOARD_NAME create_custom_job_mock_with_tensorboard.assert_called_once_with( - parent=_TEST_PARENT, custom_job=expected_custom_job, timeout=None, + parent=_TEST_PARENT, + custom_job=expected_custom_job, + timeout=None, ) expected_custom_job = _get_custom_job_proto() diff --git a/tests/unit/aiplatform/test_datasets.py b/tests/unit/aiplatform/test_datasets.py index f379392028..7b907c667e 100644 --- a/tests/unit/aiplatform/test_datasets.py +++ b/tests/unit/aiplatform/test_datasets.py @@ -607,8 +607,8 @@ def test_create_dataset_nontabular(self, create_dataset_mock, sync): display_name=_TEST_DISPLAY_NAME, metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR, 
encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -637,8 +637,8 @@ def test_create_dataset_nontabular_with_timeout(self, create_dataset_mock, sync) display_name=_TEST_DISPLAY_NAME, metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - create_request_timeout=180.0, sync=sync, + create_request_timeout=180.0, ) if not sync: @@ -698,8 +698,8 @@ def test_create_and_import_dataset( import_schema_uri=_TEST_IMPORT_SCHEMA_URI, data_item_labels=_TEST_DATA_LABEL_ITEMS, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -726,7 +726,9 @@ def test_create_and_import_dataset( ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config], timeout=None, + name=_TEST_NAME, + import_configs=[expected_import_config], + timeout=None, ) expected_dataset.name = _TEST_NAME @@ -743,8 +745,8 @@ def test_import_data(self, import_data_mock, sync): gcs_source=_TEST_SOURCE_URI_GCS, import_schema_uri=_TEST_IMPORT_SCHEMA_URI, data_item_labels=_TEST_DATA_LABEL_ITEMS, - import_request_timeout=None, sync=sync, + import_request_timeout=None, ) if not sync: @@ -757,7 +759,9 @@ def test_import_data(self, import_data_mock, sync): ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config], timeout=None, + name=_TEST_NAME, + import_configs=[expected_import_config], + timeout=None, ) @pytest.mark.usefixtures("get_dataset_mock") @@ -771,8 +775,8 @@ def test_import_data_with_timeout(self, import_data_mock, sync): gcs_source=_TEST_SOURCE_URI_GCS, import_schema_uri=_TEST_IMPORT_SCHEMA_URI, data_item_labels=_TEST_DATA_LABEL_ITEMS, - import_request_timeout=180.0, sync=sync, + import_request_timeout=180.0, ) if not sync: @@ -785,7 +789,9 @@ def test_import_data_with_timeout(self, import_data_mock, sync): ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config], timeout=180.0, + name=_TEST_NAME, + import_configs=[expected_import_config], + timeout=180.0, ) @pytest.mark.usefixtures("get_dataset_mock") @@ -823,8 +829,8 @@ def test_create_then_import( gcs_source=_TEST_SOURCE_URI_GCS, import_schema_uri=_TEST_IMPORT_SCHEMA_URI, data_item_labels=_TEST_DATA_LABEL_ITEMS, - import_request_timeout=None, sync=sync, + import_request_timeout=None, ) if not sync: @@ -855,7 +861,9 @@ def test_create_then_import( ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config], timeout=None, + name=_TEST_NAME, + import_configs=[expected_import_config], + timeout=None, ) expected_dataset.name = _TEST_NAME @@ -905,7 +913,9 @@ def test_create_dataset(self, create_dataset_mock, sync): ) my_dataset = datasets.ImageDataset.create( - display_name=_TEST_DISPLAY_NAME, sync=sync, create_request_timeout=None, + display_name=_TEST_DISPLAY_NAME, + sync=sync, + create_request_timeout=None, ) if not sync: @@ -937,8 +947,8 @@ def test_create_and_import_dataset( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_IMAGE, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -963,7 +973,9 @@ def test_create_and_import_dataset( import_schema_uri=_TEST_IMPORT_SCHEMA_URI_IMAGE, ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, 
import_configs=[expected_import_config], timeout=None, + name=_TEST_NAME, + import_configs=[expected_import_config], + timeout=None, ) expected_dataset.name = _TEST_NAME @@ -992,7 +1004,9 @@ def test_import_data(self, import_data_mock, sync): ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config], timeout=None, + name=_TEST_NAME, + import_configs=[expected_import_config], + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -1005,15 +1019,15 @@ def test_create_then_import( my_dataset = datasets.ImageDataset.create( display_name=_TEST_DISPLAY_NAME, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) my_dataset.import_data( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_IMAGE, - import_request_timeout=None, sync=sync, + import_request_timeout=None, ) if not sync: @@ -1042,7 +1056,9 @@ def test_create_then_import( ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config], timeout=None, + name=_TEST_NAME, + import_configs=[expected_import_config], + timeout=None, ) expected_dataset.name = _TEST_NAME @@ -1177,8 +1193,8 @@ def test_create_dataset(self, create_dataset_mock, sync): display_name=_TEST_DISPLAY_NAME, bq_source=_TEST_SOURCE_URI_BQ, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -1323,8 +1339,8 @@ def test_create_dataset_with_labels(self, create_dataset_mock, sync): bq_source=_TEST_SOURCE_URI_BQ, labels=_TEST_LABELS, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -1376,7 +1392,9 @@ def test_create_dataset(self, create_dataset_mock, sync): ) my_dataset = datasets.TextDataset.create( - display_name=_TEST_DISPLAY_NAME, sync=sync, create_request_timeout=None, + display_name=_TEST_DISPLAY_NAME, + sync=sync, + create_request_timeout=None, ) if not sync: @@ -1408,8 +1426,8 @@ def test_create_and_import_dataset( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_TEXT, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -1434,7 +1452,9 @@ def test_create_and_import_dataset( import_schema_uri=_TEST_IMPORT_SCHEMA_URI_TEXT, ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config], timeout=None, + name=_TEST_NAME, + import_configs=[expected_import_config], + timeout=None, ) expected_dataset.name = _TEST_NAME @@ -1500,7 +1520,9 @@ def test_import_data(self, import_data_mock, sync): ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config], timeout=None, + name=_TEST_NAME, + import_configs=[expected_import_config], + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -1513,8 +1535,8 @@ def test_create_then_import( my_dataset = datasets.TextDataset.create( display_name=_TEST_DISPLAY_NAME, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) my_dataset.import_data( @@ -1550,7 +1572,9 @@ def test_create_then_import( ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config], timeout=None, + name=_TEST_NAME, + import_configs=[expected_import_config], + timeout=None, ) 
expected_dataset.name = _TEST_NAME @@ -1619,7 +1643,9 @@ def test_create_dataset(self, create_dataset_mock, sync): ) my_dataset = datasets.VideoDataset.create( - display_name=_TEST_DISPLAY_NAME, sync=sync, create_request_timeout=None, + display_name=_TEST_DISPLAY_NAME, + sync=sync, + create_request_timeout=None, ) if not sync: @@ -1651,8 +1677,8 @@ def test_create_and_import_dataset( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_VIDEO, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -1677,7 +1703,9 @@ def test_create_and_import_dataset( import_schema_uri=_TEST_IMPORT_SCHEMA_URI_VIDEO, ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config], timeout=None, + name=_TEST_NAME, + import_configs=[expected_import_config], + timeout=None, ) expected_dataset.name = _TEST_NAME @@ -1706,7 +1734,9 @@ def test_import_data(self, import_data_mock, sync): ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config], timeout=None, + name=_TEST_NAME, + import_configs=[expected_import_config], + timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -1719,8 +1749,8 @@ def test_create_then_import( my_dataset = datasets.VideoDataset.create( display_name=_TEST_DISPLAY_NAME, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) my_dataset.import_data( @@ -1756,7 +1786,9 @@ def test_create_then_import( ) import_data_mock.assert_called_once_with( - name=_TEST_NAME, import_configs=[expected_import_config], timeout=None, + name=_TEST_NAME, + import_configs=[expected_import_config], + timeout=None, ) expected_dataset.name = _TEST_NAME diff --git a/tests/unit/aiplatform/test_end_to_end.py b/tests/unit/aiplatform/test_end_to_end.py index 13dd8c44e1..bc1bdba0cc 100644 --- a/tests/unit/aiplatform/test_end_to_end.py +++ b/tests/unit/aiplatform/test_end_to_end.py @@ -102,8 +102,8 @@ def test_dataset_create_to_model_predict( my_dataset = aiplatform.ImageDataset.create( display_name=test_datasets._TEST_DISPLAY_NAME, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) my_dataset.import_data( @@ -135,21 +135,21 @@ def test_dataset_create_to_model_predict( training_fraction_split=test_training_jobs._TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=test_training_jobs._TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=test_training_jobs._TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) created_endpoint = models.Endpoint.create( display_name=test_endpoints._TEST_DISPLAY_NAME, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) my_endpoint = model_from_job.deploy( encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - deploy_request_timeout=None, sync=sync, + deploy_request_timeout=None, ) endpoint_deploy_return = created_endpoint.deploy(model_from_job, sync=sync) @@ -328,16 +328,16 @@ def test_dataset_create_to_model_predict_with_pipeline_fail( my_dataset = aiplatform.ImageDataset.create( display_name=test_datasets._TEST_DISPLAY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) my_dataset.import_data( gcs_source=test_datasets._TEST_SOURCE_URI_GCS, 
import_schema_uri=test_datasets._TEST_IMPORT_SCHEMA_URI, data_item_labels=test_datasets._TEST_DATA_LABEL_ITEMS, - import_request_timeout=None, sync=sync, + import_request_timeout=None, ) job = aiplatform.CustomTrainingJob( @@ -351,8 +351,8 @@ def test_dataset_create_to_model_predict_with_pipeline_fail( created_endpoint = models.Endpoint.create( display_name=test_endpoints._TEST_DISPLAY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) model_from_job = job.run( @@ -367,8 +367,8 @@ def test_dataset_create_to_model_predict_with_pipeline_fail( training_fraction_split=test_training_jobs._TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=test_training_jobs._TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=test_training_jobs._TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) with pytest.raises(RuntimeError): diff --git a/tests/unit/aiplatform/test_endpoints.py b/tests/unit/aiplatform/test_endpoints.py index 2cd9ea7526..a0844fa9b3 100644 --- a/tests/unit/aiplatform/test_endpoints.py +++ b/tests/unit/aiplatform/test_endpoints.py @@ -533,7 +533,9 @@ def test_init_aiplatform_with_encryption_key_name_and_create_endpoint( encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, ) my_endpoint = models.Endpoint.create( - display_name=_TEST_DISPLAY_NAME, sync=sync, create_request_timeout=None, + display_name=_TEST_DISPLAY_NAME, + sync=sync, + create_request_timeout=None, ) if not sync: @@ -543,13 +545,10 @@ def test_init_aiplatform_with_encryption_key_name_and_create_endpoint( display_name=_TEST_DISPLAY_NAME, encryption_spec=_TEST_ENCRYPTION_SPEC ) create_endpoint_mock.assert_called_once_with( -<<<<<<< HEAD - parent=_TEST_PARENT, endpoint=expected_endpoint, metadata=(), timeout=None, -======= parent=_TEST_PARENT, endpoint=expected_endpoint, metadata=(), ->>>>>>> main + timeout=None, ) expected_endpoint.name = _TEST_ENDPOINT_NAME @@ -561,8 +560,8 @@ def test_create(self, create_endpoint_mock, sync): my_endpoint = models.Endpoint.create( display_name=_TEST_DISPLAY_NAME, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -572,13 +571,10 @@ def test_create(self, create_endpoint_mock, sync): display_name=_TEST_DISPLAY_NAME, encryption_spec=_TEST_ENCRYPTION_SPEC ) create_endpoint_mock.assert_called_once_with( -<<<<<<< HEAD - parent=_TEST_PARENT, endpoint=expected_endpoint, metadata=(), timeout=None, -======= parent=_TEST_PARENT, endpoint=expected_endpoint, metadata=(), ->>>>>>> main + timeout=None, ) expected_endpoint.name = _TEST_ENDPOINT_NAME @@ -590,8 +586,8 @@ def test_create_with_timeout(self, create_endpoint_mock, sync): my_endpoint = models.Endpoint.create( display_name=_TEST_DISPLAY_NAME, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - create_request_timeout=180.0, sync=sync, + create_request_timeout=180.0, ) if not sync: @@ -601,7 +597,10 @@ def test_create_with_timeout(self, create_endpoint_mock, sync): display_name=_TEST_DISPLAY_NAME, encryption_spec=_TEST_ENCRYPTION_SPEC ) create_endpoint_mock.assert_called_once_with( - parent=_TEST_PARENT, endpoint=expected_endpoint, metadata=(), timeout=180.0, + parent=_TEST_PARENT, + endpoint=expected_endpoint, + metadata=(), + timeout=180.0, ) @pytest.mark.usefixtures("get_empty_endpoint_mock") @@ -630,8 +629,8 @@ def test_create_with_description(self, create_endpoint_mock, sync): my_endpoint = models.Endpoint.create( display_name=_TEST_DISPLAY_NAME, description=_TEST_DESCRIPTION, 
- create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: my_endpoint.wait() @@ -641,7 +640,10 @@ def test_create_with_description(self, create_endpoint_mock, sync): description=_TEST_DESCRIPTION, ) create_endpoint_mock.assert_called_once_with( - parent=_TEST_PARENT, endpoint=expected_endpoint, metadata=(), timeout=None, + parent=_TEST_PARENT, + endpoint=expected_endpoint, + metadata=(), + timeout=None, ) @pytest.mark.usefixtures("get_endpoint_mock") @@ -650,8 +652,8 @@ def test_create_with_labels(self, create_endpoint_mock, sync): my_endpoint = models.Endpoint.create( display_name=_TEST_DISPLAY_NAME, labels=_TEST_LABELS, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: my_endpoint.wait() @@ -661,7 +663,10 @@ def test_create_with_labels(self, create_endpoint_mock, sync): labels=_TEST_LABELS, ) create_endpoint_mock.assert_called_once_with( - parent=_TEST_PARENT, endpoint=expected_endpoint, metadata=(), timeout=None, + parent=_TEST_PARENT, + endpoint=expected_endpoint, + metadata=(), + timeout=None, ) @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock") @@ -673,7 +678,9 @@ def test_deploy(self, deploy_model_mock, sync): aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES ) test_endpoint.deploy( - test_model, sync=sync, deploy_request_timeout=None, + test_model, + sync=sync, + deploy_request_timeout=None, ) if not sync: @@ -705,14 +712,17 @@ def test_deploy_with_timeout(self, deploy_model_mock, sync): aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES ) test_endpoint.deploy( - test_model, sync=sync, deploy_request_timeout=180.0, + test_model, + sync=sync, + deploy_request_timeout=180.0, ) if not sync: test_endpoint.wait() automatic_resources = gca_machine_resources.AutomaticResources( - min_replica_count=1, max_replica_count=1, + min_replica_count=1, + max_replica_count=1, ) deployed_model = gca_endpoint.DeployedModel( automatic_resources=automatic_resources, @@ -930,8 +940,8 @@ def test_deploy_with_dedicated_resources(self, deploy_model_mock, sync): accelerator_type=_TEST_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, service_account=_TEST_SERVICE_ACCOUNT, - deploy_request_timeout=None, sync=sync, + deploy_request_timeout=None, ) if not sync: @@ -976,8 +986,8 @@ def test_deploy_with_explanations(self, deploy_model_with_explanations_mock, syn accelerator_count=_TEST_ACCELERATOR_COUNT, explanation_metadata=_TEST_EXPLANATION_METADATA, explanation_parameters=_TEST_EXPLANATION_PARAMETERS, - deploy_request_timeout=None, sync=sync, + deploy_request_timeout=None, ) if not sync: diff --git a/tests/unit/aiplatform/test_featurestores.py b/tests/unit/aiplatform/test_featurestores.py index 7827d67aba..6d96e5ce45 100644 --- a/tests/unit/aiplatform/test_featurestores.py +++ b/tests/unit/aiplatform/test_featurestores.py @@ -881,7 +881,8 @@ def test_update_featurestore(self, update_featurestore_mock): featurestore_name=_TEST_FEATURESTORE_ID ) my_featurestore.update( - labels=_TEST_LABELS_UPDATE, update_request_timeout=None, + labels=_TEST_LABELS_UPDATE, + update_request_timeout=None, ) expected_featurestore = gca_featurestore.Featurestore( @@ -904,7 +905,8 @@ def test_update_featurestore_with_timeout(self, update_featurestore_mock): featurestore_name=_TEST_FEATURESTORE_ID ) my_featurestore.update( - labels=_TEST_LABELS_UPDATE, update_request_timeout=180.0, + labels=_TEST_LABELS_UPDATE, + update_request_timeout=180.0, ) expected_featurestore = gca_featurestore.Featurestore( @@ -1049,8 
+1051,8 @@ def test_create_entity_type(self, create_entity_type_mock, sync): entity_type_id=_TEST_ENTITY_TYPE_ID, description=_TEST_DESCRIPTION, labels=_TEST_LABELS, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -1080,15 +1082,16 @@ def test_create_entity_type_with_timeout(self, create_entity_type_mock, sync): entity_type_id=_TEST_ENTITY_TYPE_ID, description=_TEST_DESCRIPTION, labels=_TEST_LABELS, - create_request_timeout=180.0, sync=sync, + create_request_timeout=180.0, ) if not sync: my_entity_type.wait() expected_entity_type = gca_entity_type.EntityType( - labels=_TEST_LABELS, description=_TEST_DESCRIPTION, + labels=_TEST_LABELS, + description=_TEST_DESCRIPTION, ) create_entity_type_mock.assert_called_once_with( parent=_TEST_FEATURESTORE_NAME, @@ -1340,8 +1343,8 @@ def test_batch_serve_to_bq(self, batch_read_feature_values_mock, sync): bq_destination_output_uri=_TEST_BQ_DESTINATION_URI, serving_feature_ids=_TEST_SERVING_FEATURE_IDS, read_instances_uri=_TEST_BQ_SOURCE_URI, - serve_request_timeout=None, sync=sync, + serve_request_timeout=None, ) if not sync: @@ -1372,21 +1375,23 @@ def test_batch_serve_to_bq_with_timeout(self, batch_read_feature_values_mock, sy ), ] - expected_batch_read_feature_values_request = gca_featurestore_service.BatchReadFeatureValuesRequest( - featurestore=my_featurestore.resource_name, - destination=gca_featurestore_service.FeatureValueDestination( - bigquery_destination=_TEST_BQ_DESTINATION, - ), - entity_type_specs=expected_entity_type_specs, - bigquery_read_instances=_TEST_BQ_SOURCE, + expected_batch_read_feature_values_request = ( + gca_featurestore_service.BatchReadFeatureValuesRequest( + featurestore=my_featurestore.resource_name, + destination=gca_featurestore_service.FeatureValueDestination( + bigquery_destination=_TEST_BQ_DESTINATION, + ), + entity_type_specs=expected_entity_type_specs, + bigquery_read_instances=_TEST_BQ_SOURCE, + ) ) my_featurestore.batch_serve_to_bq( bq_destination_output_uri=_TEST_BQ_DESTINATION_URI, serving_feature_ids=_TEST_SERVING_FEATURE_IDS, read_instances_uri=_TEST_BQ_SOURCE_URI, - serve_request_timeout=180.0, sync=sync, + serve_request_timeout=180.0, ) if not sync: @@ -1433,8 +1438,8 @@ def test_batch_serve_to_gcs(self, batch_read_feature_values_mock, sync): gcs_destination_type=_TEST_GCS_DESTINATION_TYPE_TFRECORD, serving_feature_ids=_TEST_SERVING_FEATURE_IDS, read_instances_uri=_TEST_GCS_CSV_SOURCE_URI, - serve_request_timeout=None, sync=sync, + serve_request_timeout=None, ) if not sync: @@ -1594,7 +1599,8 @@ def test_update_entity_type(self, update_entity_type_mock): my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME) my_entity_type.update( - labels=_TEST_LABELS_UPDATE, update_request_timeout=None, + labels=_TEST_LABELS_UPDATE, + update_request_timeout=None, ) expected_entity_type = gca_entity_type.EntityType( @@ -1689,12 +1695,9 @@ def test_create_feature(self, create_feature_mock, sync): ) create_feature_mock.assert_called_once_with( -<<<<<<< HEAD - request=expected_request, metadata=_TEST_REQUEST_METADATA, timeout=None, -======= request=expected_request, metadata=_TEST_REQUEST_METADATA, ->>>>>>> main + timeout=None, ) @pytest.mark.usefixtures("get_entity_type_mock") @@ -1836,8 +1839,8 @@ def test_ingest_from_bq(self, import_feature_values_mock, sync): feature_time=_TEST_FEATURE_TIME_FIELD, bq_source_uri=_TEST_BQ_SOURCE_URI, feature_source_fields=_TEST_IMPORTING_FEATURE_SOURCE_FIELDS, - ingest_request_timeout=None, sync=sync, + 
ingest_request_timeout=None, ) if not sync: @@ -1873,22 +1876,25 @@ def test_ingest_from_bq_with_timeout(self, import_feature_values_mock, sync): feature_time=_TEST_FEATURE_TIME_FIELD, bq_source_uri=_TEST_BQ_SOURCE_URI, feature_source_fields=_TEST_IMPORTING_FEATURE_SOURCE_FIELDS, - ingest_request_timeout=180.0, sync=sync, + ingest_request_timeout=180.0, ) if not sync: my_entity_type.wait() - true_import_feature_values_request = gca_featurestore_service.ImportFeatureValuesRequest( - entity_type=_TEST_ENTITY_TYPE_NAME, - feature_specs=[ - gca_featurestore_service.ImportFeatureValuesRequest.FeatureSpec( - id="my_feature_id_1", source_field="my_feature_id_1_source_field" - ), - ], - bigquery_source=_TEST_BQ_SOURCE, - feature_time_field=_TEST_FEATURE_TIME_FIELD, + true_import_feature_values_request = ( + gca_featurestore_service.ImportFeatureValuesRequest( + entity_type=_TEST_ENTITY_TYPE_NAME, + feature_specs=[ + gca_featurestore_service.ImportFeatureValuesRequest.FeatureSpec( + id="my_feature_id_1", + source_field="my_feature_id_1_source_field", + ), + ], + bigquery_source=_TEST_BQ_SOURCE, + feature_time_field=_TEST_FEATURE_TIME_FIELD, + ) ) import_feature_values_mock.assert_called_once_with( request=true_import_feature_values_request, @@ -1907,8 +1913,8 @@ def test_ingest_from_gcs(self, import_feature_values_mock, sync): feature_time=_TEST_FEATURE_TIME, gcs_source_uris=_TEST_GCS_AVRO_SOURCE_URIS, gcs_source_type=_TEST_GCS_SOURCE_TYPE_AVRO, - ingest_request_timeout=None, sync=sync, + ingest_request_timeout=None, ) if not sync: @@ -2121,7 +2127,8 @@ def test_read_single_entity(self, read_feature_values_mock): ) ) result = my_entity_type.read( - entity_ids=_TEST_READ_ENTITY_ID, read_request_timeout=None, + entity_ids=_TEST_READ_ENTITY_ID, + read_request_timeout=None, ) read_feature_values_mock.assert_called_once_with( request=expected_read_feature_values_request, @@ -2137,15 +2144,18 @@ def test_read_single_entity(self, read_feature_values_mock): def test_read_single_entity_with_timeout(self, read_feature_values_mock): aiplatform.init(project=_TEST_PROJECT) my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME) - expected_read_feature_values_request = gca_featurestore_online_service.ReadFeatureValuesRequest( - entity_type=my_entity_type.resource_name, - entity_id=_TEST_READ_ENTITY_ID, - feature_selector=gca_feature_selector.FeatureSelector( - id_matcher=gca_feature_selector.IdMatcher(ids=["*"]) - ), + expected_read_feature_values_request = ( + gca_featurestore_online_service.ReadFeatureValuesRequest( + entity_type=my_entity_type.resource_name, + entity_id=_TEST_READ_ENTITY_ID, + feature_selector=gca_feature_selector.FeatureSelector( + id_matcher=gca_feature_selector.IdMatcher(ids=["*"]) + ), + ) ) my_entity_type.read( - entity_ids=_TEST_READ_ENTITY_ID, read_request_timeout=180.0, + entity_ids=_TEST_READ_ENTITY_ID, + read_request_timeout=180.0, ) read_feature_values_mock.assert_called_once_with( request=expected_read_feature_values_request, @@ -2666,7 +2676,8 @@ def test_update_feature(self, update_feature_mock): my_feature = aiplatform.Feature(feature_name=_TEST_FEATURE_NAME) my_feature.update( - labels=_TEST_LABELS_UPDATE, update_request_timeout=None, + labels=_TEST_LABELS_UPDATE, + update_request_timeout=None, ) expected_feature = gca_feature.Feature( diff --git a/tests/unit/aiplatform/test_hyperparameter_tuning_job.py b/tests/unit/aiplatform/test_hyperparameter_tuning_job.py index ae0a07a777..fd7e58b34b 100644 --- 
a/tests/unit/aiplatform/test_hyperparameter_tuning_job.py +++ b/tests/unit/aiplatform/test_hyperparameter_tuning_job.py @@ -403,8 +403,8 @@ def test_create_hyperparameter_tuning_job( network=_TEST_NETWORK, timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) job.wait() @@ -469,8 +469,8 @@ def test_create_hyperparameter_tuning_job_with_timeout( network=_TEST_NETWORK, timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, - create_request_timeout=180.0, sync=sync, + create_request_timeout=180.0, ) job.wait() @@ -713,8 +713,8 @@ def test_create_hyperparameter_tuning_job_with_tensorboard( timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, tensorboard=test_custom_job._TEST_TENSORBOARD_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) job.wait() @@ -785,8 +785,8 @@ def test_create_hyperparameter_tuning_job_with_enable_web_access( timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, enable_web_access=test_custom_job._TEST_ENABLE_WEB_ACCESS, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) job.wait() diff --git a/tests/unit/aiplatform/test_jobs.py b/tests/unit/aiplatform/test_jobs.py index 239690d6f3..6b52fb7902 100644 --- a/tests/unit/aiplatform/test_jobs.py +++ b/tests/unit/aiplatform/test_jobs.py @@ -507,8 +507,8 @@ def test_batch_predict_gcs_source_and_dest( job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME, gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE, gcs_destination_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) batch_prediction_job.wait_for_resource_creation() @@ -552,8 +552,8 @@ def test_batch_predict_gcs_source_and_dest_with_timeout( job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME, gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE, gcs_destination_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX, - create_request_timeout=180.0, sync=sync, + create_request_timeout=180.0, ) batch_prediction_job.wait_for_resource_creation() @@ -617,8 +617,8 @@ def test_batch_predict_gcs_source_bq_dest( job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME, gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE, bigquery_destination_prefix=_TEST_BATCH_PREDICTION_BQ_PREFIX, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) batch_prediction_job.wait_for_resource_creation() @@ -679,8 +679,8 @@ def test_batch_predict_with_all_args( explanation_parameters=_TEST_EXPLANATION_PARAMETERS, labels=_TEST_LABEL, credentials=creds, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) batch_prediction_job.wait_for_resource_creation() diff --git a/tests/unit/aiplatform/test_models.py b/tests/unit/aiplatform/test_models.py index c23a0da4bc..88d9be865f 100644 --- a/tests/unit/aiplatform/test_models.py +++ b/tests/unit/aiplatform/test_models.py @@ -550,8 +550,8 @@ def test_upload_uploads_and_gets_model( serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE, serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE, serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE, - upload_request_timeout=None, sync=sync, + upload_request_timeout=None, ) if not sync: @@ -583,8 +583,8 @@ def test_upload_with_timeout(self, upload_model_mock, get_model_mock, sync): my_model = models.Model.upload( 
display_name=_TEST_MODEL_NAME, serving_container_image_uri=_TEST_SERVING_CONTAINER_IMAGE, - upload_request_timeout=180.0, sync=sync, + upload_request_timeout=180.0, ) if not sync: @@ -595,7 +595,8 @@ def test_upload_with_timeout(self, upload_model_mock, get_model_mock, sync): ) managed_model = gca_model.Model( - display_name=_TEST_MODEL_NAME, container_spec=container_spec, + display_name=_TEST_MODEL_NAME, + container_spec=container_spec, ) upload_model_mock.assert_called_once_with( @@ -679,8 +680,8 @@ def test_upload_uploads_and_gets_model_with_all_args( explanation_metadata=_TEST_EXPLANATION_METADATA, explanation_parameters=_TEST_EXPLANATION_PARAMETERS, labels=_TEST_LABEL, - upload_request_timeout=None, sync=sync, + upload_request_timeout=None, ) if not sync: @@ -752,8 +753,8 @@ def test_upload_uploads_and_gets_model_with_custom_project( serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE, serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE, project=_TEST_PROJECT_2, - upload_request_timeout=None, sync=sync, + upload_request_timeout=None, ) if not sync: @@ -843,8 +844,8 @@ def test_upload_uploads_and_gets_model_with_custom_location( serving_container_predict_route=_TEST_SERVING_CONTAINER_PREDICTION_ROUTE, serving_container_health_route=_TEST_SERVING_CONTAINER_HEALTH_ROUTE, location=_TEST_LOCATION_2, - upload_request_timeout=None, sync=sync, + upload_request_timeout=None, ) if not sync: @@ -930,7 +931,8 @@ def test_deploy_with_timeout(self, deploy_model_mock, sync): test_endpoint.wait() automatic_resources = gca_machine_resources.AutomaticResources( - min_replica_count=1, max_replica_count=1, + min_replica_count=1, + max_replica_count=1, ) deployed_model = gca_endpoint.DeployedModel( automatic_resources=automatic_resources, @@ -992,8 +994,8 @@ def test_deploy_no_endpoint_dedicated_resources(self, deploy_model_mock, sync): accelerator_type=_TEST_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, service_account=_TEST_SERVICE_ACCOUNT, - deploy_request_timeout=None, sync=sync, + deploy_request_timeout=None, ) if not sync: @@ -1036,8 +1038,8 @@ def test_deploy_no_endpoint_with_explanations(self, deploy_model_mock, sync): accelerator_count=_TEST_ACCELERATOR_COUNT, explanation_metadata=_TEST_EXPLANATION_METADATA, explanation_parameters=_TEST_EXPLANATION_PARAMETERS, - deploy_request_timeout=None, sync=sync, + deploy_request_timeout=None, ) if not sync: @@ -1106,8 +1108,8 @@ def test_init_aiplatform_with_encryption_key_name_and_batch_predict_gcs_source_a job_display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME, gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE, gcs_destination_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -1155,8 +1157,8 @@ def test_batch_predict_gcs_source_and_dest( job_display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME, gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE, gcs_destination_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -1203,8 +1205,8 @@ def test_batch_predict_gcs_source_bq_dest( job_display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME, gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE, bigquery_destination_prefix=_TEST_BATCH_PREDICTION_BQ_PREFIX, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -1262,8 +1264,8 @@ def test_batch_predict_with_all_args(self, create_batch_prediction_job_mock, syn labels=_TEST_LABEL, 
credentials=creds, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -1638,8 +1640,8 @@ def test_upload_xgboost_model_file_uploads_and_gets_model( display_name=_TEST_MODEL_NAME, project=_TEST_PROJECT, location=_TEST_LOCATION, - upload_request_timeout=None, sync=sync, + upload_request_timeout=None, ) if not sync: @@ -1749,8 +1751,8 @@ def test_upload_scikit_learn_model_file_uploads_and_gets_model( display_name=_TEST_MODEL_NAME, project=_TEST_PROJECT, location=_TEST_LOCATION, - upload_request_timeout=None, sync=sync, + upload_request_timeout=None, ) if not sync: @@ -1799,8 +1801,8 @@ def test_upload_tensorflow_saved_model_uploads_and_gets_model( display_name=_TEST_MODEL_NAME, project=_TEST_PROJECT, location=_TEST_LOCATION, - upload_request_timeout=None, sync=sync, + upload_request_timeout=None, ) if not sync: diff --git a/tests/unit/aiplatform/test_pipeline_jobs.py b/tests/unit/aiplatform/test_pipeline_jobs.py index cdd10d3858..3188360276 100644 --- a/tests/unit/aiplatform/test_pipeline_jobs.py +++ b/tests/unit/aiplatform/test_pipeline_jobs.py @@ -282,10 +282,7 @@ def test_run_call_pipeline_service_create( service_account=_TEST_SERVICE_ACCOUNT, network=_TEST_NETWORK, sync=sync, -<<<<<<< HEAD create_request_timeout=None, -======= ->>>>>>> main ) if not sync: @@ -330,7 +327,8 @@ def test_run_call_pipeline_service_create( ) @pytest.mark.parametrize( - "job_spec_json", [_TEST_PIPELINE_SPEC, _TEST_PIPELINE_JOB], + "job_spec_json", + [_TEST_PIPELINE_SPEC, _TEST_PIPELINE_JOB], ) @pytest.mark.parametrize("sync", [True, False]) def test_run_call_pipeline_service_create_with_timeout( @@ -405,7 +403,8 @@ def test_run_call_pipeline_service_create_with_timeout( # ) @pytest.mark.parametrize( - "job_spec_json", [_TEST_PIPELINE_SPEC_LEGACY, _TEST_PIPELINE_JOB_LEGACY], + "job_spec_json", + [_TEST_PIPELINE_SPEC_LEGACY, _TEST_PIPELINE_JOB_LEGACY], ) @pytest.mark.parametrize("sync", [True, False]) def test_run_call_pipeline_service_create_legacy( diff --git a/tests/unit/aiplatform/test_tensorboard.py b/tests/unit/aiplatform/test_tensorboard.py index 4fe63e3c26..f7df67e56e 100644 --- a/tests/unit/aiplatform/test_tensorboard.py +++ b/tests/unit/aiplatform/test_tensorboard.py @@ -363,7 +363,8 @@ def test_create_tensorboard_with_default_encryption_key( ) tensorboard.Tensorboard.create( - display_name=_TEST_DISPLAY_NAME, create_request_timeout=None, + display_name=_TEST_DISPLAY_NAME, + create_request_timeout=None, ) expected_tensorboard = gca_tensorboard.Tensorboard( @@ -406,7 +407,9 @@ def test_create_tensorboard(self, create_tensorboard_mock): @pytest.mark.usefixtures("get_tensorboard_mock") def test_create_tensorboard_with_timeout(self, create_tensorboard_mock): - aiplatform.init(project=_TEST_PROJECT,) + aiplatform.init( + project=_TEST_PROJECT, + ) tensorboard.Tensorboard.create( display_name=_TEST_DISPLAY_NAME, @@ -415,7 +418,8 @@ def test_create_tensorboard_with_timeout(self, create_tensorboard_mock): ) expected_tensorboard = gca_tensorboard.Tensorboard( - display_name=_TEST_DISPLAY_NAME, encryption_spec=_TEST_ENCRYPTION_SPEC, + display_name=_TEST_DISPLAY_NAME, + encryption_spec=_TEST_ENCRYPTION_SPEC, ) create_tensorboard_mock.assert_called_once_with( @@ -552,7 +556,9 @@ def test_create_tensorboard_experiment_with_timeout( self, create_tensorboard_experiment_mock, get_tensorboard_experiment_mock ): - aiplatform.init(project=_TEST_PROJECT,) + aiplatform.init( + project=_TEST_PROJECT, + ) 
tensorboard.TensorboardExperiment.create( tensorboard_experiment_id=_TEST_TENSORBOARD_EXPERIMENT_ID, @@ -561,8 +567,10 @@ def test_create_tensorboard_experiment_with_timeout( create_request_timeout=180.0, ) - expected_tensorboard_experiment = gca_tensorboard_experiment.TensorboardExperiment( - display_name=_TEST_DISPLAY_NAME, + expected_tensorboard_experiment = ( + gca_tensorboard_experiment.TensorboardExperiment( + display_name=_TEST_DISPLAY_NAME, + ) ) create_tensorboard_experiment_mock.assert_called_once_with( @@ -674,7 +682,9 @@ def test_create_tensorboard_run_with_timeout( self, create_tensorboard_run_mock, get_tensorboard_run_mock ): - aiplatform.init(project=_TEST_PROJECT,) + aiplatform.init( + project=_TEST_PROJECT, + ) tensorboard.TensorboardRun.create( tensorboard_run_id=_TEST_TENSORBOARD_RUN_ID, diff --git a/tests/unit/aiplatform/test_training_jobs.py b/tests/unit/aiplatform/test_training_jobs.py index b384e3efc0..06eaa9218a 100644 --- a/tests/unit/aiplatform/test_training_jobs.py +++ b/tests/unit/aiplatform/test_training_jobs.py @@ -898,8 +898,8 @@ def test_run_call_pipeline_service_create_with_tabular_dataset( test_fraction_split=_TEST_TEST_FRACTION_SPLIT, timestamp_split_column_name=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME, tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -1080,8 +1080,8 @@ def test_custom_training_tabular_done( test_fraction_split=_TEST_TEST_FRACTION_SPLIT, timestamp_split_column_name=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME, tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME, - create_request_timeout=None, sync=False, + create_request_timeout=None, ) assert job.done() is False @@ -1142,8 +1142,8 @@ def test_run_call_pipeline_service_create_with_tabular_dataset_and_timeout( test_fraction_split=_TEST_TEST_FRACTION_SPLIT, timestamp_split_column_name=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME, tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME, - create_request_timeout=180.0, sync=sync, + create_request_timeout=180.0, ) if not sync: @@ -1295,8 +1295,8 @@ def test_run_call_pipeline_service_create_with_bigquery_destination( accelerator_count=_TEST_ACCELERATOR_COUNT, model_display_name=_TEST_MODEL_DISPLAY_NAME, predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -1444,8 +1444,8 @@ def test_run_called_twice_raises( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) with pytest.raises(RuntimeError): @@ -1461,8 +1461,8 @@ def test_run_called_twice_raises( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -1498,8 +1498,8 @@ def test_run_with_invalid_accelerator_type_raises( accelerator_type=_TEST_INVALID_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, model_display_name=_TEST_MODEL_DISPLAY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -1530,8 +1530,8 @@ def test_run_with_two_splits_raises( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, 
test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -1564,8 +1564,8 @@ def test_run_with_incomplete_model_info_raises_with_model_to_upload( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -1606,8 +1606,8 @@ def test_run_call_pipeline_service_create_with_no_dataset( training_filter_split=_TEST_TRAINING_FILTER_SPLIT, validation_filter_split=_TEST_VALIDATION_FILTER_SPLIT, test_filter_split=_TEST_TEST_FILTER_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -1718,8 +1718,8 @@ def test_run_call_pipeline_service_create_with_enable_web_access( accelerator_type=_TEST_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, enable_web_access=_TEST_ENABLE_WEB_ACCESS, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -1759,8 +1759,8 @@ def test_run_call_pipeline_service_create_with_scheduling(self, sync, caplog): accelerator_count=_TEST_ACCELERATOR_COUNT, timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -1810,8 +1810,8 @@ def test_run_returns_none_if_no_model_to_upload( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) assert model is None @@ -1846,8 +1846,8 @@ def test_get_model_raises_if_no_model_to_upload( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -1885,8 +1885,8 @@ def test_run_raises_if_pipeline_fails( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -1967,8 +1967,8 @@ def test_run_call_pipeline_service_create_distributed_training( accelerator_type=_TEST_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, model_display_name=_TEST_MODEL_DISPLAY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -2119,8 +2119,8 @@ def test_run_call_pipeline_service_create_distributed_training_with_reduction_se reduction_server_replica_count=_TEST_REDUCTION_SERVER_REPLICA_COUNT, reduction_server_machine_type=_TEST_REDUCTION_SERVER_MACHINE_TYPE, reduction_server_container_uri=_TEST_REDUCTION_SERVER_CONTAINER_URI, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -2383,8 +2383,8 @@ def test_run_call_pipeline_service_create_with_nontabular_dataset_without_model_ training_filter_split=_TEST_TRAINING_FILTER_SPLIT, validation_filter_split=_TEST_VALIDATION_FILTER_SPLIT, test_filter_split=_TEST_TEST_FILTER_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -2642,8 +2642,8 @@ def 
test_custom_container_training_tabular_done( predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME, service_account=_TEST_SERVICE_ACCOUNT, tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME, - create_request_timeout=None, sync=False, + create_request_timeout=None, ) assert job.done() is False @@ -2698,8 +2698,8 @@ def test_run_call_pipeline_service_create_with_tabular_dataset( predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME, service_account=_TEST_SERVICE_ACCOUNT, tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -2863,8 +2863,8 @@ def test_run_call_pipeline_service_create_with_tabular_dataset_and_timeout( predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME, service_account=_TEST_SERVICE_ACCOUNT, tensorboard=_TEST_TENSORBOARD_RESOURCE_NAME, - create_request_timeout=180.0, sync=sync, + create_request_timeout=180.0, ) if not sync: @@ -3025,8 +3025,8 @@ def test_run_call_pipeline_service_create_with_bigquery_destination( validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, timestamp_split_column_name=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -3173,8 +3173,8 @@ def test_run_called_twice_raises( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) with pytest.raises(RuntimeError): @@ -3190,8 +3190,8 @@ def test_run_called_twice_raises( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -3227,8 +3227,8 @@ def test_run_with_invalid_accelerator_type_raises( accelerator_type=_TEST_INVALID_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, model_display_name=_TEST_MODEL_DISPLAY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -3258,8 +3258,8 @@ def test_run_with_two_split_raises( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -3292,8 +3292,8 @@ def test_run_with_incomplete_model_info_raises_with_model_to_upload( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -3325,8 +3325,8 @@ def test_run_call_pipeline_service_create_with_no_dataset( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -3424,8 +3424,8 @@ def test_run_call_pipeline_service_create_with_enable_web_access( accelerator_type=_TEST_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, enable_web_access=_TEST_ENABLE_WEB_ACCESS, - 
create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -3464,8 +3464,8 @@ def test_run_call_pipeline_service_create_with_scheduling(self, sync, caplog): accelerator_count=_TEST_ACCELERATOR_COUNT, timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -3509,8 +3509,8 @@ def test_run_returns_none_if_no_model_to_upload( machine_type=_TEST_MACHINE_TYPE, accelerator_type=_TEST_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) assert model is None @@ -3544,8 +3544,8 @@ def test_get_model_raises_if_no_model_to_upload( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -3582,8 +3582,8 @@ def test_run_raises_if_pipeline_fails( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -3659,8 +3659,8 @@ def test_run_call_pipeline_service_create_distributed_training( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -3802,8 +3802,8 @@ def test_run_call_pipeline_service_create_distributed_training_with_reduction_se reduction_server_replica_count=_TEST_REDUCTION_SERVER_REPLICA_COUNT, reduction_server_machine_type=_TEST_REDUCTION_SERVER_MACHINE_TYPE, reduction_server_container_uri=_TEST_REDUCTION_SERVER_CONTAINER_URI, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -3933,8 +3933,8 @@ def test_run_call_pipeline_service_create_with_nontabular_dataset( training_filter_split=_TEST_TRAINING_FILTER_SPLIT, validation_filter_split=_TEST_VALIDATION_FILTER_SPLIT, test_filter_split=_TEST_TEST_FILTER_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -4460,8 +4460,8 @@ def test_run_call_pipeline_service_create_with_tabular_dataset( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -4631,8 +4631,8 @@ def test_run_call_pipeline_service_create_with_tabular_dataset_with_timeout( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=180.0, sync=sync, + create_request_timeout=180.0, ) if not sync: @@ -4780,8 +4780,8 @@ def test_run_call_pipeline_service_create_with_tabular_dataset_without_model_dis accelerator_type=_TEST_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, predefined_split_column_name=_TEST_PREDEFINED_SPLIT_COLUMN_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -4935,8 +4935,8 @@ def test_run_call_pipeline_service_create_with_bigquery_destination( validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, 
test_fraction_split=_TEST_TEST_FRACTION_SPLIT, timestamp_split_column_name=_TEST_TIMESTAMP_SPLIT_COLUMN_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -5082,8 +5082,8 @@ def test_run_called_twice_raises( accelerator_type=_TEST_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, model_display_name=_TEST_MODEL_DISPLAY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) with pytest.raises(RuntimeError): @@ -5096,8 +5096,8 @@ def test_run_called_twice_raises( accelerator_type=_TEST_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, model_display_name=_TEST_MODEL_DISPLAY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -5137,8 +5137,8 @@ def test_run_with_invalid_accelerator_type_raises( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -5170,8 +5170,8 @@ def test_run_with_two_split_raises( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -5205,8 +5205,8 @@ def test_run_with_incomplete_model_info_raises_with_model_to_upload( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) @pytest.mark.parametrize("sync", [True, False]) @@ -5239,8 +5239,8 @@ def test_run_call_pipeline_service_create_with_no_dataset( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -5340,8 +5340,8 @@ def test_run_call_pipeline_service_create_with_enable_web_access( accelerator_type=_TEST_ACCELERATOR_TYPE, accelerator_count=_TEST_ACCELERATOR_COUNT, enable_web_access=_TEST_ENABLE_WEB_ACCESS, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -5381,8 +5381,8 @@ def test_run_call_pipeline_service_create_with_scheduling(self, sync, caplog): accelerator_count=_TEST_ACCELERATOR_COUNT, timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -5432,8 +5432,8 @@ def test_run_returns_none_if_no_model_to_upload( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) assert model is None @@ -5468,8 +5468,8 @@ def test_get_model_raises_if_no_model_to_upload( training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -5583,8 +5583,8 @@ def test_run_call_pipeline_service_create_distributed_training( 
training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -5729,8 +5729,8 @@ def test_run_call_pipeline_service_create_distributed_training_with_reduction_se reduction_server_replica_count=_TEST_REDUCTION_SERVER_REPLICA_COUNT, reduction_server_machine_type=_TEST_REDUCTION_SERVER_MACHINE_TYPE, reduction_server_container_uri=_TEST_REDUCTION_SERVER_CONTAINER_URI, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -5861,8 +5861,8 @@ def test_run_call_pipeline_service_create_with_nontabular_dataset_without_model_ training_filter_split=_TEST_TRAINING_FILTER_SPLIT, validation_filter_split=_TEST_VALIDATION_FILTER_SPLIT, test_filter_split=_TEST_TEST_FILTER_SPLIT, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: From 12bb2f97e78bca3d266a5b21412d2000b2b10def Mon Sep 17 00:00:00 2001 From: Sara Robinson Date: Fri, 1 Apr 2022 13:10:28 -0400 Subject: [PATCH 19/19] finish moving timeout arg to end of function signatures --- google/cloud/aiplatform/training_jobs.py | 28 +++++++++---------- .../test_automl_forecasting_training_jobs.py | 2 +- .../test_automl_image_training_jobs.py | 2 +- .../test_automl_text_training_jobs.py | 4 +-- .../test_automl_video_training_jobs.py | 2 +- tests/unit/aiplatform/test_custom_job.py | 2 +- tests/unit/aiplatform/test_datasets.py | 12 ++++---- tests/unit/aiplatform/test_end_to_end.py | 2 +- .../test_hyperparameter_tuning_job.py | 2 +- 9 files changed, 28 insertions(+), 28 deletions(-) diff --git a/google/cloud/aiplatform/training_jobs.py b/google/cloud/aiplatform/training_jobs.py index 31829edd3a..dd64b7f2af 100644 --- a/google/cloud/aiplatform/training_jobs.py +++ b/google/cloud/aiplatform/training_jobs.py @@ -2168,8 +2168,8 @@ def _run( enable_web_access: bool = False, tensorboard: Optional[str] = None, reduction_server_container_uri: Optional[str] = None, - create_request_timeout: Optional[float] = None, sync=True, + create_request_timeout: Optional[float] = None, ) -> Optional[models.Model]: """Packages local script and launches training_job. @@ -3540,12 +3540,12 @@ def run( [export_evaluated_data_items_bigquery_destination_uri] is specified. additional_experiments (List[str]): Optional. Additional experiment flags for the automl tables training. - create_request_timeout (float): - Optional. The timeout for the create request in seconds. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will be immediately returned and synced when the Future has completed. + create_request_timeout (float): + Optional. The timeout for the create request in seconds. Returns: model: The trained Vertex AI Model resource or None if training did not produce a Vertex AI Model. @@ -3605,8 +3605,8 @@ def _run( export_evaluated_data_items: bool = False, export_evaluated_data_items_bigquery_destination_uri: Optional[str] = None, export_evaluated_data_items_override_destination: bool = False, - create_request_timeout: Optional[float] = None, sync: bool = True, + create_request_timeout: Optional[float] = None, ) -> models.Model: """Runs the training job and returns a model. 
@@ -4255,8 +4255,8 @@ def _run( budget_milli_node_hours: int = 1000, model_display_name: Optional[str] = None, model_labels: Optional[Dict[str, str]] = None, - create_request_timeout: Optional[float] = None, sync: bool = True, + create_request_timeout: Optional[float] = None, ) -> models.Model: """Runs the training job and returns a model. @@ -4717,8 +4717,8 @@ def run( model_display_name: Optional[str] = None, model_labels: Optional[Dict[str, str]] = None, disable_early_stopping: bool = False, - create_request_timeout: Optional[float] = False, sync: bool = True, + create_request_timeout: Optional[float] = False, ) -> models.Model: """Runs the AutoML Image training job and returns a model. @@ -4877,8 +4877,8 @@ def _run( model_display_name: Optional[str] = None, model_labels: Optional[Dict[str, str]] = None, disable_early_stopping: bool = False, - create_request_timeout: Optional[float] = None, sync: bool = True, + create_request_timeout: Optional[float] = None, ) -> models.Model: """Runs the training job and returns a model. @@ -5562,12 +5562,12 @@ def run( `service_account` is required with provided `tensorboard`. For more information on configuring your service account please visit: https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training - create_request_timeout (float): - Optional. The timeout for the create request in seconds. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will be immediately returned and synced when the Future has completed. + create_request_timeout (float): + Optional. The timeout for the create request in seconds. Returns: model: The trained Vertex AI Model resource or None if training did not @@ -5778,12 +5778,12 @@ def _run( https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training reduction_server_container_uri (str): Optional. The Uri of the reduction server container image. - create_request_timeout (float): - Optional. The timeout for the create request in seconds. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will be immediately returned and synced when the Future has completed. + create_request_timeout (float): + Optional. The timeout for the create request in seconds. Returns: model: The trained Vertex AI Model resource or None if training did not @@ -6060,12 +6060,12 @@ def run( are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. - create_request_timeout (float): - Optional. The timeout for the create request in seconds. sync: bool = True Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will be immediately returned and synced when the Future has completed. + create_request_timeout (float): + Optional. The timeout for the create request in seconds. Returns: model: The trained Vertex AI Model resource or None if training did not produce a Vertex AI Model. @@ -6508,8 +6508,8 @@ def _run( test_filter_split: Optional[str] = None, model_display_name: Optional[str] = None, model_labels: Optional[Dict[str, str]] = None, - create_request_timeout: Optional[float] = None, sync: bool = True, + create_request_timeout: Optional[float] = None, ) -> models.Model: """Runs the training job and returns a model. 
diff --git a/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py b/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py index 26224bea2a..953da7af91 100644 --- a/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py @@ -713,8 +713,8 @@ def test_splits_fraction( export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION, quantiles=_TEST_TRAINING_QUANTILES, validation_options=_TEST_TRAINING_VALIDATION_OPTIONS, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: diff --git a/tests/unit/aiplatform/test_automl_image_training_jobs.py b/tests/unit/aiplatform/test_automl_image_training_jobs.py index bb9c1c2de3..80187df7ae 100644 --- a/tests/unit/aiplatform/test_automl_image_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_image_training_jobs.py @@ -667,8 +667,8 @@ def test_splits_filter( test_filter_split=_TEST_FILTER_SPLIT_TEST, budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS, disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: diff --git a/tests/unit/aiplatform/test_automl_text_training_jobs.py b/tests/unit/aiplatform/test_automl_text_training_jobs.py index 0e08954df7..4980034140 100644 --- a/tests/unit/aiplatform/test_automl_text_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_text_training_jobs.py @@ -632,8 +632,8 @@ def test_run_call_pipeline_if_no_model_display_name_nor_model_labels( model_from_job = job.run( dataset=mock_dataset_text, model_display_name=None, # Omit model_display_name - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: @@ -861,8 +861,8 @@ def test_splits_filter( training_filter_split=_TEST_FILTER_SPLIT_TRAINING, validation_filter_split=_TEST_FILTER_SPLIT_VALIDATION, test_filter_split=_TEST_FILTER_SPLIT_TEST, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: diff --git a/tests/unit/aiplatform/test_automl_video_training_jobs.py b/tests/unit/aiplatform/test_automl_video_training_jobs.py index 592640d860..e0e2a524e2 100644 --- a/tests/unit/aiplatform/test_automl_video_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_video_training_jobs.py @@ -324,8 +324,8 @@ def test_splits_fraction( model_display_name=_TEST_MODEL_DISPLAY_NAME, training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING, test_fraction_split=_TEST_FRACTION_SPLIT_TEST, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) if not sync: diff --git a/tests/unit/aiplatform/test_custom_job.py b/tests/unit/aiplatform/test_custom_job.py index d9173b06f7..f16f3a1cc8 100644 --- a/tests/unit/aiplatform/test_custom_job.py +++ b/tests/unit/aiplatform/test_custom_job.py @@ -638,8 +638,8 @@ def test_create_custom_job_with_tensorboard( network=_TEST_NETWORK, timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) job.wait() diff --git a/tests/unit/aiplatform/test_datasets.py b/tests/unit/aiplatform/test_datasets.py index 7b907c667e..bab93280b5 100644 --- a/tests/unit/aiplatform/test_datasets.py +++ b/tests/unit/aiplatform/test_datasets.py @@ -821,8 +821,8 @@ def test_create_then_import( display_name=_TEST_DISPLAY_NAME, metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR, 
encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) my_dataset.import_data( @@ -991,8 +991,8 @@ def test_import_data(self, import_data_mock, sync): my_dataset.import_data( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_IMAGE, - import_request_timeout=None, sync=sync, + import_request_timeout=None, ) if not sync: @@ -1507,8 +1507,8 @@ def test_import_data(self, import_data_mock, sync): my_dataset.import_data( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_TEXT, - import_request_timeout=None, sync=sync, + import_request_timeout=None, ) if not sync: @@ -1542,8 +1542,8 @@ def test_create_then_import( my_dataset.import_data( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_TEXT, - import_request_timeout=None, sync=sync, + import_request_timeout=None, ) if not sync: @@ -1721,8 +1721,8 @@ def test_import_data(self, import_data_mock, sync): my_dataset.import_data( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_VIDEO, - import_request_timeout=None, sync=sync, + import_request_timeout=None, ) if not sync: @@ -1756,8 +1756,8 @@ def test_create_then_import( my_dataset.import_data( gcs_source=[_TEST_SOURCE_URI_GCS], import_schema_uri=_TEST_IMPORT_SCHEMA_URI_VIDEO, - import_request_timeout=None, sync=sync, + import_request_timeout=None, ) if not sync: diff --git a/tests/unit/aiplatform/test_end_to_end.py b/tests/unit/aiplatform/test_end_to_end.py index bc1bdba0cc..c31b17ab1c 100644 --- a/tests/unit/aiplatform/test_end_to_end.py +++ b/tests/unit/aiplatform/test_end_to_end.py @@ -110,8 +110,8 @@ def test_dataset_create_to_model_predict( gcs_source=test_datasets._TEST_SOURCE_URI_GCS, import_schema_uri=test_datasets._TEST_IMPORT_SCHEMA_URI, data_item_labels=test_datasets._TEST_DATA_LABEL_ITEMS, - import_request_timeout=None, sync=sync, + import_request_timeout=None, ) job = aiplatform.CustomTrainingJob( diff --git a/tests/unit/aiplatform/test_hyperparameter_tuning_job.py b/tests/unit/aiplatform/test_hyperparameter_tuning_job.py index fd7e58b34b..727f106fb5 100644 --- a/tests/unit/aiplatform/test_hyperparameter_tuning_job.py +++ b/tests/unit/aiplatform/test_hyperparameter_tuning_job.py @@ -531,8 +531,8 @@ def test_run_hyperparameter_tuning_job_with_fail_raises( network=_TEST_NETWORK, timeout=_TEST_TIMEOUT, restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, - create_request_timeout=None, sync=sync, + create_request_timeout=None, ) job.wait()
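
Illustrative usage (a minimal sketch, not part of the patch series): the keyword names below follow the tests exercised above (upload_request_timeout, create_request_timeout), while the project, bucket, and container image URIs are placeholders.

    from google.cloud import aiplatform

    aiplatform.init(project="my-project", location="us-central1")

    # Cap the underlying UploadModel request at three minutes. The series moves
    # the *_request_timeout parameters to the end of the signatures, after sync;
    # at a call site they are ordinary keyword arguments.
    model = aiplatform.Model.upload(
        display_name="example-model",
        serving_container_image_uri="gcr.io/my-project/serving:latest",
        sync=True,
        upload_request_timeout=180.0,
    )

    # Dataset creation takes the same style of per-request timeout.
    dataset = aiplatform.TabularDataset.create(
        display_name="example-dataset",
        gcs_source="gs://my-bucket/data.csv",
        create_request_timeout=180.0,
    )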