From 3eefbbf0c685555adde30b8e4a9f4020fc89348e Mon Sep 17 00:00:00 2001 From: Sasha Sobran Date: Fri, 9 Jul 2021 13:44:20 -0400 Subject: [PATCH 01/11] add initial properties --- google/cloud/aiplatform/base.py | 22 +++++++++++++++++++ google/cloud/aiplatform/datasets/dataset.py | 1 + .../aiplatform/datasets/tabular_dataset.py | 2 ++ google/cloud/aiplatform/jobs.py | 3 +++ 4 files changed, 28 insertions(+) diff --git a/google/cloud/aiplatform/base.py b/google/cloud/aiplatform/base.py index 2df7d7234e..afc076a766 100644 --- a/google/cloud/aiplatform/base.py +++ b/google/cloud/aiplatform/base.py @@ -23,6 +23,7 @@ import logging import sys import threading +import time from typing import ( Any, Callable, @@ -540,21 +541,25 @@ def _sync_gca_resource(self): @property def name(self) -> str: """Name of this resource.""" + self._assert_gca_resource_is_available() return self._gca_resource.name.split("/")[-1] @property def resource_name(self) -> str: """Fully qualified resource name.""" + self._assert_gca_resource_is_available() return self._gca_resource.name @property def display_name(self) -> str: """Display name of this resource.""" + self._assert_gca_resource_is_available() return self._gca_resource.display_name @property def create_time(self) -> datetime.datetime: """Time this resource was created.""" + self._assert_gca_resource_is_available() return self._gca_resource.create_time @property @@ -570,6 +575,7 @@ def encryption_spec(self) -> Optional[gca_encryption_spec.EncryptionSpec]: If this is set, then all resources created by this Vertex AI resource will be encrypted with the provided encryption key. """ + self._assert_gca_resource_is_available() return getattr(self._gca_resource, "encryption_spec") @property @@ -578,13 +584,19 @@ def labels(self) -> Dict[str, str]: Read more about labels at https://goo.gl/xmQnxf """ + self._assert_gca_resource_is_available() return self._gca_resource.labels @property def gca_resource(self) -> proto.Message: """The underlying resource proto representation.""" + self._assert_gca_resource_is_available() return self._gca_resource + def _assert_gca_resource_is_available(self, remediation_str: str=''): + if self._gca_resource is None: + raise RuntimeError(f"{self.__class__} resource has not been created. {remediation_str}") + def __repr__(self) -> str: return f"{object.__repr__(self)} \nresource name: {self.resource_name}" @@ -1061,6 +1073,16 @@ def __repr__(self) -> str: return FutureManager.__repr__(self) + def wait_for_resource_creation(self) -> None: + """Wait until underlying resource is created.""" + while getattr(self._gca_resource, 'name', None) is None: + self._raise_future_exception() # will raise if exception occured async + time.sleep(1) + + def _assert_gca_resource_is_available(self, remediation_str: str = '') -> None: + super(self, VertexAiResourceNounWithFutureManager)._assert_gca_resource_is_available( + remediation_str or 'To wait for resource creation use wait_for_resource_creation.') + def get_annotation_class(annotation: type) -> type: """Helper method to retrieve type annotation.
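The hunks above establish the pattern this whole series relies on: every property first calls _assert_gca_resource_is_available(), so reading e.g. `name` on a resource that has not finished creating raises a descriptive RuntimeError rather than an AttributeError on None. A minimal sketch of that guard, using an illustrative stand-in class (ResourceNounSketch is not a real SDK class):

    class ResourceNounSketch:
        def __init__(self):
            # Populated only after the create call returns from the service.
            self._gca_resource = None

        def _assert_gca_resource_is_available(self):
            # Fail loudly with a descriptive error instead of an
            # AttributeError on None.
            if self._gca_resource is None:
                raise RuntimeError(
                    f"{self.__class__.__name__} resource has not been created."
                )

        @property
        def name(self):
            self._assert_gca_resource_is_available()
            return self._gca_resource.name.split("/")[-1]

Accessing ResourceNounSketch().name raises immediately; once _gca_resource is populated by the creation LRO, the same property returns the resource's short name.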
diff --git a/google/cloud/aiplatform/datasets/dataset.py b/google/cloud/aiplatform/datasets/dataset.py index df402d0c99..3985e8a7d3 100644 --- a/google/cloud/aiplatform/datasets/dataset.py +++ b/google/cloud/aiplatform/datasets/dataset.py @@ -84,6 +84,7 @@ def __init__( @property def metadata_schema_uri(self) -> str: """The metadata schema uri of this dataset resource.""" + self._assert_gca_resource_is_available_and_wait_for_creation() return self._gca_resource.metadata_schema_uri def _validate_metadata_schema_uri(self) -> None: diff --git a/google/cloud/aiplatform/datasets/tabular_dataset.py b/google/cloud/aiplatform/datasets/tabular_dataset.py index 71c9d4f7d7..fa7867ba4d 100644 --- a/google/cloud/aiplatform/datasets/tabular_dataset.py +++ b/google/cloud/aiplatform/datasets/tabular_dataset.py @@ -52,6 +52,8 @@ def column_names(self) -> List[str]: RuntimeError: When no valid source is found. """ + self._assert_gca_resource_is_available_and_wait_for_creation() + metadata = self._gca_resource.metadata if metadata is None: diff --git a/google/cloud/aiplatform/jobs.py b/google/cloud/aiplatform/jobs.py index 3a23609f59..e6220b19ab 100644 --- a/google/cloud/aiplatform/jobs.py +++ b/google/cloud/aiplatform/jobs.py @@ -330,6 +330,7 @@ def output_info(self,) -> Optional[aiplatform.gapic.BatchPredictionJob.OutputInf This is only available for batch predicition jobs that have run successfully. """ + self._assert_gca_resource_is_available() return self._gca_resource.output_info @property @@ -337,11 +338,13 @@ def partial_failures(self) -> Optional[Sequence[status_pb2.Status]]: """Partial failures encountered. For example, single files that can't be read. This field never exceeds 20 entries. Status details fields contain standard GCP error details.""" + self._assert_gca_resource_is_available() return getattr(self._gca_resource, "partial_failures") @property def completion_stats(self) -> Optional[gca_completion_stats.CompletionStats]: """Statistics on completed and failed prediction instances.""" + self._assert_gca_resource_is_available() return getattr(self._gca_resource, "completion_stats") @classmethod From 8bd4df1f5142e4f51be658e3315939444a5a1482 Mon Sep 17 00:00:00 2001 From: Sasha Sobran Date: Mon, 12 Jul 2021 14:57:40 -0400 Subject: [PATCH 02/11] checkpoint --- google/cloud/aiplatform/base.py | 4 +-- google/cloud/aiplatform/datasets/dataset.py | 2 +- .../aiplatform/datasets/tabular_dataset.py | 2 +- google/cloud/aiplatform/jobs.py | 29 ++++++++----------- google/cloud/aiplatform/models.py | 10 +++++++ google/cloud/aiplatform/pipeline_jobs.py | 27 +++++------------ 6 files changed, 33 insertions(+), 41 deletions(-) diff --git a/google/cloud/aiplatform/base.py b/google/cloud/aiplatform/base.py index afc076a766..5a37fad17d 100644 --- a/google/cloud/aiplatform/base.py +++ b/google/cloud/aiplatform/base.py @@ -1079,9 +1079,9 @@ def wait_for_resource_creation(self) -> None: self._raise_future_exception() # will raise if exception occured async time.sleep(1) - def _assert_gca_resource_is_available(self, remediation_str: str = '') -> None: + def _assert_gca_resource_is_available(self) -> None: super(self, VertexAiResourceNounWithFutureManager)._assert_gca_resource_is_available( - remediation_str or 'To wait for resource creation use wait_for_resource_creation.') + 'To wait for resource creation use wait_for_resource_creation.') def get_annotation_class(annotation: type) -> type: diff --git a/google/cloud/aiplatform/datasets/dataset.py b/google/cloud/aiplatform/datasets/dataset.py index 
3985e8a7d3..aae54d27f3 100644 --- a/google/cloud/aiplatform/datasets/dataset.py +++ b/google/cloud/aiplatform/datasets/dataset.py @@ -84,7 +84,7 @@ def __init__( @property def metadata_schema_uri(self) -> str: """The metadata schema uri of this dataset resource.""" - self._assert_gca_resource_is_available_and_wait_for_creation() + self._assert_gca_resource_is_available() return self._gca_resource.metadata_schema_uri def _validate_metadata_schema_uri(self) -> None: diff --git a/google/cloud/aiplatform/datasets/tabular_dataset.py b/google/cloud/aiplatform/datasets/tabular_dataset.py index fa7867ba4d..a9f4aaaf66 100644 --- a/google/cloud/aiplatform/datasets/tabular_dataset.py +++ b/google/cloud/aiplatform/datasets/tabular_dataset.py @@ -52,7 +52,7 @@ def column_names(self) -> List[str]: RuntimeError: When no valid source is found. """ - self._assert_gca_resource_is_available_and_wait_for_creation() + self._assert_gca_resource_is_available() metadata = self._gca_resource.metadata diff --git a/google/cloud/aiplatform/jobs.py b/google/cloud/aiplatform/jobs.py index e6220b19ab..c3e98686ef 100644 --- a/google/cloud/aiplatform/jobs.py +++ b/google/cloud/aiplatform/jobs.py @@ -775,6 +775,8 @@ def iter_outputs( GCS or BQ output provided. """ + self._assert_gca_resource_is_available() + if self.state != gca_job_state.JobState.JOB_STATE_SUCCEEDED: raise RuntimeError( f"Cannot read outputs until BatchPredictionJob has succeeded, " @@ -858,27 +860,17 @@ def __init__( project=project, location=location ) + def _assert_gca_resource_is_available(self): + if getattr(self._gca_resource, 'name', None) is None: + raise RuntimeError( + f"{self.__class__} resource has not been created." + "If job run has alaready been invoked use wait_for_resource_creation." + "Otherwise run the job with job.run.") + @abc.abstractmethod def run(self) -> None: pass - @property - def _has_run(self) -> bool: - """Property returns true if this class has a resource name.""" - return bool(self._gca_resource.name) - - @property - def state(self) -> gca_job_state.JobState: - """Current state of job. - - Raises: - RuntimeError if job run has not been called. - """ - if not self._has_run: - raise RuntimeError("Job has not run. No state available.") - - return super().state - @classmethod def get( cls, @@ -1044,6 +1036,7 @@ def network(self) -> Optional[str]: Private services access must already be configured for the network. If left unspecified, the CustomJob is not peered with any network. """ + self._assert_gca_resource_is_available() return getattr(self._gca_resource, "network") @classmethod @@ -1515,6 +1508,7 @@ def network(self) -> Optional[str]: Private services access must already be configured for the network. If left unspecified, the HyperparameterTuningJob is not peered with any network. """ + self._assert_gca_resource_is_available() return getattr(self._gca_resource.trial_job_spec, "network") @base.optional_sync() @@ -1615,4 +1609,5 @@ def run( @property def trials(self) -> List[gca_study_compat.Trial]: + self._assert_gca_resource_is_available() return list(self._gca_resource.trials) diff --git a/google/cloud/aiplatform/models.py b/google/cloud/aiplatform/models.py index 56cad667cb..a0f5cfb76b 100644 --- a/google/cloud/aiplatform/models.py +++ b/google/cloud/aiplatform/models.py @@ -146,6 +146,7 @@ def network(self) -> Optional[str]: Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. 
""" + self._assert_gca_resource_is_available() return getattr(self._gca_resource, "network") @classmethod @@ -1283,11 +1284,13 @@ class Model(base.VertexAiResourceNounWithFutureManager): def uri(self) -> Optional[str]: """Path to the directory containing the Model artifact and any of its supporting files. Not present for AutoML Models.""" + self._assert_gca_resource_is_available() return self._gca_resource.artifact_uri or None @property def description(self) -> str: """Description of the model.""" + self._assert_gca_resource_is_available() return self._gca_resource.description @property @@ -1302,6 +1305,7 @@ def supported_export_formats( {'tf-saved-model': []} """ + self._assert_gca_resource_is_available() return { export_format.id: [ gca_model_compat.Model.ExportFormat.ExportableContent(content) @@ -1328,6 +1332,7 @@ def supported_deployment_resources_types( predictions by using a `BatchPredictionJob`, if it has at least one entry each in `Model.supported_input_storage_formats` and `Model.supported_output_storage_formats`.""" + self._assert_gca_resource_is_available() return list(self._gca_resource.supported_deployment_resources_types) @property @@ -1343,6 +1348,7 @@ def supported_input_storage_formats(self) -> List[str]: `supported_deployment_resources_types`, it could serve online predictions by using `Endpoint.predict()` or `Endpoint.explain()`. """ + self._assert_gca_resource_is_available() return list(self._gca_resource.supported_input_storage_formats) @property @@ -1363,12 +1369,14 @@ def supported_output_storage_formats(self) -> List[str]: `supported_deployment_resources_types`, it could serve online predictions by using `Endpoint.predict()` or `Endpoint.explain()`. """ + self._assert_gca_resource_is_available() return list(self._gca_resource.supported_output_storage_formats) @property def predict_schemata(self) -> Optional[aiplatform.gapic.PredictSchemata]: """The schemata that describe formats of the Model's predictions and explanations, if available.""" + self._assert_gca_resource_is_available() return getattr(self._gca_resource, "predict_schemata") @property @@ -1379,6 +1387,7 @@ def training_job(self) -> Optional["aiplatform.training_jobs._TrainingJob"]: api_core.exceptions.NotFound: If the Model's training job resource cannot be found on the Vertex service. """ + self._assert_gca_resource_is_available() job_name = getattr(self._gca_resource, "training_pipeline") if not job_name: @@ -1400,6 +1409,7 @@ def training_job(self) -> Optional["aiplatform.training_jobs._TrainingJob"]: def container_spec(self) -> Optional[aiplatform.gapic.ModelContainerSpec]: """The specification of the container that is to be used when deploying this Model. Not present for AutoML Models.""" + self._assert_gca_resource_is_available() return getattr(self._gca_resource, "container_spec") def __init__( diff --git a/google/cloud/aiplatform/pipeline_jobs.py b/google/cloud/aiplatform/pipeline_jobs.py index 84b39c2ae8..5dce47c660 100644 --- a/google/cloud/aiplatform/pipeline_jobs.py +++ b/google/cloud/aiplatform/pipeline_jobs.py @@ -207,6 +207,13 @@ def __init__( ), ) + def _assert_gca_resource_is_available(self): + if getattr(self._gca_resource, 'name', None) is None: + raise RuntimeError( + f"{self.__class__} resource has not been created." + "If job run has alaready been invoked use wait_for_resource_creation." 
+ "Otherwise run the job with job.run.") + @base.optional_sync() def run( self, @@ -255,17 +262,9 @@ def pipeline_spec(self): @property def state(self) -> Optional[gca_pipeline_state_v1beta1.PipelineState]: """Current pipeline state.""" - if not self._has_run: - raise RuntimeError("Job has not run. No state available.") - self._sync_gca_resource() return self._gca_resource.state - @property - def _has_run(self) -> bool: - """Helper property to check if this pipeline job has been run.""" - return bool(self._gca_resource.create_time) - @property def has_failed(self) -> bool: """Returns True if pipeline has failed. @@ -283,10 +282,6 @@ def _dashboard_uri(self) -> str: url = f"https://console.cloud.google.com/vertex-ai/locations/{fields.location}/pipelines/runs/{fields.id}?project={fields.project}" return url - def _sync_gca_resource(self): - """Helper method to sync the local gca_source against the service.""" - self._gca_resource = self.api_client.get_pipeline_job(name=self.resource_name) - def _block_until_complete(self): """Helper method to block and check on job until complete.""" # Used these numbers so failures surface fast @@ -316,13 +311,5 @@ def cancel(self) -> None: makes a best effort to cancel the job, but success is not guaranteed. On successful cancellation, the PipelineJob is not deleted; instead it becomes a job with state set to `CANCELLED`. - - Raises: - RuntimeError: If this PipelineJob has not started running. """ - if not self._has_run: - raise RuntimeError( - "This PipelineJob has not been launched, use the `run()` method " - "to start. `cancel()` can only be called on a job that is running." - ) self.api_client.cancel_pipeline_job(name=self.resource_name) From aedd9cbea214be0f12d6311251abeef9c08e18a0 Mon Sep 17 00:00:00 2001 From: Sasha Sobran Date: Thu, 15 Jul 2021 13:38:13 -0400 Subject: [PATCH 03/11] fix: add checks for gcs_resource before accessing properties --- google/cloud/aiplatform/base.py | 14 ++++++++------ google/cloud/aiplatform/jobs.py | 9 ++++----- google/cloud/aiplatform/pipeline_jobs.py | 10 +++++----- google/cloud/aiplatform/training_jobs.py | 7 +------ .../test_automl_forecasting_training_jobs.py | 2 +- .../aiplatform/test_automl_image_training_jobs.py | 2 +- .../test_automl_tabular_training_jobs.py | 2 +- .../aiplatform/test_automl_text_training_jobs.py | 2 +- .../aiplatform/test_automl_video_training_jobs.py | 2 +- tests/unit/aiplatform/test_training_jobs.py | 2 +- 10 files changed, 24 insertions(+), 28 deletions(-) diff --git a/google/cloud/aiplatform/base.py b/google/cloud/aiplatform/base.py index 5a37fad17d..36d7034b14 100644 --- a/google/cloud/aiplatform/base.py +++ b/google/cloud/aiplatform/base.py @@ -593,9 +593,9 @@ def gca_resource(self) -> proto.Message: self._assert_gca_resource_is_available() return self._gca_resource - def _assert_gca_resource_is_available(self, remediation_str: str=''): + def _assert_gca_resource_is_available(self): if self._gca_resource is None: - raise RuntimeError(f"{self.__class__} resource has not been created. 
{remediation_str}") + raise RuntimeError(f"{self.__class__} resource has not been created") def __repr__(self) -> str: return f"{object.__repr__(self)} \nresource name: {self.resource_name}" @@ -1075,14 +1075,16 @@ def __repr__(self) -> str: def wait_for_resource_creation(self) -> None: """Wait until underlying resource is created.""" + self._raise_future_exception() while getattr(self._gca_resource, 'name', None) is None: self._raise_future_exception() # will raise if exception occured async time.sleep(1) - def _assert_gca_resource_is_available(self) -> None: - super(self, VertexAiResourceNounWithFutureManager)._assert_gca_resource_is_available( - 'To wait for resource creation use wait_for_resource_creation.') - + def _assert_gca_resource_is_available(self): + if self._gca_resource is None: + raise RuntimeError(f"{self.__class__} resource has not been created." + + (f" Resource failed with: {self._exception}" if self._exception else + " To wait for resource creation use wait_for_resource_creation.")) def get_annotation_class(annotation: type) -> type: """Helper method to retrieve type annotation. diff --git a/google/cloud/aiplatform/jobs.py b/google/cloud/aiplatform/jobs.py index c3e98686ef..4ea7754096 100644 --- a/google/cloud/aiplatform/jobs.py +++ b/google/cloud/aiplatform/jobs.py @@ -861,11 +861,10 @@ def __init__( ) def _assert_gca_resource_is_available(self): - if getattr(self._gca_resource, 'name', None) is None: - raise RuntimeError( - f"{self.__class__} resource has not been created." - "If job run has alaready been invoked use wait_for_resource_creation." - "Otherwise run the job with job.run.") + if not getattr(self._gca_resource, 'name', None): + raise RuntimeError(f"{self.__class__} resource has not been created." + + (f" Resource failed with: {self._exception}" if self._exception else + " To wait for resource creation use wait_for_resource_creation.")) @abc.abstractmethod def run(self) -> None: diff --git a/google/cloud/aiplatform/pipeline_jobs.py b/google/cloud/aiplatform/pipeline_jobs.py index 5dce47c660..ac57126591 100644 --- a/google/cloud/aiplatform/pipeline_jobs.py +++ b/google/cloud/aiplatform/pipeline_jobs.py @@ -208,11 +208,10 @@ def __init__( ) def _assert_gca_resource_is_available(self): - if getattr(self._gca_resource, 'name', None) is None: - raise RuntimeError( - f"{self.__class__} resource has not been created." - "If job run has alaready been invoked use wait_for_resource_creation." - "Otherwise run the job with job.run.") + if not getattr(self._gca_resource, 'name', None): + raise RuntimeError(f"{self.__class__} resource has not been created." + + (f" Resource failed with: {self._exception}" if self._exception else + " To wait for resource creation use wait_for_resource_creation.")) @base.optional_sync() def run( @@ -230,6 +229,7 @@ def run( network (str): Optional. The full name of the Compute Engine network to which the job should be peered. For example, projects/12345/global/networks/myVPC. + Private services access must already be configured for the network. If left unspecified, the job is not peered with any network. 
sync (bool): diff --git a/google/cloud/aiplatform/training_jobs.py b/google/cloud/aiplatform/training_jobs.py index 0b66c74fc1..2a6ecd374f 100644 --- a/google/cloud/aiplatform/training_jobs.py +++ b/google/cloud/aiplatform/training_jobs.py @@ -766,12 +766,6 @@ def _dashboard_uri(self) -> str: url = f"https://console.cloud.google.com/ai/platform/locations/{fields.location}/training/{fields.id}?project={fields.project}" return url - def _sync_gca_resource(self): - """Helper method to sync the local gca_source against the service.""" - self._gca_resource = self.api_client.get_training_pipeline( - name=self.resource_name - ) - @property def _has_run(self) -> bool: """Helper property to check if this training job has been run.""" @@ -1102,6 +1096,7 @@ def network(self) -> Optional[str]: unspecified, the CustomTrainingJob is not peered with any network. """ # Return `network` value in training task inputs if set in Map + self._assert_gca_resource_is_available() return self._gca_resource.training_task_inputs.get("network") def _prepare_and_validate_run( diff --git a/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py b/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py index b3649fa57b..d7b2e85001 100644 --- a/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py @@ -113,7 +113,7 @@ _TEST_MODEL_NAME = "projects/my-project/locations/us-central1/models/12345" _TEST_PIPELINE_RESOURCE_NAME = ( - "projects/my-project/locations/us-central1/trainingPipeline/12345" + "projects/my-project/locations/us-central1/trainingPipelines/12345" ) diff --git a/tests/unit/aiplatform/test_automl_image_training_jobs.py b/tests/unit/aiplatform/test_automl_image_training_jobs.py index ec0de7140b..326b096523 100644 --- a/tests/unit/aiplatform/test_automl_image_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_image_training_jobs.py @@ -76,7 +76,7 @@ ) _TEST_PIPELINE_RESOURCE_NAME = ( - f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/trainingPipeline/12345" + f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/trainingPipelines/12345" ) # CMEK encryption diff --git a/tests/unit/aiplatform/test_automl_tabular_training_jobs.py b/tests/unit/aiplatform/test_automl_tabular_training_jobs.py index 761b03b5a0..c43804d743 100644 --- a/tests/unit/aiplatform/test_automl_tabular_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_tabular_training_jobs.py @@ -94,7 +94,7 @@ _TEST_MODEL_NAME = "projects/my-project/locations/us-central1/models/12345" _TEST_PIPELINE_RESOURCE_NAME = ( - "projects/my-project/locations/us-central1/trainingPipeline/12345" + "projects/my-project/locations/us-central1/trainingPipelines/12345" ) # CMEK encryption diff --git a/tests/unit/aiplatform/test_automl_text_training_jobs.py b/tests/unit/aiplatform/test_automl_text_training_jobs.py index 101ff79ef5..6620e6a3e1 100644 --- a/tests/unit/aiplatform/test_automl_text_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_text_training_jobs.py @@ -61,7 +61,7 @@ ) _TEST_PIPELINE_RESOURCE_NAME = ( - f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/trainingPipeline/12345" + f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/trainingPipelines/12345" ) # CMEK encryption diff --git a/tests/unit/aiplatform/test_automl_video_training_jobs.py b/tests/unit/aiplatform/test_automl_video_training_jobs.py index 66f1692fcf..662426d56d 100644 --- a/tests/unit/aiplatform/test_automl_video_training_jobs.py +++ 
b/tests/unit/aiplatform/test_automl_video_training_jobs.py @@ -57,7 +57,7 @@ ) _TEST_PIPELINE_RESOURCE_NAME = ( - f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/trainingPipeline/12345" + f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/trainingPipelines/12345" ) # CMEK encryption diff --git a/tests/unit/aiplatform/test_training_jobs.py b/tests/unit/aiplatform/test_training_jobs.py index 0995e0cb95..72c17dedc5 100644 --- a/tests/unit/aiplatform/test_training_jobs.py +++ b/tests/unit/aiplatform/test_training_jobs.py @@ -141,7 +141,7 @@ _TEST_MODEL_NAME = "projects/my-project/locations/us-central1/models/12345" _TEST_PIPELINE_RESOURCE_NAME = ( - "projects/my-project/locations/us-central1/trainingPipeline/12345" + "projects/my-project/locations/us-central1/trainingPipelines/12345" ) _TEST_CREDENTIALS = mock.Mock(spec=auth_credentials.AnonymousCredentials()) From 46b539466e310ffd4e134a5aafe0e9647a628c07 Mon Sep 17 00:00:00 2001 From: Sasha Sobran Date: Thu, 15 Jul 2021 14:52:10 -0400 Subject: [PATCH 04/11] fix: add check to see if user invoked APIs before waiting --- google/cloud/aiplatform/base.py | 13 ++++++++++--- google/cloud/aiplatform/pipeline_jobs.py | 8 ++++---- tests/unit/aiplatform/test_pipeline_jobs.py | 2 +- 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/google/cloud/aiplatform/base.py b/google/cloud/aiplatform/base.py index 36d7034b14..357700b9d6 100644 --- a/google/cloud/aiplatform/base.py +++ b/google/cloud/aiplatform/base.py @@ -1075,16 +1075,23 @@ def __repr__(self) -> str: def wait_for_resource_creation(self) -> None: """Wait until underlying resource is created.""" + self._raise_future_exception() + + # If the user calls this but didn't actually invoke an API to create + if self._are_futures_done() and getattr(self, '_gca_resource', None) is None: + self._raise_future_exception() + raise RuntimeError('Resource is not scheduled to be created.') + + while getattr(self._gca_resource, 'name', None) is None: self._raise_future_exception() # will raise if exception occured async time.sleep(1) def _assert_gca_resource_is_available(self): if self._gca_resource is None: - raise RuntimeError(f"{self.__class__} resource has not been created." + - (f" Resource failed with: {self._exception}" if self._exception else - " To wait for resource creation use wait_for_resource_creation.")) + raise RuntimeError(f"{self.__class__.__name__} resource has not been created." + + (f" Resource failed with: {self._exception}" if self._exception else "")) def get_annotation_class(annotation: type) -> type: """Helper method to retrieve type annotation. diff --git a/google/cloud/aiplatform/pipeline_jobs.py b/google/cloud/aiplatform/pipeline_jobs.py index ac57126591..1a0161b30c 100644 --- a/google/cloud/aiplatform/pipeline_jobs.py +++ b/google/cloud/aiplatform/pipeline_jobs.py @@ -208,10 +208,10 @@ def __init__( ) def _assert_gca_resource_is_available(self): - if not getattr(self._gca_resource, 'name', None): - raise RuntimeError(f"{self.__class__} resource has not been created." + - (f" Resource failed with: {self._exception}" if self._exception else - " To wait for resource creation use wait_for_resource_creation.")) + # TODO(b/193800063) Change this to name after this fix + if not getattr(self._gca_resource, 'create_time', None): + raise RuntimeError(f"{self.__class__.__name__} resource has not been created." 
+ + (f" Resource failed with: {self._exception}" if self._exception else "")) @base.optional_sync() def run( diff --git a/tests/unit/aiplatform/test_pipeline_jobs.py b/tests/unit/aiplatform/test_pipeline_jobs.py index 18dc692d38..6f8c0318ce 100644 --- a/tests/unit/aiplatform/test_pipeline_jobs.py +++ b/tests/unit/aiplatform/test_pipeline_jobs.py @@ -268,4 +268,4 @@ def test_cancel_pipeline_job_without_running( with pytest.raises(RuntimeError) as e: job.cancel() - assert e.match(regexp=r"PipelineJob has not been launched") + assert e.match(regexp=r"PipelineJob resource has not been created") From e66b3c126b295b155be6ebf85c31885770a8c360 Mon Sep 17 00:00:00 2001 From: Sasha Sobran Date: Thu, 15 Jul 2021 16:40:05 -0400 Subject: [PATCH 05/11] test: add automl training job tests --- google/cloud/aiplatform/base.py | 11 +- .../test_automl_tabular_training_jobs.py | 186 +++++++++++++++++- 2 files changed, 191 insertions(+), 6 deletions(-) diff --git a/google/cloud/aiplatform/base.py b/google/cloud/aiplatform/base.py index 357700b9d6..7fb773ceb5 100644 --- a/google/cloud/aiplatform/base.py +++ b/google/cloud/aiplatform/base.py @@ -595,7 +595,7 @@ def gca_resource(self) -> proto.Message: def _assert_gca_resource_is_available(self): if self._gca_resource is None: - raise RuntimeError(f"{self.__class__} resource has not been created") + raise RuntimeError(f"{self.__class__.__name__} resource has not been created") def __repr__(self) -> str: return f"{object.__repr__(self)} \nresource name: {self.resource_name}" @@ -1075,17 +1075,18 @@ def __repr__(self) -> str: def wait_for_resource_creation(self) -> None: """Wait until underlying resource is created.""" - - self._raise_future_exception() # If the user calls this but didn't actually invoke an API to create if self._are_futures_done() and getattr(self, '_gca_resource', None) is None: self._raise_future_exception() - raise RuntimeError('Resource is not scheduled to be created.') + raise RuntimeError(f'{self.__class__.__name__} resource is not scheduled to be created.') while getattr(self._gca_resource, 'name', None) is None: - self._raise_future_exception() # will raise if exception occured async + # breaks out of loop if creation has failed async + if self._are_futures_done() and getattr(self, '_gca_resource', None) is None: + self._raise_future_exception() + time.sleep(1) def _assert_gca_resource_is_available(self): diff --git a/tests/unit/aiplatform/test_automl_tabular_training_jobs.py b/tests/unit/aiplatform/test_automl_tabular_training_jobs.py index c43804d743..34b7370f09 100644 --- a/tests/unit/aiplatform/test_automl_tabular_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_tabular_training_jobs.py @@ -126,6 +126,15 @@ def mock_pipeline_service_create(): ) yield mock_create_training_pipeline +@pytest.fixture +def mock_pipeline_service_create_fail(): + with mock.patch.object( + pipeline_service_client.PipelineServiceClient, "create_training_pipeline" + ) as mock_create_training_pipeline: + mock_create_training_pipeline.side_effect = RuntimeError('Mock fail') + yield mock_create_training_pipeline + + @pytest.fixture def mock_pipeline_service_get(): @@ -250,6 +259,10 @@ def test_run_call_pipeline_service_create( sync=sync, ) + job.wait_for_resource_creation() + + assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME + if not sync: model_from_job.wait() @@ -331,6 +344,10 @@ def test_run_call_pipeline_if_no_model_display_name( disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING, ) + job.wait_for_resource_creation() + + assert 
job.resource_name == _TEST_PIPELINE_RESOURCE_NAME + if not sync: model_from_job.wait() @@ -402,6 +419,10 @@ def test_run_call_pipeline_service_create_if_no_column_transformations( sync=sync, ) + job.wait_for_resource_creation() + + assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME + if not sync: model_from_job.wait() @@ -479,6 +500,10 @@ def test_run_call_pipeline_service_create_if_set_additional_experiments( sync=sync, ) + job.wait_for_resource_creation() + + assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME + if not sync: model_from_job.wait() @@ -543,6 +568,10 @@ def test_run_called_twice_raises(self, mock_dataset_tabular, sync): sync=sync, ) + job.wait_for_resource_creation() + + assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME + with pytest.raises(RuntimeError): job.run( dataset=mock_dataset_tabular, @@ -582,11 +611,115 @@ def test_run_raises_if_pipeline_fails( ) if not sync: - job.wait() + job.wait() + + with pytest.raises(RuntimeError): + job.get_model() + + def test_wait_for_resource_creation_does_not_fail_if_creation_does_not_fail( + self, mock_pipeline_service_create_and_get_with_fail, mock_dataset_tabular + ): + + aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME) + + job = training_jobs.AutoMLTabularTrainingJob( + display_name=_TEST_DISPLAY_NAME, + optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE, + optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME, + column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS, + optimization_objective_recall_value=None, + optimization_objective_precision_value=None, + ) + + job.run( + model_display_name=_TEST_MODEL_DISPLAY_NAME, + dataset=mock_dataset_tabular, + target_column=_TEST_TRAINING_TARGET_COLUMN, + training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, + validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, + test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + sync=False, + ) + + job.wait_for_resource_creation() + + assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME + + with pytest.raises(RuntimeError): + job.wait() with pytest.raises(RuntimeError): job.get_model() + @pytest.mark.usefixtures('mock_pipeline_service_create_fail') + @pytest.mark.parametrize('sync', [True, False]) + def test_create_fails( + self, mock_dataset_tabular, sync + ): + + aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME) + + job = training_jobs.AutoMLTabularTrainingJob( + display_name=_TEST_DISPLAY_NAME, + optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE, + optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME, + column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS, + optimization_objective_recall_value=None, + optimization_objective_precision_value=None, + ) + + if sync: + with pytest.raises(RuntimeError): + job.run( + model_display_name=_TEST_MODEL_DISPLAY_NAME, + dataset=mock_dataset_tabular, + target_column=_TEST_TRAINING_TARGET_COLUMN, + training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, + validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, + test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + sync=sync, + ) + + with pytest.raises(RuntimeError) as e: + job.wait_for_resource_creation() + assert e.match(regexp=r"AutoMLTabularTrainingJob resource is not scheduled to be created.") + + with pytest.raises(RuntimeError) as e: + assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME + assert e.match(regexp=r"AutoMLTabularTrainingJob resource has not been created.") + + job.wait() + + with 
pytest.raises(RuntimeError) as e: + job.get_model() + e.match(regexp="TrainingPipeline has not been launched. You must run this TrainingPipeline using TrainingPipeline.run.") + + else: + job.run( + model_display_name=_TEST_MODEL_DISPLAY_NAME, + dataset=mock_dataset_tabular, + target_column=_TEST_TRAINING_TARGET_COLUMN, + training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, + validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, + test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + sync=sync, + ) + + with pytest.raises(RuntimeError) as e: + job.wait_for_resource_creation() + assert e.match(regexp=r"Mock fail") + + with pytest.raises(RuntimeError) as e: + assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME + assert e.match(regexp=r"AutoMLTabularTrainingJob resource has not been created. Resource failed with: Mock fail") + + with pytest.raises(RuntimeError): + job.wait() + + with pytest.raises(RuntimeError): + job.get_model() + + def test_raises_before_run_is_called(self, mock_pipeline_service_create): aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME) @@ -607,3 +740,54 @@ def test_raises_before_run_is_called(self, mock_pipeline_service_create): with pytest.raises(RuntimeError): job.state + + with pytest.raises(RuntimeError) as e: + job.wait_for_resource_creation() + assert e.match(regexp=r"AutoMLTabularTrainingJob resource is not scheduled to be created.") + + + + # pytest.usefixtures('mock_pipeline_service_get') + def test_properties_throw_if_not_available(self): + + job = training_jobs.AutoMLTabularTrainingJob( + display_name=_TEST_DISPLAY_NAME, + optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE, + optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME, + ) + + with pytest.raises(RuntimeError) as e: + name = job.name + assert e.match(regexp=r"AutoMLTabularTrainingJob resource has not been created") + + with pytest.raises(RuntimeError) as e: + name = job.resource_name + assert e.match(regexp=r"AutoMLTabularTrainingJob resource has not been created") + + with pytest.raises(RuntimeError) as e: + name = job.display_name + assert e.match(regexp=r"AutoMLTabularTrainingJob resource has not been created") + + with pytest.raises(RuntimeError) as e: + name = job.create_time + assert e.match(regexp=r"AutoMLTabularTrainingJob resource has not been created") + + with pytest.raises(RuntimeError) as e: + name = job.encryption_spec + assert e.match(regexp=r"AutoMLTabularTrainingJob resource has not been created") + + with pytest.raises(RuntimeError) as e: + name = job.labels + assert e.match(regexp=r"AutoMLTabularTrainingJob resource has not been created") + + with pytest.raises(RuntimeError) as e: + name = job.gca_resource + assert e.match(regexp=r"AutoMLTabularTrainingJob resource has not been created") + + + + + + + + From bfa3912443c6e3b957ab137cc9a006076a4f010e Mon Sep 17 00:00:00 2001 From: Sasha Sobran Date: Mon, 19 Jul 2021 10:28:57 -0400 Subject: [PATCH 06/11] Add wait_for_resource_creation only on runnable classes. 
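This patch keeps the polling helper protected on the base class and exposes a public wait_for_resource_creation() only on the classes that follow the construct-then-run pattern (_RunnableJob subclasses, PipelineJob, and _TrainingJob). The intended call sequence looks roughly like the sketch below (a usage sketch only; the display name is a placeholder and worker_pool_specs is assumed to be defined elsewhere):

    job = aiplatform.CustomJob(
        display_name="example-job",           # placeholder
        worker_pool_specs=worker_pool_specs,  # assumed defined elsewhere
    )
    job.run(sync=False)
    # Blocks until the backing proto has a name, re-raising any error
    # captured by the async creation future:
    job.wait_for_resource_creation()
    print(job.resource_name)  # safe to read once creation has finished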
--- google/cloud/aiplatform/base.py | 24 +++++- google/cloud/aiplatform/jobs.py | 4 + google/cloud/aiplatform/pipeline_jobs.py | 4 + google/cloud/aiplatform/training_jobs.py | 4 + .../test_automl_tabular_training_jobs.py | 8 -- tests/unit/aiplatform/test_datasets.py | 73 +++++++++++++++++++ tests/unit/aiplatform/test_jobs.py | 2 + 7 files changed, 109 insertions(+), 10 deletions(-) diff --git a/google/cloud/aiplatform/base.py b/google/cloud/aiplatform/base.py index 7fb773ceb5..22a7979072 100644 --- a/google/cloud/aiplatform/base.py +++ b/google/cloud/aiplatform/base.py @@ -1073,8 +1073,22 @@ def __repr__(self) -> str: return FutureManager.__repr__(self) - def wait_for_resource_creation(self) -> None: - """Wait until underlying resource is created.""" + def _wait_for_resource_creation(self) -> None: + """Wait until underlying resource is created. + + Currently this should only be used on subclasses that implement the construct-then-`run` + pattern, because the underlying sync=False implementation will not update + the downstream resource noun object's _gca_resource until the entire invoked method is complete. + + Ex: + + job = CustomTrainingJob() + job.run(sync=False, ...) + job._wait_for_resource_creation() + + Raises: + RuntimeError if the resource has not been scheduled to be created. + """ # If the user calls this but didn't actually invoke an API to create if self._are_futures_done() and getattr(self, '_gca_resource', None) is None: self._raise_future_exception() @@ -1090,10 +1104,16 @@ def wait_for_resource_creation(self) -> None: time.sleep(1) def _assert_gca_resource_is_available(self): + """Helper method to raise when accessing properties that do not exist. + + Raises: + RuntimeError when resource has not been created. + """ if self._gca_resource is None: raise RuntimeError(f"{self.__class__.__name__} resource has not been created." + (f" Resource failed with: {self._exception}" if self._exception else "")) + def get_annotation_class(annotation: type) -> type: """Helper method to retrieve type annotation. diff --git a/google/cloud/aiplatform/jobs.py b/google/cloud/aiplatform/jobs.py index 4ea7754096..8c7466ca10 100644 --- a/google/cloud/aiplatform/jobs.py +++ b/google/cloud/aiplatform/jobs.py @@ -907,6 +907,10 @@ def get( return self + def wait_for_resource_creation(self): + """Waits until resource has been created.""" + self._wait_for_resource_creation() + class DataLabelingJob(_Job): _resource_noun = "dataLabelingJobs" diff --git a/google/cloud/aiplatform/pipeline_jobs.py b/google/cloud/aiplatform/pipeline_jobs.py index 1a0161b30c..e4424167db 100644 --- a/google/cloud/aiplatform/pipeline_jobs.py +++ b/google/cloud/aiplatform/pipeline_jobs.py @@ -313,3 +313,7 @@ def cancel(self) -> None: becomes a job with state set to `CANCELLED`.
""" self.api_client.cancel_pipeline_job(name=self.resource_name) + + def wait_for_resource_creation(self): + """Waits until resource has been created.""" + self._wait_for_resource_creation() diff --git a/google/cloud/aiplatform/training_jobs.py b/google/cloud/aiplatform/training_jobs.py index 2a6ecd374f..90c9576bfb 100644 --- a/google/cloud/aiplatform/training_jobs.py +++ b/google/cloud/aiplatform/training_jobs.py @@ -852,6 +852,10 @@ def cancel(self) -> None: ) self.api_client.cancel_training_pipeline(name=self.resource_name) + def wait_for_resource_creation(self): + """Waits until resource has been created.""" + self._wait_for_resource_creation() + class _CustomTrainingJob(_TrainingJob): """ABC for Custom Training Pipelines..""" diff --git a/tests/unit/aiplatform/test_automl_tabular_training_jobs.py b/tests/unit/aiplatform/test_automl_tabular_training_jobs.py index 34b7370f09..6e68a84975 100644 --- a/tests/unit/aiplatform/test_automl_tabular_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_tabular_training_jobs.py @@ -783,11 +783,3 @@ def test_properties_throw_if_not_available(self): with pytest.raises(RuntimeError) as e: name = job.gca_resource assert e.match(regexp=r"AutoMLTabularTrainingJob resource has not been created") - - - - - - - - diff --git a/tests/unit/aiplatform/test_datasets.py b/tests/unit/aiplatform/test_datasets.py index 5da47bea59..472a322aaa 100644 --- a/tests/unit/aiplatform/test_datasets.py +++ b/tests/unit/aiplatform/test_datasets.py @@ -298,6 +298,14 @@ def create_dataset_mock(): create_dataset_mock.return_value = create_dataset_lro_mock yield create_dataset_mock +@pytest.fixture +def create_dataset_mock_fail(): + with patch.object( + dataset_service_client.DatasetServiceClient, "create_dataset" + ) as create_dataset_mock: + create_dataset_mock.side_effect = RuntimeError('Mock fail') + yield create_dataset_mock + @pytest.fixture def delete_dataset_mock(): @@ -320,6 +328,14 @@ def import_data_mock(): import_data_mock.return_value = mock.Mock(operation.Operation) yield import_data_mock +@pytest.fixture +def import_data_mock_fail(): + with patch.object( + dataset_service_client.DatasetServiceClient, "import_data" + ) as import_data_mock: + import_data_mock.side_effect = RuntimeError('Mock fail') + yield import_data_mock + @pytest.fixture def export_data_mock(): @@ -902,6 +918,8 @@ def test_create_dataset_with_default_encryption_key( if not sync: my_dataset.wait() + assert my_dataset.metadata_schema_uri == _TEST_METADATA_SCHEMA_URI_TABULAR + expected_dataset = gca_dataset.Dataset( display_name=_TEST_DISPLAY_NAME, metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TABULAR, @@ -915,6 +933,25 @@ def test_create_dataset_with_default_encryption_key( metadata=_TEST_REQUEST_METADATA, ) + @pytest.mark.usefixtures("create_dataset_mock_fail") + def test_create_dataset_fail(self): + aiplatform.init( + project=_TEST_PROJECT, encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + ) + + my_dataset = datasets.TabularDataset.create( + display_name=_TEST_DISPLAY_NAME, bq_source=_TEST_SOURCE_URI_BQ, sync=False, + ) + + with pytest.raises(RuntimeError) as e: + my_dataset.wait() + assert e.match(regexp=r"Mock fail") + + with pytest.raises(RuntimeError) as e: + my_dataset.metadata_schema_uri == _TEST_METADATA_SCHEMA_URI_TABULAR + assert e.match(regexp=r"TabularDataset resource has not been created. 
Resource failed with: Mock fail") + + @pytest.mark.usefixtures("get_dataset_tabular_bq_mock") @pytest.mark.parametrize("sync", [True, False]) def test_create_dataset(self, create_dataset_mock, sync): @@ -1136,6 +1173,42 @@ def test_create_and_import_dataset( expected_dataset.name = _TEST_NAME assert my_dataset._gca_resource == expected_dataset + + @pytest.mark.usefixtures("create_dataset_mock", "get_dataset_text_mock", "import_data_mock_fail") + @pytest.mark.parametrize("sync", [True, False]) + def test_create_then_import_dataset_fails(self, sync): + aiplatform.init(project=_TEST_PROJECT) + + my_dataset = datasets.TextDataset.create( + display_name=_TEST_DISPLAY_NAME, + encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME, + sync=sync, + ) + + + if sync: + + with pytest.raises(RuntimeError) as e: + my_dataset.import_data( + gcs_source=[_TEST_SOURCE_URI_GCS], + import_schema_uri=_TEST_IMPORT_SCHEMA_URI_TEXT, + sync=sync, + ) + e.match(regexp=r"Mock fail") + + else: + + my_dataset.import_data( + gcs_source=[_TEST_SOURCE_URI_GCS], + import_schema_uri=_TEST_IMPORT_SCHEMA_URI_TEXT, + sync=sync, + ) + + with pytest.raises(RuntimeError) as e: + my_dataset.wait() + e.match(regexp=r"Mock fail") + + @pytest.mark.usefixtures("get_dataset_text_mock") @pytest.mark.parametrize("sync", [True, False]) def test_import_data(self, import_data_mock, sync): diff --git a/tests/unit/aiplatform/test_jobs.py b/tests/unit/aiplatform/test_jobs.py index acc7317ebb..ed8cb31c62 100644 --- a/tests/unit/aiplatform/test_jobs.py +++ b/tests/unit/aiplatform/test_jobs.py @@ -472,6 +472,8 @@ def test_batch_predict_gcs_source_bq_dest( if not sync: batch_prediction_job.wait() + assert batch_prediction_job.output_info == gca_batch_prediction_job.BatchPredictionJob.OutputInfo() + # Construct expected request expected_gapic_batch_prediction_job = gca_batch_prediction_job.BatchPredictionJob( display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME, From 34fc547cdddee622e5696f2a7016191e08348cf4 Mon Sep 17 00:00:00 2001 From: Sasha Sobran Date: Wed, 21 Jul 2021 10:55:54 -0400 Subject: [PATCH 07/11] checkpoint --- google/cloud/aiplatform/base.py | 14 ++-- google/cloud/aiplatform/jobs.py | 8 +- tests/unit/aiplatform/test_custom_job.py | 62 ++++++++++++++- tests/unit/aiplatform/test_datasets.py | 6 +- .../test_hyperparameter_tuning_job.py | 79 ++++++++++++++++++- tests/unit/aiplatform/test_jobs.py | 44 +++++++++++ tests/unit/aiplatform/test_models.py | 33 ++++++++ 7 files changed, 229 insertions(+), 17 deletions(-) diff --git a/google/cloud/aiplatform/base.py b/google/cloud/aiplatform/base.py index 22a7979072..b39171166c 100644 --- a/google/cloud/aiplatform/base.py +++ b/google/cloud/aiplatform/base.py @@ -594,6 +594,11 @@ def gca_resource(self) -> proto.Message: return self._gca_resource def _assert_gca_resource_is_available(self): + """Helper method to raise when property is not accessible. + + Raises: + RuntimeError if _gca_resource is has not been created. 
+ """ if self._gca_resource is None: raise RuntimeError(f"{self.__class__.__name__} resource has not been created") @@ -1091,14 +1096,13 @@ def _wait_for_resource_creation(self) -> None: """ # If the user calls this but didn't actually invoke an API to create - if self._are_futures_done() and getattr(self, '_gca_resource', None) is None: + if self._are_futures_done() and not getattr(self._gca_resource, 'name', None): self._raise_future_exception() raise RuntimeError(f'{self.__class__.__name__} resource is not scheduled to be created.') - - while getattr(self._gca_resource, 'name', None) is None: + while not getattr(self._gca_resource, 'name', None): # breaks out of loop if creation has failed async - if self._are_futures_done() and getattr(self, '_gca_resource', None) is None: + if self._are_futures_done() and not getattr(self._gca_resource, 'name', None): self._raise_future_exception() time.sleep(1) @@ -1109,7 +1113,7 @@ def _assert_gca_resource_is_available(self): Raises: RuntimeError when resource has not been created. """ - if self._gca_resource is None: + if not getattr(self._gca_resource, 'name', None): raise RuntimeError(f"{self.__class__.__name__} resource has not been created." + (f" Resource failed with: {self._exception}" if self._exception else "")) diff --git a/google/cloud/aiplatform/jobs.py b/google/cloud/aiplatform/jobs.py index 8c7466ca10..a40935f330 100644 --- a/google/cloud/aiplatform/jobs.py +++ b/google/cloud/aiplatform/jobs.py @@ -860,12 +860,6 @@ def __init__( project=project, location=location ) - def _assert_gca_resource_is_available(self): - if not getattr(self._gca_resource, 'name', None): - raise RuntimeError(f"{self.__class__} resource has not been created." + - (f" Resource failed with: {self._exception}" if self._exception else - " To wait for resource creation use wait_for_resource_creation.")) - @abc.abstractmethod def run(self) -> None: pass @@ -1040,7 +1034,7 @@ def network(self) -> Optional[str]: unspecified, the CustomJob is not peered with any network. 
""" self._assert_gca_resource_is_available() - return getattr(self._gca_resource, "network") + return self._gca_resource.job_spec.network @classmethod def from_local_script( diff --git a/tests/unit/aiplatform/test_custom_job.py b/tests/unit/aiplatform/test_custom_job.py index 7797e0edef..d8f7220f33 100644 --- a/tests/unit/aiplatform/test_custom_job.py +++ b/tests/unit/aiplatform/test_custom_job.py @@ -162,6 +162,11 @@ def get_custom_job_mock_with_fail(): state=gca_job_state_compat.JobState.JOB_STATE_FAILED, error=status_pb2.Status(message="Test Error"), ), + _get_custom_job_proto( + name=_TEST_CUSTOM_JOB_NAME, + state=gca_job_state_compat.JobState.JOB_STATE_FAILED, + error=status_pb2.Status(message="Test Error"), + ) ] yield get_custom_job_mock @@ -177,6 +182,14 @@ def create_custom_job_mock(): ) yield create_custom_job_mock +@pytest.fixture +def create_custom_job_mock_fail(): + with mock.patch.object( + job_service_client.JobServiceClient, "create_custom_job" + ) as create_custom_job_mock: + create_custom_job_mock.side_effect = RuntimeError('Mock fail') + yield create_custom_job_mock + @pytest.fixture def create_custom_job_v1beta1_mock(): @@ -221,6 +234,10 @@ def test_create_custom_job(self, create_custom_job_mock, get_custom_job_mock, sy sync=sync, ) + job.wait_for_resource_creation() + + assert job.resource_name == _TEST_CUSTOM_JOB_NAME + job.wait() expected_custom_job = _get_custom_job_proto() @@ -233,6 +250,7 @@ def test_create_custom_job(self, create_custom_job_mock, get_custom_job_mock, sy assert ( job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED ) + assert job.network == _TEST_NETWORK @pytest.mark.parametrize("sync", [True, False]) def test_run_custom_job_with_fail_raises( @@ -249,6 +267,11 @@ def test_run_custom_job_with_fail_raises( display_name=_TEST_DISPLAY_NAME, worker_pool_specs=_TEST_WORKER_POOL_SPEC ) + with pytest.raises(RuntimeError) as e: + job.wait_for_resource_creation() + assert e.match(r"CustomJob resource is not scheduled to be created.") + + with pytest.raises(RuntimeError): job.run( service_account=_TEST_SERVICE_ACCOUNT, @@ -260,6 +283,10 @@ def test_run_custom_job_with_fail_raises( job.wait() + # shouldn't fail + job.wait_for_resource_creation() + assert job.resource_name == _TEST_CUSTOM_JOB_NAME + expected_custom_job = _get_custom_job_proto() create_custom_job_mock.assert_called_once_with( @@ -267,7 +294,40 @@ def test_run_custom_job_with_fail_raises( ) assert job.job_spec == expected_custom_job.job_spec - assert job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_FAILED + assert job.state == gca_job_state_compat.JobState.JOB_STATE_FAILED + + @pytest.mark.usefixtures('create_custom_job_mock_fail') + def test_run_custom_job_with_fail_at_creation(self): + aiplatform.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + staging_bucket=_TEST_STAGING_BUCKET, + encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME, + ) + + job = aiplatform.CustomJob( + display_name=_TEST_DISPLAY_NAME, worker_pool_specs=_TEST_WORKER_POOL_SPEC + ) + + job.run( + service_account=_TEST_SERVICE_ACCOUNT, + network=_TEST_NETWORK, + timeout=_TEST_TIMEOUT, + restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, + sync=False, + ) + + with pytest.raises(RuntimeError) as e: + job.wait_for_resource_creation() + assert e.match('Mock fail') + + with pytest.raises(RuntimeError) as e: + resource_name = job.resource_name + assert e.match('CustomJob resource has not been created. 
Resource failed with: Mock fail') + + with pytest.raises(RuntimeError) as e: + network = job.network + assert e.match('CustomJob resource has not been created. Resource failed with: Mock fail') def test_custom_job_get_state_raises_without_run(self): aiplatform.init( diff --git a/tests/unit/aiplatform/test_datasets.py b/tests/unit/aiplatform/test_datasets.py index 472a322aaa..7e147bcb03 100644 --- a/tests/unit/aiplatform/test_datasets.py +++ b/tests/unit/aiplatform/test_datasets.py @@ -948,7 +948,11 @@ def test_create_dataset_fail(self): assert e.match(regexp=r"Mock fail") with pytest.raises(RuntimeError) as e: - my_dataset.metadata_schema_uri == _TEST_METADATA_SCHEMA_URI_TABULAR + metadata_schema_uri = my_dataset.metadata_schema_uri + assert e.match(regexp=r"TabularDataset resource has not been created. Resource failed with: Mock fail") + + with pytest.raises(RuntimeError) as e: + column_names = my_dataset.column_names assert e.match(regexp=r"TabularDataset resource has not been created. Resource failed with: Mock fail") diff --git a/tests/unit/aiplatform/test_hyperparameter_tuning_job.py b/tests/unit/aiplatform/test_hyperparameter_tuning_job.py index f4102fc3bb..1ba0c9091a 100644 --- a/tests/unit/aiplatform/test_hyperparameter_tuning_job.py +++ b/tests/unit/aiplatform/test_hyperparameter_tuning_job.py @@ -169,6 +169,10 @@ def get_hyperparameter_tuning_job_mock(): name=_TEST_HYPERPARAMETERTUNING_JOB_NAME, state=gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED, ), + _get_hyperparameter_tuning_job_proto( + name=_TEST_HYPERPARAMETERTUNING_JOB_NAME, + state=gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED, + ), ] yield get_hyperparameter_tuning_job_mock @@ -207,6 +211,14 @@ def create_hyperparameter_tuning_job_mock(): ) yield create_hyperparameter_tuning_job_mock +@pytest.fixture +def create_hyperparameter_tuning_job_mock_fail(): + with mock.patch.object( + job_service_client.JobServiceClient, "create_hyperparameter_tuning_job" + ) as create_hyperparameter_tuning_job_mock: + create_hyperparameter_tuning_job_mock.side_effect = RuntimeError('Mock fail') + yield create_hyperparameter_tuning_job_mock + @pytest.fixture def create_hyperparameter_tuning_job_v1beta1_mock(): @@ -287,9 +299,10 @@ def test_create_hyperparameter_tuning_job( hyperparameter_tuning_job=expected_hyperparameter_tuning_job, ) - assert ( - job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED - ) + assert job.state == gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED + assert job.network == _TEST_NETWORK + assert job.trials == [] + @pytest.mark.parametrize("sync", [True, False]) def test_run_hyperparameter_tuning_job_with_fail_raises( @@ -351,6 +364,66 @@ def test_run_hyperparameter_tuning_job_with_fail_raises( assert job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_FAILED + + @pytest.mark.usefixtures('create_hyperparameter_tuning_job_mock_fail') + def test_run_hyperparameter_tuning_job_with_fail_at_creation(self): + aiplatform.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + staging_bucket=_TEST_STAGING_BUCKET, + encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME, + ) + + custom_job = aiplatform.CustomJob( + display_name=test_custom_job._TEST_DISPLAY_NAME, + worker_pool_specs=test_custom_job._TEST_WORKER_POOL_SPEC, + ) + + job = aiplatform.HyperparameterTuningJob( + display_name=_TEST_DISPLAY_NAME, + custom_job=custom_job, + metric_spec={_TEST_METRIC_SPEC_KEY: _TEST_METRIC_SPEC_VALUE}, + parameter_spec={ + "lr": hpt.DoubleParameterSpec(min=0.001, max=0.1, 
scale="log"), + "units": hpt.IntegerParameterSpec(min=4, max=1028, scale="linear"), + "activation": hpt.CategoricalParameterSpec( + values=["relu", "sigmoid", "elu", "selu", "tanh"] + ), + "batch_size": hpt.DiscreteParameterSpec( + values=[16, 32], scale="linear" + ), + }, + parallel_trial_count=_TEST_PARALLEL_TRIAL_COUNT, + max_trial_count=_TEST_MAX_TRIAL_COUNT, + max_failed_trial_count=_TEST_MAX_FAILED_TRIAL_COUNT, + search_algorithm=_TEST_SEARCH_ALGORITHM, + measurement_selection=_TEST_MEASUREMENT_SELECTION, + ) + + job.run( + service_account=_TEST_SERVICE_ACCOUNT, + network=_TEST_NETWORK, + timeout=_TEST_TIMEOUT, + restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, + sync=False, + ) + + with pytest.raises(RuntimeError) as e: + job.wait_for_resource_creation() + assert e.match('Mock fail') + + with pytest.raises(RuntimeError) as e: + resource_name = job.resource_name + assert e.match('HyperparameterTuningJob resource has not been created. Resource failed with: Mock fail') + + with pytest.raises(RuntimeError) as e: + network = job.network + assert e.match('HyperparameterTuningJob resource has not been created. Resource failed with: Mock fail') + + with pytest.raises(RuntimeError) as e: + trials = job.trials + assert e.match('HyperparameterTuningJob resource has not been created. Resource failed with: Mock fail') + def test_hyperparameter_tuning_job_get_state_raises_without_run(self): aiplatform.init( project=_TEST_PROJECT, diff --git a/tests/unit/aiplatform/test_jobs.py b/tests/unit/aiplatform/test_jobs.py index ed8cb31c62..5b65cf27e0 100644 --- a/tests/unit/aiplatform/test_jobs.py +++ b/tests/unit/aiplatform/test_jobs.py @@ -240,6 +240,15 @@ def create_batch_prediction_job_mock(): yield create_batch_prediction_job_mock +@pytest.fixture +def create_batch_prediction_job_mock_fail(): + with mock.patch.object( + job_service_client.JobServiceClient, "create_batch_prediction_job" + ) as create_batch_prediction_job_mock: + create_batch_prediction_job_mock.side_effect = RuntimeError('Mock fail') + yield create_batch_prediction_job_mock + + @pytest.fixture def create_batch_prediction_job_with_explanations_mock(): with mock.patch.object( @@ -564,6 +573,41 @@ def test_batch_predict_with_all_args( batch_prediction_job=expected_gapic_batch_prediction_job, ) + + @pytest.mark.usefixtures("create_batch_prediction_job_mock_fail") + def test_batch_predict_create_fails(self): + aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION) + + batch_prediction_job = jobs.BatchPredictionJob.create( + model_name=_TEST_MODEL_NAME, + job_display_name=_TEST_BATCH_PREDICTION_JOB_DISPLAY_NAME, + gcs_source=_TEST_BATCH_PREDICTION_GCS_SOURCE, + bigquery_destination_prefix=_TEST_BATCH_PREDICTION_BQ_PREFIX, + sync=False, + ) + + with pytest.raises(RuntimeError) as e: + batch_prediction_job.wait() + assert e.match(regexp=r'Mock fail') + + with pytest.raises(RuntimeError) as e: + output_info = batch_prediction_job.output_info + assert e.match(regexp=r"BatchPredictionJob resource has not been created. Resource failed with: Mock fail") + + with pytest.raises(RuntimeError) as e: + partial_failures = batch_prediction_job.partial_failures + assert e.match(regexp=r"BatchPredictionJob resource has not been created. Resource failed with: Mock fail") + + with pytest.raises(RuntimeError) as e: + completion_stats = batch_prediction_job.completion_stats + assert e.match(regexp=r"BatchPredictionJob resource has not been created. 
Resource failed with: Mock fail") + + with pytest.raises(RuntimeError) as e: + iter_outputs = batch_prediction_job.iter_outputs() + assert e.match(regexp=r"BatchPredictionJob resource has not been created. Resource failed with: Mock fail") + + + @pytest.mark.usefixtures("get_batch_prediction_job_mock") def test_batch_predict_no_source(self, create_batch_prediction_job_mock): aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION) diff --git a/tests/unit/aiplatform/test_models.py b/tests/unit/aiplatform/test_models.py index be4f7f61bd..f00e96c89c 100644 --- a/tests/unit/aiplatform/test_models.py +++ b/tests/unit/aiplatform/test_models.py @@ -255,6 +255,8 @@ def get_model_with_custom_project_mock(): get_model_mock.return_value = gca_model.Model( display_name=_TEST_MODEL_NAME, name=_TEST_MODEL_RESOURCE_NAME_CUSTOM_PROJECT, + artifact_uri=_TEST_ARTIFACT_URI, + description=_TEST_DESCRIPTION ) yield get_model_mock @@ -726,6 +728,37 @@ def test_upload_uploads_and_gets_model_with_custom_project( name=test_model_resource_name ) + assert my_model.uri == _TEST_ARTIFACT_URI + assert my_model.supported_export_formats == {} + assert my_model.supported_deployment_resources_types == [] + assert my_model.supported_input_storage_formats == [] + assert my_model.supported_output_storage_formats == [] + assert my_model.description == _TEST_DESCRIPTION + + @pytest.mark.usefixtures("get_model_with_custom_project_mock") + def test_accessing_properties_with_no_resource_raises( + self, + upload_model_with_custom_project_mock, + get_model_with_custom_project_mock, + sync, + ): + + test_model_resource_name = model_service_client.ModelServiceClient.model_path( + _TEST_PROJECT_2, _TEST_LOCATION, _TEST_ID + ) + + my_model = models.Model(test_model_resource_name) + my_model._gca_resource = None + + assert my_model.uri == _TEST_ARTIFACT_URI + assert my_model.supported_export_formats == {} + assert my_model.supported_deployment_resources_types == [] + assert my_model.supported_input_storage_formats == [] + assert my_model.supported_output_storage_formats == [] + assert my_model.description == _TEST_DESCRIPTION + + + @pytest.mark.usefixtures("get_model_with_custom_location_mock") @pytest.mark.parametrize("sync", [True, False]) def test_upload_uploads_and_gets_model_with_custom_location( From 73ca8438795b9626df9125c6013f039fc3f542df Mon Sep 17 00:00:00 2001 From: Sasha Sobran Date: Thu, 22 Jul 2021 16:42:31 -0400 Subject: [PATCH 08/11] test: update tests and fix network property --- google/cloud/aiplatform/models.py | 2 +- tests/unit/aiplatform/test_endpoints.py | 30 +++++++++++++++++++--- tests/unit/aiplatform/test_models.py | 33 ++++++++++++++++++------- 3 files changed, 51 insertions(+), 14 deletions(-) diff --git a/google/cloud/aiplatform/models.py b/google/cloud/aiplatform/models.py index a0f5cfb76b..137c355e3d 100644 --- a/google/cloud/aiplatform/models.py +++ b/google/cloud/aiplatform/models.py @@ -147,7 +147,7 @@ def network(self) -> Optional[str]: unspecified, the Endpoint is not peered with any network. 
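
        Example (illustrative sketch; assumes ``aiplatform.init`` has already
        been called with a project and location):

            endpoint = aiplatform.Endpoint.create(display_name="my-endpoint")
            if endpoint.network is None:
                # Not peered with any VPC; traffic uses the public endpoint.
                ...
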
""" self._assert_gca_resource_is_available() - return getattr(self._gca_resource, "network") + return getattr(self._gca_resource, "network", None) @classmethod def create( diff --git a/tests/unit/aiplatform/test_endpoints.py b/tests/unit/aiplatform/test_endpoints.py index e9f7de971a..4b9ba32a0d 100644 --- a/tests/unit/aiplatform/test_endpoints.py +++ b/tests/unit/aiplatform/test_endpoints.py @@ -147,13 +147,13 @@ _TEST_ENDPOINT_LIST = [ gca_endpoint.Endpoint( - display_name="aac", create_time=datetime.now() - timedelta(minutes=15) + name=_TEST_ENDPOINT_NAME, display_name="aac", create_time=datetime.now() - timedelta(minutes=15) ), gca_endpoint.Endpoint( - display_name="aab", create_time=datetime.now() - timedelta(minutes=5) + name=_TEST_ENDPOINT_NAME, display_name="aab", create_time=datetime.now() - timedelta(minutes=5) ), gca_endpoint.Endpoint( - display_name="aaa", create_time=datetime.now() - timedelta(minutes=10) + name=_TEST_ENDPOINT_NAME, display_name="aaa", create_time=datetime.now() - timedelta(minutes=10) ), ] @@ -487,7 +487,29 @@ def test_create(self, create_endpoint_mock, sync): ) expected_endpoint.name = _TEST_ENDPOINT_NAME - assert my_endpoint._gca_resource == expected_endpoint + assert my_endpoint.gca_resource == expected_endpoint + assert my_endpoint.network == None + + + @pytest.mark.usefixtures("get_endpoint_mock") + def test_accessing_properties_with_no_resource_raises( + self, + ): + + my_endpoint = aiplatform.Endpoint(_TEST_ENDPOINT_NAME) + + + my_endpoint._gca_resource = None + + with pytest.raises(RuntimeError) as e: + gca_resource = my_endpoint.gca_resource + e.match(regexp=r"Endpoint resource has not been created.") + + with pytest.raises(RuntimeError) as e: + network = my_endpoint.network + e.match(regexp=r"Endpoint resource has not been created.") + + @pytest.mark.usefixtures("get_endpoint_mock") @pytest.mark.parametrize("sync", [True, False]) diff --git a/tests/unit/aiplatform/test_models.py b/tests/unit/aiplatform/test_models.py index f00e96c89c..82b8212ed9 100644 --- a/tests/unit/aiplatform/test_models.py +++ b/tests/unit/aiplatform/test_models.py @@ -738,9 +738,6 @@ def test_upload_uploads_and_gets_model_with_custom_project( @pytest.mark.usefixtures("get_model_with_custom_project_mock") def test_accessing_properties_with_no_resource_raises( self, - upload_model_with_custom_project_mock, - get_model_with_custom_project_mock, - sync, ): test_model_resource_name = model_service_client.ModelServiceClient.model_path( @@ -750,13 +747,31 @@ def test_accessing_properties_with_no_resource_raises( my_model = models.Model(test_model_resource_name) my_model._gca_resource = None - assert my_model.uri == _TEST_ARTIFACT_URI - assert my_model.supported_export_formats == {} - assert my_model.supported_deployment_resources_types == [] - assert my_model.supported_input_storage_formats == [] - assert my_model.supported_output_storage_formats == [] - assert my_model.description == _TEST_DESCRIPTION + with pytest.raises(RuntimeError) as e: + uri = my_model.uri + e.match(regexp=r"Model resource has not been created.") + + with pytest.raises(RuntimeError) as e: + supported_export_formats = my_model.supported_export_formats + e.match(regexp=r"Model resource has not been created.") + + with pytest.raises(RuntimeError) as e: + supported_deployment_resources_types = my_model.supported_deployment_resources_types + e.match(regexp=r"Model resource has not been created.") + + + with pytest.raises(RuntimeError) as e: + supported_input_storage_formats = my_model.supported_input_storage_formats 
+ e.match(regexp=r"Model resource has not been created.") + + + with pytest.raises(RuntimeError) as e: + supported_output_storage_formats = my_model.supported_output_storage_formats + e.match(regexp=r"Model resource has not been created.") + with pytest.raises(RuntimeError) as e: + description = my_model.description + e.match(regexp=r"Model resource has not been created.") @pytest.mark.usefixtures("get_model_with_custom_location_mock") From 1605ab03b716b06707c0a7d2c6af52665471d6f2 Mon Sep 17 00:00:00 2001 From: Sasha Sobran Date: Fri, 23 Jul 2021 11:17:26 -0400 Subject: [PATCH 09/11] chore: linting --- google/cloud/aiplatform/base.py | 38 ++++--- google/cloud/aiplatform/pipeline_jobs.py | 14 ++- .../test_automl_tabular_training_jobs.py | 101 +++++++++++------- tests/unit/aiplatform/test_custom_job.py | 22 ++-- tests/unit/aiplatform/test_datasets.py | 46 +++++--- tests/unit/aiplatform/test_endpoints.py | 26 ++--- .../test_hyperparameter_tuning_job.py | 27 +++-- tests/unit/aiplatform/test_jobs.py | 36 ++++--- tests/unit/aiplatform/test_models.py | 21 ++-- 9 files changed, 196 insertions(+), 135 deletions(-) diff --git a/google/cloud/aiplatform/base.py b/google/cloud/aiplatform/base.py index b39171166c..ab60eeaa25 100644 --- a/google/cloud/aiplatform/base.py +++ b/google/cloud/aiplatform/base.py @@ -600,7 +600,9 @@ def _assert_gca_resource_is_available(self): RuntimeError if _gca_resource is has not been created. """ if self._gca_resource is None: - raise RuntimeError(f"{self.__class__.__name__} resource has not been created") + raise RuntimeError( + f"{self.__class__.__name__} resource has not been created" + ) def __repr__(self) -> str: return f"{object.__repr__(self)} \nresource name: {self.resource_name}" @@ -1086,25 +1088,27 @@ def _wait_for_resource_creation(self) -> None: downstream resource noun object's _gca_resource until the entire invoked method is complete. Ex: - job = CustomTrainingJob() job.run(sync=False, ...) - job._wait_for_resource_creation() - + job._wait_for_resource_creation() Raises: RuntimeError if the resource has not been scheduled to be created. """ - - # If the user calls this but didn't actually invoke an API to create - if self._are_futures_done() and not getattr(self._gca_resource, 'name', None): + + # If the user calls this but didn't actually invoke an API to create + if self._are_futures_done() and not getattr(self._gca_resource, "name", None): self._raise_future_exception() - raise RuntimeError(f'{self.__class__.__name__} resource is not scheduled to be created.') + raise RuntimeError( + f"{self.__class__.__name__} resource is not scheduled to be created." + ) - while not getattr(self._gca_resource, 'name', None): + while not getattr(self._gca_resource, "name", None): # breaks out of loop if creation has failed async - if self._are_futures_done() and not getattr(self._gca_resource, 'name', None): + if self._are_futures_done() and not getattr( + self._gca_resource, "name", None + ): self._raise_future_exception() - + time.sleep(1) def _assert_gca_resource_is_available(self): @@ -1113,9 +1117,15 @@ def _assert_gca_resource_is_available(self): Raises: RuntimeError when resource has not been created. """ - if not getattr(self._gca_resource, 'name', None): - raise RuntimeError(f"{self.__class__.__name__} resource has not been created." + - (f" Resource failed with: {self._exception}" if self._exception else "")) + if not getattr(self._gca_resource, "name", None): + raise RuntimeError( + f"{self.__class__.__name__} resource has not been created." 
+ + ( + f" Resource failed with: {self._exception}" + if self._exception + else "" + ) + ) def get_annotation_class(annotation: type) -> type: diff --git a/google/cloud/aiplatform/pipeline_jobs.py b/google/cloud/aiplatform/pipeline_jobs.py index f739fff1e8..84eec34284 100644 --- a/google/cloud/aiplatform/pipeline_jobs.py +++ b/google/cloud/aiplatform/pipeline_jobs.py @@ -222,9 +222,15 @@ def __init__( def _assert_gca_resource_is_available(self): # TODO(b/193800063) Change this to name after this fix - if not getattr(self._gca_resource, 'create_time', None): - raise RuntimeError(f"{self.__class__.__name__} resource has not been created." + - (f" Resource failed with: {self._exception}" if self._exception else "")) + if not getattr(self._gca_resource, "create_time", None): + raise RuntimeError( + f"{self.__class__.__name__} resource has not been created." + + ( + f" Resource failed with: {self._exception}" + if self._exception + else "" + ) + ) @base.optional_sync() def run( @@ -242,7 +248,7 @@ def run( network (str): Optional. The full name of the Compute Engine network to which the job should be peered. For example, projects/12345/global/networks/myVPC. - + Private services access must already be configured for the network. If left unspecified, the job is not peered with any network. sync (bool): diff --git a/tests/unit/aiplatform/test_automl_tabular_training_jobs.py b/tests/unit/aiplatform/test_automl_tabular_training_jobs.py index 01d4ee85ad..ee6e490342 100644 --- a/tests/unit/aiplatform/test_automl_tabular_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_tabular_training_jobs.py @@ -163,16 +163,16 @@ def mock_pipeline_service_create(): ) yield mock_create_training_pipeline + @pytest.fixture def mock_pipeline_service_create_fail(): with mock.patch.object( pipeline_service_client.PipelineServiceClient, "create_training_pipeline" ) as mock_create_training_pipeline: - mock_create_training_pipeline.side_effect = RuntimeError('Mock fail') + mock_create_training_pipeline.side_effect = RuntimeError("Mock fail") yield mock_create_training_pipeline - @pytest.fixture def mock_pipeline_service_get(): with mock.patch.object( @@ -852,7 +852,7 @@ def test_run_raises_if_pipeline_fails( ) if not sync: - job.wait() + job.wait() with pytest.raises(RuntimeError): job.get_model() @@ -887,16 +887,14 @@ def test_wait_for_resource_creation_does_not_fail_if_creation_does_not_fail( assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME with pytest.raises(RuntimeError): - job.wait() + job.wait() with pytest.raises(RuntimeError): job.get_model() - @pytest.mark.usefixtures('mock_pipeline_service_create_fail') - @pytest.mark.parametrize('sync', [True, False]) - def test_create_fails( - self, mock_dataset_tabular, sync - ): + @pytest.mark.usefixtures("mock_pipeline_service_create_fail") + @pytest.mark.parametrize("sync", [True, False]) + def test_create_fails(self, mock_dataset_tabular, sync): aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME) @@ -923,28 +921,34 @@ def test_create_fails( with pytest.raises(RuntimeError) as e: job.wait_for_resource_creation() - assert e.match(regexp=r"AutoMLTabularTrainingJob resource is not scheduled to be created.") + assert e.match( + regexp=r"AutoMLTabularTrainingJob resource is not scheduled to be created." 
+ ) with pytest.raises(RuntimeError) as e: assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME - assert e.match(regexp=r"AutoMLTabularTrainingJob resource has not been created.") + assert e.match( + regexp=r"AutoMLTabularTrainingJob resource has not been created." + ) job.wait() with pytest.raises(RuntimeError) as e: job.get_model() - e.match(regexp="TrainingPipeline has not been launched. You must run this TrainingPipeline using TrainingPipeline.run.") + e.match( + regexp="TrainingPipeline has not been launched. You must run this TrainingPipeline using TrainingPipeline.run." + ) else: job.run( - model_display_name=_TEST_MODEL_DISPLAY_NAME, - dataset=mock_dataset_tabular, - target_column=_TEST_TRAINING_TARGET_COLUMN, - training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, - validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, - test_fraction_split=_TEST_TEST_FRACTION_SPLIT, - sync=sync, - ) + model_display_name=_TEST_MODEL_DISPLAY_NAME, + dataset=mock_dataset_tabular, + target_column=_TEST_TRAINING_TARGET_COLUMN, + training_fraction_split=_TEST_TRAINING_FRACTION_SPLIT, + validation_fraction_split=_TEST_VALIDATION_FRACTION_SPLIT, + test_fraction_split=_TEST_TEST_FRACTION_SPLIT, + sync=sync, + ) with pytest.raises(RuntimeError) as e: job.wait_for_resource_creation() @@ -952,14 +956,15 @@ def test_create_fails( with pytest.raises(RuntimeError) as e: assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME - assert e.match(regexp=r"AutoMLTabularTrainingJob resource has not been created. Resource failed with: Mock fail") - + assert e.match( + regexp=r"AutoMLTabularTrainingJob resource has not been created. Resource failed with: Mock fail" + ) + with pytest.raises(RuntimeError): - job.wait() + job.wait() with pytest.raises(RuntimeError): job.get_model() - def test_raises_before_run_is_called(self, mock_pipeline_service_create): aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME) @@ -984,9 +989,9 @@ def test_raises_before_run_is_called(self, mock_pipeline_service_create): with pytest.raises(RuntimeError) as e: job.wait_for_resource_creation() - assert e.match(regexp=r"AutoMLTabularTrainingJob resource is not scheduled to be created.") - - + assert e.match( + regexp=r"AutoMLTabularTrainingJob resource is not scheduled to be created." 
+ ) # pytest.usefixtures('mock_pipeline_service_get') def test_properties_throw_if_not_available(self): @@ -998,29 +1003,43 @@ def test_properties_throw_if_not_available(self): ) with pytest.raises(RuntimeError) as e: - name = job.name - assert e.match(regexp=r"AutoMLTabularTrainingJob resource has not been created") + job.name + assert e.match( + regexp=r"AutoMLTabularTrainingJob resource has not been created" + ) with pytest.raises(RuntimeError) as e: - name = job.resource_name - assert e.match(regexp=r"AutoMLTabularTrainingJob resource has not been created") + job.resource_name + assert e.match( + regexp=r"AutoMLTabularTrainingJob resource has not been created" + ) with pytest.raises(RuntimeError) as e: - name = job.display_name - assert e.match(regexp=r"AutoMLTabularTrainingJob resource has not been created") + job.display_name + assert e.match( + regexp=r"AutoMLTabularTrainingJob resource has not been created" + ) with pytest.raises(RuntimeError) as e: - name = job.create_time - assert e.match(regexp=r"AutoMLTabularTrainingJob resource has not been created") + job.create_time + assert e.match( + regexp=r"AutoMLTabularTrainingJob resource has not been created" + ) with pytest.raises(RuntimeError) as e: - name = job.encryption_spec - assert e.match(regexp=r"AutoMLTabularTrainingJob resource has not been created") + job.encryption_spec + assert e.match( + regexp=r"AutoMLTabularTrainingJob resource has not been created" + ) with pytest.raises(RuntimeError) as e: - name = job.labels - assert e.match(regexp=r"AutoMLTabularTrainingJob resource has not been created") + job.labels + assert e.match( + regexp=r"AutoMLTabularTrainingJob resource has not been created" + ) with pytest.raises(RuntimeError) as e: - name = job.gca_resource - assert e.match(regexp=r"AutoMLTabularTrainingJob resource has not been created") + job.gca_resource + assert e.match( + regexp=r"AutoMLTabularTrainingJob resource has not been created" + ) diff --git a/tests/unit/aiplatform/test_custom_job.py b/tests/unit/aiplatform/test_custom_job.py index d8f7220f33..de144d5241 100644 --- a/tests/unit/aiplatform/test_custom_job.py +++ b/tests/unit/aiplatform/test_custom_job.py @@ -166,7 +166,7 @@ def get_custom_job_mock_with_fail(): name=_TEST_CUSTOM_JOB_NAME, state=gca_job_state_compat.JobState.JOB_STATE_FAILED, error=status_pb2.Status(message="Test Error"), - ) + ), ] yield get_custom_job_mock @@ -182,12 +182,13 @@ def create_custom_job_mock(): ) yield create_custom_job_mock + @pytest.fixture def create_custom_job_mock_fail(): with mock.patch.object( job_service_client.JobServiceClient, "create_custom_job" ) as create_custom_job_mock: - create_custom_job_mock.side_effect = RuntimeError('Mock fail') + create_custom_job_mock.side_effect = RuntimeError("Mock fail") yield create_custom_job_mock @@ -271,7 +272,6 @@ def test_run_custom_job_with_fail_raises( job.wait_for_resource_creation() assert e.match(r"CustomJob resource is not scheduled to be created.") - with pytest.raises(RuntimeError): job.run( service_account=_TEST_SERVICE_ACCOUNT, @@ -296,7 +296,7 @@ def test_run_custom_job_with_fail_raises( assert job.job_spec == expected_custom_job.job_spec assert job.state == gca_job_state_compat.JobState.JOB_STATE_FAILED - @pytest.mark.usefixtures('create_custom_job_mock_fail') + @pytest.mark.usefixtures("create_custom_job_mock_fail") def test_run_custom_job_with_fail_at_creation(self): aiplatform.init( project=_TEST_PROJECT, @@ -319,15 +319,19 @@ def test_run_custom_job_with_fail_at_creation(self): with pytest.raises(RuntimeError) as 
e: job.wait_for_resource_creation() - assert e.match('Mock fail') + assert e.match("Mock fail") with pytest.raises(RuntimeError) as e: - resource_name = job.resource_name - assert e.match('CustomJob resource has not been created. Resource failed with: Mock fail') + job.resource_name + assert e.match( + "CustomJob resource has not been created. Resource failed with: Mock fail" + ) with pytest.raises(RuntimeError) as e: - network = job.network - assert e.match('CustomJob resource has not been created. Resource failed with: Mock fail') + job.network + assert e.match( + "CustomJob resource has not been created. Resource failed with: Mock fail" + ) def test_custom_job_get_state_raises_without_run(self): aiplatform.init( diff --git a/tests/unit/aiplatform/test_datasets.py b/tests/unit/aiplatform/test_datasets.py index e800fe1c4f..3457ccc7bd 100644 --- a/tests/unit/aiplatform/test_datasets.py +++ b/tests/unit/aiplatform/test_datasets.py @@ -115,19 +115,29 @@ _TEST_DATASET_LIST = [ gca_dataset.Dataset( - display_name="a", metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TABULAR + name=_TEST_NAME, + display_name="a", + metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TABULAR, ), gca_dataset.Dataset( - display_name="d", metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR + name=_TEST_NAME, + display_name="d", + metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR, ), gca_dataset.Dataset( - display_name="b", metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TABULAR + name=_TEST_NAME, + display_name="b", + metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TABULAR, ), gca_dataset.Dataset( - display_name="e", metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TEXT + name=_TEST_NAME, + display_name="e", + metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TEXT, ), gca_dataset.Dataset( - display_name="c", metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TABULAR + name=_TEST_NAME, + display_name="c", + metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TABULAR, ), ] @@ -298,12 +308,13 @@ def create_dataset_mock(): create_dataset_mock.return_value = create_dataset_lro_mock yield create_dataset_mock + @pytest.fixture def create_dataset_mock_fail(): with patch.object( dataset_service_client.DatasetServiceClient, "create_dataset" ) as create_dataset_mock: - create_dataset_mock.side_effect = RuntimeError('Mock fail') + create_dataset_mock.side_effect = RuntimeError("Mock fail") yield create_dataset_mock @@ -328,12 +339,13 @@ def import_data_mock(): import_data_mock.return_value = mock.Mock(operation.Operation) yield import_data_mock + @pytest.fixture def import_data_mock_fail(): with patch.object( dataset_service_client.DatasetServiceClient, "import_data" ) as import_data_mock: - import_data_mock.side_effect = RuntimeError('Mock fail') + import_data_mock.side_effect = RuntimeError("Mock fail") yield import_data_mock @@ -1001,13 +1013,16 @@ def test_create_dataset_fail(self): assert e.match(regexp=r"Mock fail") with pytest.raises(RuntimeError) as e: - metadata_schema_uri = my_dataset.metadata_schema_uri - assert e.match(regexp=r"TabularDataset resource has not been created. Resource failed with: Mock fail") + my_dataset.metadata_schema_uri + assert e.match( + regexp=r"TabularDataset resource has not been created. Resource failed with: Mock fail" + ) with pytest.raises(RuntimeError) as e: - column_names = my_dataset.column_names - assert e.match(regexp=r"TabularDataset resource has not been created. Resource failed with: Mock fail") - + my_dataset.column_names + assert e.match( + regexp=r"TabularDataset resource has not been created. 
Resource failed with: Mock fail" + ) @pytest.mark.usefixtures("get_dataset_tabular_bq_mock") @pytest.mark.parametrize("sync", [True, False]) @@ -1239,8 +1254,9 @@ def test_create_and_import_dataset( expected_dataset.name = _TEST_NAME assert my_dataset._gca_resource == expected_dataset - - @pytest.mark.usefixtures("create_dataset_mock", "get_dataset_text_mock", "import_data_mock_fail") + @pytest.mark.usefixtures( + "create_dataset_mock", "get_dataset_text_mock", "import_data_mock_fail" + ) @pytest.mark.parametrize("sync", [True, False]) def test_create_then_import_dataset_fails(self, sync): aiplatform.init(project=_TEST_PROJECT) @@ -1251,7 +1267,6 @@ def test_create_then_import_dataset_fails(self, sync): sync=sync, ) - if sync: with pytest.raises(RuntimeError) as e: @@ -1274,7 +1289,6 @@ def test_create_then_import_dataset_fails(self, sync): my_dataset.wait() e.match(regexp=r"Mock fail") - @pytest.mark.usefixtures("get_dataset_text_mock") @pytest.mark.parametrize("sync", [True, False]) def test_import_data(self, import_data_mock, sync): diff --git a/tests/unit/aiplatform/test_endpoints.py b/tests/unit/aiplatform/test_endpoints.py index 4b9ba32a0d..0ae76ea988 100644 --- a/tests/unit/aiplatform/test_endpoints.py +++ b/tests/unit/aiplatform/test_endpoints.py @@ -147,13 +147,19 @@ _TEST_ENDPOINT_LIST = [ gca_endpoint.Endpoint( - name=_TEST_ENDPOINT_NAME, display_name="aac", create_time=datetime.now() - timedelta(minutes=15) + name=_TEST_ENDPOINT_NAME, + display_name="aac", + create_time=datetime.now() - timedelta(minutes=15), ), gca_endpoint.Endpoint( - name=_TEST_ENDPOINT_NAME, display_name="aab", create_time=datetime.now() - timedelta(minutes=5) + name=_TEST_ENDPOINT_NAME, + display_name="aab", + create_time=datetime.now() - timedelta(minutes=5), ), gca_endpoint.Endpoint( - name=_TEST_ENDPOINT_NAME, display_name="aaa", create_time=datetime.now() - timedelta(minutes=10) + name=_TEST_ENDPOINT_NAME, + display_name="aaa", + create_time=datetime.now() - timedelta(minutes=10), ), ] @@ -488,29 +494,23 @@ def test_create(self, create_endpoint_mock, sync): expected_endpoint.name = _TEST_ENDPOINT_NAME assert my_endpoint.gca_resource == expected_endpoint - assert my_endpoint.network == None - + assert my_endpoint.network is None @pytest.mark.usefixtures("get_endpoint_mock") - def test_accessing_properties_with_no_resource_raises( - self, - ): + def test_accessing_properties_with_no_resource_raises(self,): my_endpoint = aiplatform.Endpoint(_TEST_ENDPOINT_NAME) - my_endpoint._gca_resource = None with pytest.raises(RuntimeError) as e: - gca_resource = my_endpoint.gca_resource + my_endpoint.gca_resource e.match(regexp=r"Endpoint resource has not been created.") with pytest.raises(RuntimeError) as e: - network = my_endpoint.network + my_endpoint.network e.match(regexp=r"Endpoint resource has not been created.") - - @pytest.mark.usefixtures("get_endpoint_mock") @pytest.mark.parametrize("sync", [True, False]) def test_create_with_description(self, create_endpoint_mock, sync): diff --git a/tests/unit/aiplatform/test_hyperparameter_tuning_job.py b/tests/unit/aiplatform/test_hyperparameter_tuning_job.py index 1ba0c9091a..e2d716e729 100644 --- a/tests/unit/aiplatform/test_hyperparameter_tuning_job.py +++ b/tests/unit/aiplatform/test_hyperparameter_tuning_job.py @@ -211,12 +211,13 @@ def create_hyperparameter_tuning_job_mock(): ) yield create_hyperparameter_tuning_job_mock + @pytest.fixture def create_hyperparameter_tuning_job_mock_fail(): with mock.patch.object( job_service_client.JobServiceClient, 
"create_hyperparameter_tuning_job" ) as create_hyperparameter_tuning_job_mock: - create_hyperparameter_tuning_job_mock.side_effect = RuntimeError('Mock fail') + create_hyperparameter_tuning_job_mock.side_effect = RuntimeError("Mock fail") yield create_hyperparameter_tuning_job_mock @@ -303,7 +304,6 @@ def test_create_hyperparameter_tuning_job( assert job.network == _TEST_NETWORK assert job.trials == [] - @pytest.mark.parametrize("sync", [True, False]) def test_run_hyperparameter_tuning_job_with_fail_raises( self, @@ -364,8 +364,7 @@ def test_run_hyperparameter_tuning_job_with_fail_raises( assert job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_FAILED - - @pytest.mark.usefixtures('create_hyperparameter_tuning_job_mock_fail') + @pytest.mark.usefixtures("create_hyperparameter_tuning_job_mock_fail") def test_run_hyperparameter_tuning_job_with_fail_at_creation(self): aiplatform.init( project=_TEST_PROJECT, @@ -410,19 +409,25 @@ def test_run_hyperparameter_tuning_job_with_fail_at_creation(self): with pytest.raises(RuntimeError) as e: job.wait_for_resource_creation() - assert e.match('Mock fail') + assert e.match("Mock fail") with pytest.raises(RuntimeError) as e: - resource_name = job.resource_name - assert e.match('HyperparameterTuningJob resource has not been created. Resource failed with: Mock fail') + job.resource_name + assert e.match( + "HyperparameterTuningJob resource has not been created. Resource failed with: Mock fail" + ) with pytest.raises(RuntimeError) as e: - network = job.network - assert e.match('HyperparameterTuningJob resource has not been created. Resource failed with: Mock fail') + job.network + assert e.match( + "HyperparameterTuningJob resource has not been created. Resource failed with: Mock fail" + ) with pytest.raises(RuntimeError) as e: - trials = job.trials - assert e.match('HyperparameterTuningJob resource has not been created. Resource failed with: Mock fail') + job.trials + assert e.match( + "HyperparameterTuningJob resource has not been created. 
Resource failed with: Mock fail" + ) def test_hyperparameter_tuning_job_get_state_raises_without_run(self): aiplatform.init( diff --git a/tests/unit/aiplatform/test_jobs.py b/tests/unit/aiplatform/test_jobs.py index 5b65cf27e0..76584cd0c4 100644 --- a/tests/unit/aiplatform/test_jobs.py +++ b/tests/unit/aiplatform/test_jobs.py @@ -245,7 +245,7 @@ def create_batch_prediction_job_mock_fail(): with mock.patch.object( job_service_client.JobServiceClient, "create_batch_prediction_job" ) as create_batch_prediction_job_mock: - create_batch_prediction_job_mock.side_effect = RuntimeError('Mock fail') + create_batch_prediction_job_mock.side_effect = RuntimeError("Mock fail") yield create_batch_prediction_job_mock @@ -481,7 +481,10 @@ def test_batch_predict_gcs_source_bq_dest( if not sync: batch_prediction_job.wait() - assert batch_prediction_job.output_info == gca_batch_prediction_job.BatchPredictionJob.OutputInfo() + assert ( + batch_prediction_job.output_info + == gca_batch_prediction_job.BatchPredictionJob.OutputInfo() + ) # Construct expected request expected_gapic_batch_prediction_job = gca_batch_prediction_job.BatchPredictionJob( @@ -573,7 +576,6 @@ def test_batch_predict_with_all_args( batch_prediction_job=expected_gapic_batch_prediction_job, ) - @pytest.mark.usefixtures("create_batch_prediction_job_mock_fail") def test_batch_predict_create_fails(self): aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION) @@ -588,25 +590,31 @@ def test_batch_predict_create_fails(self): with pytest.raises(RuntimeError) as e: batch_prediction_job.wait() - assert e.match(regexp=r'Mock fail') + assert e.match(regexp=r"Mock fail") with pytest.raises(RuntimeError) as e: - output_info = batch_prediction_job.output_info - assert e.match(regexp=r"BatchPredictionJob resource has not been created. Resource failed with: Mock fail") + batch_prediction_job.output_info + assert e.match( + regexp=r"BatchPredictionJob resource has not been created. Resource failed with: Mock fail" + ) with pytest.raises(RuntimeError) as e: - partial_failures = batch_prediction_job.partial_failures - assert e.match(regexp=r"BatchPredictionJob resource has not been created. Resource failed with: Mock fail") + batch_prediction_job.partial_failures + assert e.match( + regexp=r"BatchPredictionJob resource has not been created. Resource failed with: Mock fail" + ) with pytest.raises(RuntimeError) as e: - completion_stats = batch_prediction_job.completion_stats - assert e.match(regexp=r"BatchPredictionJob resource has not been created. Resource failed with: Mock fail") + batch_prediction_job.completion_stats + assert e.match( + regexp=r"BatchPredictionJob resource has not been created. Resource failed with: Mock fail" + ) with pytest.raises(RuntimeError) as e: - iter_outputs = batch_prediction_job.iter_outputs() - assert e.match(regexp=r"BatchPredictionJob resource has not been created. Resource failed with: Mock fail") - - + batch_prediction_job.iter_outputs() + assert e.match( + regexp=r"BatchPredictionJob resource has not been created. 
Resource failed with: Mock fail" + ) @pytest.mark.usefixtures("get_batch_prediction_job_mock") def test_batch_predict_no_source(self, create_batch_prediction_job_mock): diff --git a/tests/unit/aiplatform/test_models.py b/tests/unit/aiplatform/test_models.py index 82b8212ed9..600b880d14 100644 --- a/tests/unit/aiplatform/test_models.py +++ b/tests/unit/aiplatform/test_models.py @@ -256,7 +256,7 @@ def get_model_with_custom_project_mock(): display_name=_TEST_MODEL_NAME, name=_TEST_MODEL_RESOURCE_NAME_CUSTOM_PROJECT, artifact_uri=_TEST_ARTIFACT_URI, - description=_TEST_DESCRIPTION + description=_TEST_DESCRIPTION, ) yield get_model_mock @@ -736,9 +736,7 @@ def test_upload_uploads_and_gets_model_with_custom_project( assert my_model.description == _TEST_DESCRIPTION @pytest.mark.usefixtures("get_model_with_custom_project_mock") - def test_accessing_properties_with_no_resource_raises( - self, - ): + def test_accessing_properties_with_no_resource_raises(self,): test_model_resource_name = model_service_client.ModelServiceClient.model_path( _TEST_PROJECT_2, _TEST_LOCATION, _TEST_ID @@ -748,32 +746,29 @@ def test_accessing_properties_with_no_resource_raises( my_model._gca_resource = None with pytest.raises(RuntimeError) as e: - uri = my_model.uri + my_model.uri e.match(regexp=r"Model resource has not been created.") with pytest.raises(RuntimeError) as e: - supported_export_formats = my_model.supported_export_formats + my_model.supported_export_formats e.match(regexp=r"Model resource has not been created.") with pytest.raises(RuntimeError) as e: - supported_deployment_resources_types = my_model.supported_deployment_resources_types + my_model.supported_deployment_resources_types e.match(regexp=r"Model resource has not been created.") - with pytest.raises(RuntimeError) as e: - supported_input_storage_formats = my_model.supported_input_storage_formats + my_model.supported_input_storage_formats e.match(regexp=r"Model resource has not been created.") - with pytest.raises(RuntimeError) as e: - supported_output_storage_formats = my_model.supported_output_storage_formats + my_model.supported_output_storage_formats e.match(regexp=r"Model resource has not been created.") with pytest.raises(RuntimeError) as e: - description = my_model.description + my_model.description e.match(regexp=r"Model resource has not been created.") - @pytest.mark.usefixtures("get_model_with_custom_location_mock") @pytest.mark.parametrize("sync", [True, False]) def test_upload_uploads_and_gets_model_with_custom_location( From f736126e265040d376f3dd4946c29faf7083fa1a Mon Sep 17 00:00:00 2001 From: Sasha Sobran Date: Mon, 26 Jul 2021 12:18:10 -0400 Subject: [PATCH 10/11] chore: type hints --- google/cloud/aiplatform/base.py | 4 ++-- google/cloud/aiplatform/jobs.py | 2 +- google/cloud/aiplatform/pipeline_jobs.py | 4 ++-- google/cloud/aiplatform/training_jobs.py | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/google/cloud/aiplatform/base.py b/google/cloud/aiplatform/base.py index ab60eeaa25..fbf22da588 100644 --- a/google/cloud/aiplatform/base.py +++ b/google/cloud/aiplatform/base.py @@ -593,7 +593,7 @@ def gca_resource(self) -> proto.Message: self._assert_gca_resource_is_available() return self._gca_resource - def _assert_gca_resource_is_available(self): + def _assert_gca_resource_is_available(self) -> None: """Helper method to raise when property is not accessible. 
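
        Called at the top of each property that reads ``self._gca_resource``,
        for example (sketch; ``some_field`` is a hypothetical property):

            @property
            def some_field(self) -> str:
                self._assert_gca_resource_is_available()
                return self._gca_resource.some_field
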
Raises: @@ -1111,7 +1111,7 @@ def _wait_for_resource_creation(self) -> None: time.sleep(1) - def _assert_gca_resource_is_available(self): + def _assert_gca_resource_is_available(self) -> None: """Helper method to raise when accessing properties that do not exist. Raises: diff --git a/google/cloud/aiplatform/jobs.py b/google/cloud/aiplatform/jobs.py index a40935f330..dcd6b95fc5 100644 --- a/google/cloud/aiplatform/jobs.py +++ b/google/cloud/aiplatform/jobs.py @@ -901,7 +901,7 @@ def get( return self - def wait_for_resource_creation(self): + def wait_for_resource_creation(self) -> None: """Waits until resource has been created.""" self._wait_for_resource_creation() diff --git a/google/cloud/aiplatform/pipeline_jobs.py b/google/cloud/aiplatform/pipeline_jobs.py index 84eec34284..483cdbcb91 100644 --- a/google/cloud/aiplatform/pipeline_jobs.py +++ b/google/cloud/aiplatform/pipeline_jobs.py @@ -220,7 +220,7 @@ def __init__( ), ) - def _assert_gca_resource_is_available(self): + def _assert_gca_resource_is_available(self) -> None: # TODO(b/193800063) Change this to name after this fix if not getattr(self._gca_resource, "create_time", None): raise RuntimeError( @@ -377,6 +377,6 @@ def cancel(self) -> None: """ self.api_client.cancel_pipeline_job(name=self.resource_name) - def wait_for_resource_creation(self): + def wait_for_resource_creation(self) -> None: """Waits until resource has been created.""" self._wait_for_resource_creation() diff --git a/google/cloud/aiplatform/training_jobs.py b/google/cloud/aiplatform/training_jobs.py index 301d7c040d..5f9d7c3445 100644 --- a/google/cloud/aiplatform/training_jobs.py +++ b/google/cloud/aiplatform/training_jobs.py @@ -853,7 +853,7 @@ def cancel(self) -> None: ) self.api_client.cancel_training_pipeline(name=self.resource_name) - def wait_for_resource_creation(self): + def wait_for_resource_creation(self) -> None: """Waits until resource has been created.""" self._wait_for_resource_creation() From 0d4329e5234ef3098db12af75252fbd96894c31e Mon Sep 17 00:00:00 2001 From: Sasha Sobran Date: Thu, 29 Jul 2021 11:49:24 -0400 Subject: [PATCH 11/11] chore: address PR review comments --- google/cloud/aiplatform/base.py | 3 +++ tests/unit/aiplatform/test_automl_tabular_training_jobs.py | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/google/cloud/aiplatform/base.py b/google/cloud/aiplatform/base.py index 7cf272c48a..1a3eed8add 100644 --- a/google/cloud/aiplatform/base.py +++ b/google/cloud/aiplatform/base.py @@ -1114,6 +1114,9 @@ def _wait_for_resource_creation(self) -> None: def _assert_gca_resource_is_available(self) -> None: """Helper method to raise when accessing properties that do not exist. + Overrides VertexAiResourceNoun to provide a more informative exception if + resource creation has failed asynchronously. + Raises: RuntimeError when resource has not been created. 
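
        Example of the failure path this surfaces (sketch; the job type and
        error text are illustrative placeholders):

            job.run(sync=False)
            job.resource_name
            # RuntimeError: CustomJob resource has not been created.
            #     Resource failed with: <original exception>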
""" diff --git a/tests/unit/aiplatform/test_automl_tabular_training_jobs.py b/tests/unit/aiplatform/test_automl_tabular_training_jobs.py index ee6e490342..78a99ee6e3 100644 --- a/tests/unit/aiplatform/test_automl_tabular_training_jobs.py +++ b/tests/unit/aiplatform/test_automl_tabular_training_jobs.py @@ -908,7 +908,7 @@ def test_create_fails(self, mock_dataset_tabular, sync): ) if sync: - with pytest.raises(RuntimeError): + with pytest.raises(RuntimeError) as e: job.run( model_display_name=_TEST_MODEL_DISPLAY_NAME, dataset=mock_dataset_tabular, @@ -918,6 +918,7 @@ def test_create_fails(self, mock_dataset_tabular, sync): test_fraction_split=_TEST_TEST_FRACTION_SPLIT, sync=sync, ) + assert e.match("Mock fail") with pytest.raises(RuntimeError) as e: job.wait_for_resource_creation() @@ -993,7 +994,6 @@ def test_raises_before_run_is_called(self, mock_pipeline_service_create): regexp=r"AutoMLTabularTrainingJob resource is not scheduled to be created." ) - # pytest.usefixtures('mock_pipeline_service_get') def test_properties_throw_if_not_available(self): job = training_jobs.AutoMLTabularTrainingJob(