diff --git a/README.rst b/README.rst index 391fa89e8f..3db9fc5022 100644 --- a/README.rst +++ b/README.rst @@ -3,6 +3,9 @@ Python Client for Cloud AI Platform |beta| |pypi| |versions| + +:Warning: This library is a pre-release product and is subject to breaking changes. + `Cloud AI Platform`_: Cloud AI Platform is a suite of machine learning tools that enables developers to train high-quality models specific to their business needs. It offers both novices and experts the best workbench for machine learning diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py index 46b78e540d..99dcfbc5b6 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py @@ -165,7 +165,7 @@ def create_dataset( Args: request (:class:`~.dataset_service.CreateDatasetRequest`): The request object. Request message for - [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]. + ``DatasetService.CreateDataset``. parent (:class:`str`): Required. The resource name of the Location to create the Dataset in. Format: @@ -190,7 +190,7 @@ def create_dataset( An object representing a long-running operation. The result type for the operation will be - :class:``~.gca_dataset.Dataset``: A collection of + :class:`~.gca_dataset.Dataset`: A collection of DataItems and Annotations on them. """ @@ -249,7 +249,7 @@ def get_dataset( Args: request (:class:`~.dataset_service.GetDatasetRequest`): The request object. Request message for - [DatasetService.GetDataset][google.cloud.aiplatform.v1beta1.DatasetService.GetDataset]. + ``DatasetService.GetDataset``. name (:class:`str`): Required. The name of the Dataset resource. @@ -319,7 +319,7 @@ def update_dataset( Args: request (:class:`~.dataset_service.UpdateDatasetRequest`): The request object. Request message for - [DatasetService.UpdateDataset][google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset]. + ``DatasetService.UpdateDataset``. dataset (:class:`~.gca_dataset.Dataset`): Required. The Dataset which replaces the resource on the server. @@ -330,8 +330,7 @@ def update_dataset( Required. The update mask applies to the resource. For the ``FieldMask`` definition, see - [FieldMask](https: - //tinyurl.com/dev-google-protobuf#google.protobuf.FieldMask). + [FieldMask](https://tinyurl.com/dev-google-protobuf#google.protobuf.FieldMask). Updatable fields: - ``display_name`` @@ -400,7 +399,7 @@ def list_datasets( Args: request (:class:`~.dataset_service.ListDatasetsRequest`): The request object. Request message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. + ``DatasetService.ListDatasets``. parent (:class:`str`): Required. The name of the Dataset's parent resource. Format: ``projects/{project}/locations/{location}`` @@ -417,7 +416,7 @@ def list_datasets( Returns: ~.pagers.ListDatasetsPager: Response message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. + ``DatasetService.ListDatasets``. Iterating over this object will yield results and resolve additional pages automatically. @@ -480,7 +479,7 @@ def delete_dataset( Args: request (:class:`~.dataset_service.DeleteDatasetRequest`): The request object. Request message for - [DatasetService.DeleteDataset][google.cloud.aiplatform.v1beta1.DatasetService.DeleteDataset]. + ``DatasetService.DeleteDataset``. 
name (:class:`str`): Required. The resource name of the Dataset to delete. Format: @@ -500,7 +499,7 @@ def delete_dataset( An object representing a long-running operation. The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that + :class:`~.empty.Empty`: A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For @@ -570,7 +569,7 @@ def import_data( Args: request (:class:`~.dataset_service.ImportDataRequest`): The request object. Request message for - [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. + ``DatasetService.ImportData``. name (:class:`str`): Required. The name of the Dataset resource. Format: ``projects/{project}/locations/{location}/datasets/{dataset}`` @@ -596,9 +595,9 @@ def import_data( An object representing a long-running operation. The result type for the operation will be - :class:``~.dataset_service.ImportDataResponse``: + :class:`~.dataset_service.ImportDataResponse`: Response message for - [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. + ``DatasetService.ImportData``. """ # Create or coerce a protobuf request object. @@ -655,7 +654,7 @@ def export_data( Args: request (:class:`~.dataset_service.ExportDataRequest`): The request object. Request message for - [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. + ``DatasetService.ExportData``. name (:class:`str`): Required. The name of the Dataset resource. Format: ``projects/{project}/locations/{location}/datasets/{dataset}`` @@ -680,9 +679,9 @@ def export_data( An object representing a long-running operation. The result type for the operation will be - :class:``~.dataset_service.ExportDataResponse``: + :class:`~.dataset_service.ExportDataResponse`: Response message for - [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. + ``DatasetService.ExportData``. """ # Create or coerce a protobuf request object. @@ -738,7 +737,7 @@ def list_data_items( Args: request (:class:`~.dataset_service.ListDataItemsRequest`): The request object. Request message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. + ``DatasetService.ListDataItems``. parent (:class:`str`): Required. The resource name of the Dataset to list DataItems from. Format: @@ -756,7 +755,7 @@ def list_data_items( Returns: ~.pagers.ListDataItemsPager: Response message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. + ``DatasetService.ListDataItems``. Iterating over this object will yield results and resolve additional pages automatically. @@ -819,7 +818,7 @@ def get_annotation_spec( Args: request (:class:`~.dataset_service.GetAnnotationSpecRequest`): The request object. Request message for - [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1beta1.DatasetService.GetAnnotationSpec]. + ``DatasetService.GetAnnotationSpec``. name (:class:`str`): Required. The name of the AnnotationSpec resource. Format: @@ -892,7 +891,7 @@ def list_annotations( Args: request (:class:`~.dataset_service.ListAnnotationsRequest`): The request object. Request message for - [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. + ``DatasetService.ListAnnotations``. parent (:class:`str`): Required. 
The resource name of the DataItem to list Annotations from. Format: @@ -911,7 +910,7 @@ def list_annotations( Returns: ~.pagers.ListAnnotationsPager: Response message for - [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. + ``DatasetService.ListAnnotations``. Iterating over this object will yield results and resolve additional pages automatically. diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py index d3135dc885..0ed3efe87a 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py @@ -161,7 +161,7 @@ def create_endpoint( Args: request (:class:`~.endpoint_service.CreateEndpointRequest`): The request object. Request message for - [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint]. + ``EndpointService.CreateEndpoint``. parent (:class:`str`): Required. The resource name of the Location to create the Endpoint in. Format: @@ -186,7 +186,7 @@ def create_endpoint( An object representing a long-running operation. The result type for the operation will be - :class:``~.gca_endpoint.Endpoint``: Models are deployed + :class:`~.gca_endpoint.Endpoint`: Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. @@ -246,7 +246,7 @@ def get_endpoint( Args: request (:class:`~.endpoint_service.GetEndpointRequest`): The request object. Request message for - [EndpointService.GetEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint] + ``EndpointService.GetEndpoint`` name (:class:`str`): Required. The name of the Endpoint resource. Format: ``projects/{project}/locations/{location}/endpoints/{endpoint}`` @@ -318,7 +318,7 @@ def list_endpoints( Args: request (:class:`~.endpoint_service.ListEndpointsRequest`): The request object. Request message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. + ``EndpointService.ListEndpoints``. parent (:class:`str`): Required. The resource name of the Location from which to list the Endpoints. Format: @@ -336,7 +336,7 @@ def list_endpoints( Returns: ~.pagers.ListEndpointsPager: Response message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. + ``EndpointService.ListEndpoints``. Iterating over this object will yield results and resolve additional pages automatically. @@ -400,7 +400,7 @@ def update_endpoint( Args: request (:class:`~.endpoint_service.UpdateEndpointRequest`): The request object. Request message for - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. + ``EndpointService.UpdateEndpoint``. endpoint (:class:`~.gca_endpoint.Endpoint`): Required. The Endpoint which replaces the resource on the server. @@ -474,7 +474,7 @@ def delete_endpoint( Args: request (:class:`~.endpoint_service.DeleteEndpointRequest`): The request object. Request message for - [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint]. + ``EndpointService.DeleteEndpoint``. name (:class:`str`): Required. The name of the Endpoint resource to be deleted. Format: @@ -494,7 +494,7 @@ def delete_endpoint( An object representing a long-running operation. 
The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that + :class:`~.empty.Empty`: A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For @@ -568,7 +568,7 @@ def deploy_model( Args: request (:class:`~.endpoint_service.DeployModelRequest`): The request object. Request message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. + ``EndpointService.DeployModel``. endpoint (:class:`str`): Required. The name of the Endpoint resource into which to deploy a Model. Format: @@ -579,10 +579,10 @@ def deploy_model( deployed_model (:class:`~.gca_endpoint.DeployedModel`): Required. The DeployedModel to be created within the Endpoint. Note that - [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + ``Endpoint.traffic_split`` must be updated for the DeployedModel to start receiving traffic, either as part of this call, or via - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. + ``EndpointService.UpdateEndpoint``. This corresponds to the ``deployed_model`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -592,7 +592,7 @@ def deploy_model( DeployedModel. If this field is non-empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + ``traffic_split`` will be overwritten with it. To refer to the ID of the just being deployed Model, a "0" should be used, and the actual ID of the new DeployedModel will be filled in its @@ -600,7 +600,7 @@ def deploy_model( add up to 100. If this field is empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + ``traffic_split`` is not updated. This corresponds to the ``traffic_split`` field on the ``request`` instance; if ``request`` is provided, this @@ -617,9 +617,9 @@ def deploy_model( An object representing a long-running operation. The result type for the operation will be - :class:``~.endpoint_service.DeployModelResponse``: + :class:`~.endpoint_service.DeployModelResponse`: Response message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. + ``EndpointService.DeployModel``. """ # Create or coerce a protobuf request object. @@ -685,7 +685,7 @@ def undeploy_model( Args: request (:class:`~.endpoint_service.UndeployModelRequest`): The request object. Request message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. + ``EndpointService.UndeployModel``. endpoint (:class:`str`): Required. The name of the Endpoint resource from which to undeploy a Model. Format: @@ -701,7 +701,7 @@ def undeploy_model( should not be set. traffic_split (:class:`Sequence[~.endpoint_service.UndeployModelRequest.TrafficSplitEntry]`): If this field is provided, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + ``traffic_split`` will be overwritten with it. If last DeployedModel is being undeployed from the Endpoint, the [Endpoint.traffic_split] will always end up empty when @@ -724,9 +724,9 @@ def undeploy_model( An object representing a long-running operation. 
The result type for the operation will be - :class:``~.endpoint_service.UndeployModelResponse``: + :class:`~.endpoint_service.UndeployModelResponse`: Response message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. + ``EndpointService.UndeployModel``. """ # Create or coerce a protobuf request object. diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/client.py b/google/cloud/aiplatform_v1beta1/services/job_service/client.py index 15d1760d9a..b56a9a7871 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/client.py @@ -124,12 +124,14 @@ def custom_job_path(project: str, location: str, custom_job: str,) -> str: ) @staticmethod - def data_labeling_job_path( - project: str, location: str, data_labeling_job: str, + def batch_prediction_job_path( + project: str, location: str, batch_prediction_job: str, ) -> str: - """Return a fully-qualified data_labeling_job string.""" - return "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format( - project=project, location=location, data_labeling_job=data_labeling_job, + """Return a fully-qualified batch_prediction_job string.""" + return "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format( + project=project, + location=location, + batch_prediction_job=batch_prediction_job, ) @staticmethod @@ -144,14 +146,12 @@ def hyperparameter_tuning_job_path( ) @staticmethod - def batch_prediction_job_path( - project: str, location: str, batch_prediction_job: str, + def data_labeling_job_path( + project: str, location: str, data_labeling_job: str, ) -> str: - """Return a fully-qualified batch_prediction_job string.""" - return "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format( - project=project, - location=location, - batch_prediction_job=batch_prediction_job, + """Return a fully-qualified data_labeling_job string.""" + return "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format( + project=project, location=location, data_labeling_job=data_labeling_job, ) def __init__( @@ -210,7 +210,7 @@ def create_custom_job( Args: request (:class:`~.job_service.CreateCustomJobRequest`): The request object. Request message for - [JobService.CreateCustomJob][google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob]. + ``JobService.CreateCustomJob``. parent (:class:`str`): Required. The resource name of the Location to create the CustomJob in. Format: @@ -289,7 +289,7 @@ def get_custom_job( Args: request (:class:`~.job_service.GetCustomJobRequest`): The request object. Request message for - [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob]. + ``JobService.GetCustomJob``. name (:class:`str`): Required. The name of the CustomJob resource. Format: ``projects/{project}/locations/{location}/customJobs/{custom_job}`` @@ -366,7 +366,7 @@ def list_custom_jobs( Args: request (:class:`~.job_service.ListCustomJobsRequest`): The request object. Request message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs]. + ``JobService.ListCustomJobs``. parent (:class:`str`): Required. The resource name of the Location to list the CustomJobs from. 
Format: @@ -384,7 +384,7 @@ def list_custom_jobs( Returns: ~.pagers.ListCustomJobsPager: Response message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] + ``JobService.ListCustomJobs`` Iterating over this object will yield results and resolve additional pages automatically. @@ -447,7 +447,7 @@ def delete_custom_job( Args: request (:class:`~.job_service.DeleteCustomJobRequest`): The request object. Request message for - [JobService.DeleteCustomJob][google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob]. + ``JobService.DeleteCustomJob``. name (:class:`str`): Required. The name of the CustomJob resource to be deleted. Format: @@ -467,7 +467,7 @@ def delete_custom_job( An object representing a long-running operation. The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that + :class:`~.empty.Empty`: A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For @@ -534,21 +534,21 @@ def cancel_custom_job( r"""Cancels a CustomJob. Starts asynchronous cancellation on the CustomJob. The server makes a best effort to cancel the job, but success is not guaranteed. Clients can use - [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] + ``JobService.GetCustomJob`` or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On successful cancellation, the CustomJob is not deleted; instead it becomes a job with a - [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of + ``CustomJob.error`` + value with a ``google.rpc.Status.code`` of 1, corresponding to ``Code.CANCELLED``, and - [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] + ``CustomJob.state`` is set to ``CANCELLED``. Args: request (:class:`~.job_service.CancelCustomJobRequest`): The request object. Request message for - [JobService.CancelCustomJob][google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob]. + ``JobService.CancelCustomJob``. name (:class:`str`): Required. The name of the CustomJob to cancel. Format: ``projects/{project}/locations/{location}/customJobs/{custom_job}`` @@ -771,7 +771,7 @@ def list_data_labeling_jobs( Returns: ~.pagers.ListDataLabelingJobsPager: Response message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. + ``JobService.ListDataLabelingJobs``. Iterating over this object will yield results and resolve additional pages automatically. @@ -834,7 +834,7 @@ def delete_data_labeling_job( Args: request (:class:`~.job_service.DeleteDataLabelingJobRequest`): The request object. Request message for - [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob]. + ``JobService.DeleteDataLabelingJob``. name (:class:`str`): Required. The name of the DataLabelingJob to be deleted. Format: @@ -855,7 +855,7 @@ def delete_data_labeling_job( An object representing a long-running operation. The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that + :class:`~.empty.Empty`: A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. 
For @@ -985,7 +985,7 @@ def create_hyperparameter_tuning_job( Args: request (:class:`~.job_service.CreateHyperparameterTuningJobRequest`): The request object. Request message for - [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob]. + ``JobService.CreateHyperparameterTuningJob``. parent (:class:`str`): Required. The resource name of the Location to create the HyperparameterTuningJob in. Format: @@ -1061,7 +1061,7 @@ def get_hyperparameter_tuning_job( Args: request (:class:`~.job_service.GetHyperparameterTuningJobRequest`): The request object. Request message for - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob]. + ``JobService.GetHyperparameterTuningJob``. name (:class:`str`): Required. The name of the HyperparameterTuningJob resource. Format: @@ -1136,7 +1136,7 @@ def list_hyperparameter_tuning_jobs( Args: request (:class:`~.job_service.ListHyperparameterTuningJobsRequest`): The request object. Request message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]. + ``JobService.ListHyperparameterTuningJobs``. parent (:class:`str`): Required. The resource name of the Location to list the HyperparameterTuningJobs from. Format: @@ -1154,7 +1154,7 @@ def list_hyperparameter_tuning_jobs( Returns: ~.pagers.ListHyperparameterTuningJobsPager: Response message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs] + ``JobService.ListHyperparameterTuningJobs`` Iterating over this object will yield results and resolve additional pages automatically. @@ -1217,7 +1217,7 @@ def delete_hyperparameter_tuning_job( Args: request (:class:`~.job_service.DeleteHyperparameterTuningJobRequest`): The request object. Request message for - [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob]. + ``JobService.DeleteHyperparameterTuningJob``. name (:class:`str`): Required. The name of the HyperparameterTuningJob resource to be deleted. Format: @@ -1238,7 +1238,7 @@ def delete_hyperparameter_tuning_job( An object representing a long-running operation. The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that + :class:`~.empty.Empty`: A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For @@ -1306,21 +1306,21 @@ def cancel_hyperparameter_tuning_job( cancellation on the HyperparameterTuningJob. The server makes a best effort to cancel the job, but success is not guaranteed. Clients can use - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] + ``JobService.GetHyperparameterTuningJob`` or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. 
On successful cancellation, the HyperparameterTuningJob is not deleted; instead it becomes a job with a - [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of + ``HyperparameterTuningJob.error`` + value with a ``google.rpc.Status.code`` of 1, corresponding to ``Code.CANCELLED``, and - [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] + ``HyperparameterTuningJob.state`` is set to ``CANCELLED``. Args: request (:class:`~.job_service.CancelHyperparameterTuningJobRequest`): The request object. Request message for - [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob]. + ``JobService.CancelHyperparameterTuningJob``. name (:class:`str`): Required. The name of the HyperparameterTuningJob to cancel. Format: @@ -1382,7 +1382,7 @@ def create_batch_prediction_job( Args: request (:class:`~.job_service.CreateBatchPredictionJobRequest`): The request object. Request message for - [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob]. + ``JobService.CreateBatchPredictionJob``. parent (:class:`str`): Required. The resource name of the Location to create the BatchPredictionJob in. Format: @@ -1406,7 +1406,7 @@ def create_batch_prediction_job( Returns: ~.gca_batch_prediction_job.BatchPredictionJob: A job that uses a - [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] + ``Model`` to produce predictions on multiple [input instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. If predictions for significant portion of the instances @@ -1461,7 +1461,7 @@ def get_batch_prediction_job( Args: request (:class:`~.job_service.GetBatchPredictionJobRequest`): The request object. Request message for - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]. + ``JobService.GetBatchPredictionJob``. name (:class:`str`): Required. The name of the BatchPredictionJob resource. Format: @@ -1480,7 +1480,7 @@ def get_batch_prediction_job( Returns: ~.batch_prediction_job.BatchPredictionJob: A job that uses a - [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] + ``Model`` to produce predictions on multiple [input instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. If predictions for significant portion of the instances @@ -1539,7 +1539,7 @@ def list_batch_prediction_jobs( Args: request (:class:`~.job_service.ListBatchPredictionJobsRequest`): The request object. Request message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs]. + ``JobService.ListBatchPredictionJobs``. parent (:class:`str`): Required. The resource name of the Location to list the BatchPredictionJobs from. Format: @@ -1557,7 +1557,7 @@ def list_batch_prediction_jobs( Returns: ~.pagers.ListBatchPredictionJobsPager: Response message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] + ``JobService.ListBatchPredictionJobs`` Iterating over this object will yield results and resolve additional pages automatically. @@ -1621,7 +1621,7 @@ def delete_batch_prediction_job( Args: request (:class:`~.job_service.DeleteBatchPredictionJobRequest`): The request object. 
Request message for - [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob]. + ``JobService.DeleteBatchPredictionJob``. name (:class:`str`): Required. The name of the BatchPredictionJob resource to be deleted. Format: @@ -1642,7 +1642,7 @@ def delete_batch_prediction_job( An object representing a long-running operation. The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that + :class:`~.empty.Empty`: A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For @@ -1711,18 +1711,18 @@ def cancel_batch_prediction_job( Starts asynchronous cancellation on the BatchPredictionJob. The server makes the best effort to cancel the job, but success is not guaranteed. Clients can use - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob] + ``JobService.GetBatchPredictionJob`` or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On a successful cancellation, the BatchPredictionJob is not deleted;instead its - [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state] + ``BatchPredictionJob.state`` is set to ``CANCELLED``. Any files already outputted by the job are not deleted. Args: request (:class:`~.job_service.CancelBatchPredictionJobRequest`): The request object. Request message for - [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob]. + ``JobService.CancelBatchPredictionJob``. name (:class:`str`): Required. The name of the BatchPredictionJob to cancel. Format: diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py index cdb049c585..a598c180cf 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py @@ -263,15 +263,15 @@ def cancel_custom_job( Cancels a CustomJob. Starts asynchronous cancellation on the CustomJob. The server makes a best effort to cancel the job, but success is not guaranteed. Clients can use - [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] + ``JobService.GetCustomJob`` or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On successful cancellation, the CustomJob is not deleted; instead it becomes a job with a - [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of + ``CustomJob.error`` + value with a ``google.rpc.Status.code`` of 1, corresponding to ``Code.CANCELLED``, and - [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] + ``CustomJob.state`` is set to ``CANCELLED``. Returns: @@ -568,15 +568,15 @@ def cancel_hyperparameter_tuning_job( cancellation on the HyperparameterTuningJob. The server makes a best effort to cancel the job, but success is not guaranteed. Clients can use - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] + ``JobService.GetHyperparameterTuningJob`` or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. 
On successful cancellation, the HyperparameterTuningJob is not deleted; instead it becomes a job with a - [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of + ``HyperparameterTuningJob.error`` + value with a ``google.rpc.Status.code`` of 1, corresponding to ``Code.CANCELLED``, and - [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] + ``HyperparameterTuningJob.state`` is set to ``CANCELLED``. Returns: @@ -725,11 +725,11 @@ def cancel_batch_prediction_job( Starts asynchronous cancellation on the BatchPredictionJob. The server makes the best effort to cancel the job, but success is not guaranteed. Clients can use - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob] + ``JobService.GetBatchPredictionJob`` or other methods to check whether the cancellation succeeded or whether the job completed despite cancellation. On a successful cancellation, the BatchPredictionJob is not deleted;instead its - [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state] + ``BatchPredictionJob.state`` is set to ``CANCELLED``. Any files already outputted by the job are not deleted. diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/client.py b/google/cloud/aiplatform_v1beta1/services/model_service/client.py index 801456a7c5..dab285be4c 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/client.py @@ -164,7 +164,7 @@ def upload_model( Args: request (:class:`~.model_service.UploadModelRequest`): The request object. Request message for - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]. + ``ModelService.UploadModel``. parent (:class:`str`): Required. The resource name of the Location into which to upload the Model. Format: @@ -189,9 +189,9 @@ def upload_model( An object representing a long-running operation. The result type for the operation will be - :class:``~.model_service.UploadModelResponse``: Response + :class:`~.model_service.UploadModelResponse`: Response message of - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] + ``ModelService.UploadModel`` operation. """ @@ -250,7 +250,7 @@ def get_model( Args: request (:class:`~.model_service.GetModelRequest`): The request object. Request message for - [ModelService.GetModel][google.cloud.aiplatform.v1beta1.ModelService.GetModel]. + ``ModelService.GetModel``. name (:class:`str`): Required. The name of the Model resource. Format: ``projects/{project}/locations/{location}/models/{model}`` @@ -317,7 +317,7 @@ def list_models( Args: request (:class:`~.model_service.ListModelsRequest`): The request object. Request message for - [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels]. + ``ModelService.ListModels``. parent (:class:`str`): Required. The resource name of the Location to list the Models from. Format: @@ -335,7 +335,7 @@ def list_models( Returns: ~.pagers.ListModelsPager: Response message for - [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels] + ``ModelService.ListModels`` Iterating over this object will yield results and resolve additional pages automatically. @@ -397,7 +397,7 @@ def update_model( Args: request (:class:`~.model_service.UpdateModelRequest`): The request object. 
Request message for - [ModelService.UpdateModel][google.cloud.aiplatform.v1beta1.ModelService.UpdateModel]. + ``ModelService.UpdateModel``. model (:class:`~.gca_model.Model`): Required. The Model which replaces the resource on the server. @@ -408,9 +408,7 @@ def update_model( Required. The update mask applies to the resource. For the ``FieldMask`` definition, see - [FieldMask](https: - //developers.google.com/protocol-buffers // - /docs/reference/google.protobuf#fieldmask). + [FieldMask](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask). This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -474,7 +472,7 @@ def delete_model( Args: request (:class:`~.model_service.DeleteModelRequest`): The request object. Request message for - [ModelService.DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel]. + ``ModelService.DeleteModel``. name (:class:`str`): Required. The name of the Model resource to be deleted. Format: @@ -494,7 +492,7 @@ def delete_model( An object representing a long-running operation. The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that + :class:`~.empty.Empty`: A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For @@ -567,7 +565,7 @@ def export_model( Args: request (:class:`~.model_service.ExportModelRequest`): The request object. Request message for - [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]. + ``ModelService.ExportModel``. name (:class:`str`): Required. The resource name of the Model to export. Format: @@ -593,9 +591,9 @@ def export_model( An object representing a long-running operation. The result type for the operation will be - :class:``~.model_service.ExportModelResponse``: Response + :class:`~.model_service.ExportModelResponse`: Response message of - [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] + ``ModelService.ExportModel`` operation. """ @@ -654,7 +652,7 @@ def get_model_evaluation( Args: request (:class:`~.model_service.GetModelEvaluationRequest`): The request object. Request message for - [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation]. + ``ModelService.GetModelEvaluation``. name (:class:`str`): Required. The name of the ModelEvaluation resource. Format: @@ -729,7 +727,7 @@ def list_model_evaluations( Args: request (:class:`~.model_service.ListModelEvaluationsRequest`): The request object. Request message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. + ``ModelService.ListModelEvaluations``. parent (:class:`str`): Required. The resource name of the Model to list the ModelEvaluations from. Format: @@ -747,7 +745,7 @@ def list_model_evaluations( Returns: ~.pagers.ListModelEvaluationsPager: Response message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. + ``ModelService.ListModelEvaluations``. Iterating over this object will yield results and resolve additional pages automatically. @@ -810,7 +808,7 @@ def get_model_evaluation_slice( Args: request (:class:`~.model_service.GetModelEvaluationSliceRequest`): The request object. 
Request message for - [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice]. + ``ModelService.GetModelEvaluationSlice``. name (:class:`str`): Required. The name of the ModelEvaluationSlice resource. Format: @@ -885,7 +883,7 @@ def list_model_evaluation_slices( Args: request (:class:`~.model_service.ListModelEvaluationSlicesRequest`): The request object. Request message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. + ``ModelService.ListModelEvaluationSlices``. parent (:class:`str`): Required. The resource name of the ModelEvaluation to list the ModelEvaluationSlices from. Format: @@ -904,7 +902,7 @@ def list_model_evaluation_slices( Returns: ~.pagers.ListModelEvaluationSlicesPager: Response message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. + ``ModelService.ListModelEvaluationSlices``. Iterating over this object will yield results and resolve additional pages automatically. diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py index 7da23adbc3..2530414b9a 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py @@ -104,6 +104,13 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): from_service_account_json = from_service_account_file + @staticmethod + def model_path(project: str, location: str, model: str,) -> str: + """Return a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format( + project=project, location=location, model=model, + ) + @staticmethod def training_pipeline_path( project: str, location: str, training_pipeline: str, @@ -113,13 +120,6 @@ def training_pipeline_path( project=project, location=location, training_pipeline=training_pipeline, ) - @staticmethod - def model_path(project: str, location: str, model: str,) -> str: - """Return a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format( - project=project, location=location, model=model, - ) - def __init__( self, *, @@ -176,7 +176,7 @@ def create_training_pipeline( Args: request (:class:`~.pipeline_service.CreateTrainingPipelineRequest`): The request object. Request message for - [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline]. + ``PipelineService.CreateTrainingPipeline``. parent (:class:`str`): Required. The resource name of the Location to create the TrainingPipeline in. Format: @@ -203,7 +203,7 @@ def create_training_pipeline( training a Model. It always executes the training task, and optionally may also export data from AI Platform's Dataset which becomes the training input, - [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] + ``upload`` the Model to AI Platform, and evaluate the Model. """ @@ -254,7 +254,7 @@ def get_training_pipeline( Args: request (:class:`~.pipeline_service.GetTrainingPipelineRequest`): The request object. Request message for - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline]. + ``PipelineService.GetTrainingPipeline``. name (:class:`str`): Required. The name of the TrainingPipeline resource. 
Format: @@ -276,7 +276,7 @@ def get_training_pipeline( training a Model. It always executes the training task, and optionally may also export data from AI Platform's Dataset which becomes the training input, - [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] + ``upload`` the Model to AI Platform, and evaluate the Model. """ @@ -331,7 +331,7 @@ def list_training_pipelines( Args: request (:class:`~.pipeline_service.ListTrainingPipelinesRequest`): The request object. Request message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines]. + ``PipelineService.ListTrainingPipelines``. parent (:class:`str`): Required. The resource name of the Location to list the TrainingPipelines from. Format: @@ -349,7 +349,7 @@ def list_training_pipelines( Returns: ~.pagers.ListTrainingPipelinesPager: Response message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] + ``PipelineService.ListTrainingPipelines`` Iterating over this object will yield results and resolve additional pages automatically. @@ -412,7 +412,7 @@ def delete_training_pipeline( Args: request (:class:`~.pipeline_service.DeleteTrainingPipelineRequest`): The request object. Request message for - [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline]. + ``PipelineService.DeleteTrainingPipeline``. name (:class:`str`): Required. The name of the TrainingPipeline resource to be deleted. Format: @@ -433,7 +433,7 @@ def delete_training_pipeline( An object representing a long-running operation. The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that + :class:`~.empty.Empty`: A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For @@ -500,21 +500,21 @@ def cancel_training_pipeline( r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on the TrainingPipeline. The server makes a best effort to cancel the pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline] + ``PipelineService.GetTrainingPipeline`` or other methods to check whether the cancellation succeeded or whether the pipeline completed despite cancellation. On successful cancellation, the TrainingPipeline is not deleted; instead it becomes a pipeline with a - [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of + ``TrainingPipeline.error`` + value with a ``google.rpc.Status.code`` of 1, corresponding to ``Code.CANCELLED``, and - [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] + ``TrainingPipeline.state`` is set to ``CANCELLED``. Args: request (:class:`~.pipeline_service.CancelTrainingPipelineRequest`): The request object. Request message for - [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline]. + ``PipelineService.CancelTrainingPipeline``. name (:class:`str`): Required. The name of the TrainingPipeline to cancel. 
Format: diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py index 5c79d9870d..7ce95caab7 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py @@ -262,15 +262,15 @@ def cancel_training_pipeline( Cancels a TrainingPipeline. Starts asynchronous cancellation on the TrainingPipeline. The server makes a best effort to cancel the pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline] + ``PipelineService.GetTrainingPipeline`` or other methods to check whether the cancellation succeeded or whether the pipeline completed despite cancellation. On successful cancellation, the TrainingPipeline is not deleted; instead it becomes a pipeline with a - [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of + ``TrainingPipeline.error`` + value with a ``google.rpc.Status.code`` of 1, corresponding to ``Code.CANCELLED``, and - [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] + ``TrainingPipeline.state`` is set to ``CANCELLED``. Returns: diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py index 1e6194bb9a..dbdf226471 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py @@ -151,7 +151,7 @@ def predict( Args: request (:class:`~.prediction_service.PredictRequest`): The request object. Request message for - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. + ``PredictionService.Predict``. endpoint (:class:`str`): Required. The name of the Endpoint requested to serve the prediction. Format: @@ -170,7 +170,7 @@ def predict( Endpoint's DeployedModels' [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. + ``instance_schema_uri``. This corresponds to the ``instances`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -180,7 +180,7 @@ def predict( DeployedModels' [Model's ][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. + ``parameters_schema_uri``. This corresponds to the ``parameters`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -194,7 +194,7 @@ def predict( Returns: ~.prediction_service.PredictResponse: Response message for - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. + ``PredictionService.Predict``. """ # Create or coerce a protobuf request object. @@ -246,17 +246,17 @@ def explain( If [ExplainRequest.deployed_model_id] is specified, the corresponding DeployModel must have - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + ``explanation_spec`` populated. 
If [ExplainRequest.deployed_model_id] is not specified, all DeployedModels must have - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + ``explanation_spec`` populated. Only deployed AutoML tabular Models have explanation_spec. Args: request (:class:`~.prediction_service.ExplainRequest`): The request object. Request message for - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + ``PredictionService.Explain``. endpoint (:class:`str`): Required. The name of the Endpoint requested to serve the explanation. Format: @@ -275,7 +275,7 @@ def explain( specified via Endpoint's DeployedModels' [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. + ``instance_schema_uri``. This corresponds to the ``instances`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -285,14 +285,14 @@ def explain( DeployedModels' [Model's ][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. + ``parameters_schema_uri``. This corresponds to the ``parameters`` field on the ``request`` instance; if ``request`` is provided, this should not be set. deployed_model_id (:class:`str`): If specified, this ExplainRequest will be served by the chosen DeployedModel, overriding - [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]. + ``Endpoint.traffic_split``. This corresponds to the ``deployed_model_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -306,7 +306,7 @@ def explain( Returns: ~.prediction_service.ExplainResponse: Response message for - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + ``PredictionService.Explain``. """ # Create or coerce a protobuf request object. diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py index b657bcaa16..55824a233c 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py @@ -154,10 +154,10 @@ def explain( If [ExplainRequest.deployed_model_id] is specified, the corresponding DeployModel must have - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + ``explanation_spec`` populated. If [ExplainRequest.deployed_model_id] is not specified, all DeployedModels must have - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + ``explanation_spec`` populated. Only deployed AutoML tabular Models have explanation_spec. diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py index b0f7bb38c8..ddc9c26ab9 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py @@ -168,7 +168,7 @@ def create_specialist_pool( Args: request (:class:`~.specialist_pool_service.CreateSpecialistPoolRequest`): The request object. 
Request message for - [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool]. + ``SpecialistPoolService.CreateSpecialistPool``. parent (:class:`str`): Required. The parent Project name for the new SpecialistPool. The form is @@ -194,7 +194,7 @@ def create_specialist_pool( An object representing a long-running operation. The result type for the operation will be - :class:``~.gca_specialist_pool.SpecialistPool``: + :class:`~.gca_specialist_pool.SpecialistPool`: SpecialistPool represents customers' own workforce to work on their data labeling jobs. It includes a group of specialist managers who are responsible for managing the @@ -260,7 +260,7 @@ def get_specialist_pool( Args: request (:class:`~.specialist_pool_service.GetSpecialistPoolRequest`): The request object. Request message for - [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool]. + ``SpecialistPoolService.GetSpecialistPool``. name (:class:`str`): Required. The name of the SpecialistPool resource. The form is @@ -342,7 +342,7 @@ def list_specialist_pools( Args: request (:class:`~.specialist_pool_service.ListSpecialistPoolsRequest`): The request object. Request message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. + ``SpecialistPoolService.ListSpecialistPools``. parent (:class:`str`): Required. The name of the SpecialistPool's parent resource. Format: @@ -360,7 +360,7 @@ def list_specialist_pools( Returns: ~.pagers.ListSpecialistPoolsPager: Response message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. + ``SpecialistPoolService.ListSpecialistPools``. Iterating over this object will yield results and resolve additional pages automatically. @@ -424,7 +424,7 @@ def delete_specialist_pool( Args: request (:class:`~.specialist_pool_service.DeleteSpecialistPoolRequest`): The request object. Request message for - [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool]. + ``SpecialistPoolService.DeleteSpecialistPool``. name (:class:`str`): Required. The resource name of the SpecialistPool to delete. Format: @@ -444,7 +444,7 @@ def delete_specialist_pool( An object representing a long-running operation. The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that + :class:`~.empty.Empty`: A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For @@ -514,7 +514,7 @@ def update_specialist_pool( Args: request (:class:`~.specialist_pool_service.UpdateSpecialistPoolRequest`): The request object. Request message for - [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool]. + ``SpecialistPoolService.UpdateSpecialistPool``. specialist_pool (:class:`~.gca_specialist_pool.SpecialistPool`): Required. The SpecialistPool which replaces the resource on the server. @@ -539,7 +539,7 @@ def update_specialist_pool( An object representing a long-running operation. 
The result type for the operation will be - :class:``~.gca_specialist_pool.SpecialistPool``: + :class:`~.gca_specialist_pool.SpecialistPool`: SpecialistPool represents customers' own workforce to work on their data labeling jobs. It includes a group of specialist managers who are responsible for managing the diff --git a/google/cloud/aiplatform_v1beta1/types/annotation.py b/google/cloud/aiplatform_v1beta1/types/annotation.py index e6eca04509..34f3edfa5e 100644 --- a/google/cloud/aiplatform_v1beta1/types/annotation.py +++ b/google/cloud/aiplatform_v1beta1/types/annotation.py @@ -38,17 +38,17 @@ class Annotation(proto.Message): payload_schema_uri (str): Required. Google Cloud Storage URI points to a YAML file describing - [payload][google.cloud.aiplatform.v1beta1.Annotation.payload]. + ``payload``. The schema is defined as an `OpenAPI 3.0.2 Schema Object `__. The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the chosen schema must be consistent with the parent Dataset's - [metadata][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri]. + ``metadata``. payload (~.struct.Value): Required. The schema of the payload can be found in - [payload_schema][google.cloud.aiplatform.v1beta1.Annotation.payload_schema_uri]. + ``payload_schema``. create_time (~.timestamp.Timestamp): Output only. Timestamp when this Annotation was created. diff --git a/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py b/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py index bc67ec8796..332faaa6a9 100644 --- a/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py +++ b/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py @@ -39,7 +39,7 @@ class BatchPredictionJob(proto.Message): r"""A job that uses a - [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to + ``Model`` to produce predictions on multiple [input instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. If predictions for significant portion of the instances fail, the @@ -65,33 +65,33 @@ class BatchPredictionJob(proto.Message): may be specified via the [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. + ``instance_schema_uri``. model_parameters (~.struct.Value): The parameters that govern the predictions. The schema of the parameters may be specified via the [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. + ``parameters_schema_uri``. output_config (~.batch_prediction_job.BatchPredictionJob.OutputConfig): Required. The Configuration specifying where output predictions should be written. The schema of any single prediction may be specified as a concatenation of [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] + ``instance_schema_uri`` and - [prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri]. + ``prediction_schema_uri``. 
dedicated_resources (~.machine_resources.BatchDedicatedResources): The config of resources used by the Model during the batch prediction. If the Model - [supports][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types] + ``supports`` DEDICATED_RESOURCES this config may be provided (and the job will use these resources), if the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided. manual_batch_tuning_parameters (~.gca_manual_batch_tuning_parameters.ManualBatchTuningParameters): Immutable. Parameters configuring the batch behavior. Currently only applicable when - [dedicated_resources][google.cloud.aiplatform.v1beta1.BatchPredictionJob.dedicated_resources] + ``dedicated_resources`` are used (in other cases AI Platform does the tuning itself). generate_explanation (bool): @@ -174,9 +174,9 @@ class BatchPredictionJob(proto.Message): class InputConfig(proto.Message): r"""Configures the input to - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. + ``BatchPredictionJob``. See - [Model.supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats] + ``Model.supported_input_storage_formats`` for Model's supported input formats, and how instances should be expressed via any of them. @@ -195,7 +195,7 @@ class InputConfig(proto.Message): Required. The format in which instances are given, must be one of the [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] - [supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats]. + ``supported_input_storage_formats``. """ gcs_source = proto.Field(proto.MESSAGE, number=2, message=io.GcsSource,) @@ -206,9 +206,9 @@ class InputConfig(proto.Message): class OutputConfig(proto.Message): r"""Configures the output of - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. + ``BatchPredictionJob``. See - [Model.supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats] + ``Model.supported_output_storage_formats`` for supported output formats, and how predictions are expressed via any of them. @@ -223,15 +223,15 @@ class OutputConfig(proto.Message): ``predictions_0002.``, ..., ``predictions_N.`` are created where ```` depends on chosen - [predictions_format][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.predictions_format], + ``predictions_format``, and N may equal 0001 and depends on the total number of successfully predicted instances. If the Model has both - [instance][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] + ``instance`` and - [prediction][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri] + ``prediction`` schemata defined then each such file contains predictions as per the - [predictions_format][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.predictions_format]. + ``predictions_format``. If prediction for any instance failed (partially or completely), then an additional ``errors_0001.``, ``errors_0002.``,..., ``errors_N.`` @@ -250,9 +250,9 @@ class OutputConfig(proto.Message): YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two tables will be created, ``predictions``, and ``errors``. 
If the Model has both - [instance][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] + ``instance`` and - [prediction][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri] + ``prediction`` schemata defined then the tables have columns as follows: The ``predictions`` table contains instances for which the prediction succeeded, it has columns as per a concatenation @@ -267,7 +267,7 @@ class OutputConfig(proto.Message): predictions, must be one of the [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] - [supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats]. + ``supported_output_storage_formats``. """ gcs_destination = proto.Field( @@ -280,7 +280,7 @@ class OutputConfig(proto.Message): class OutputInfo(proto.Message): r"""Further describes this job's output. Supplements - [output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]. + ``output_config``. Attributes: gcs_output_directory (str): diff --git a/google/cloud/aiplatform_v1beta1/types/custom_job.py b/google/cloud/aiplatform_v1beta1/types/custom_job.py index 870f12af71..3d466ab72f 100644 --- a/google/cloud/aiplatform_v1beta1/types/custom_job.py +++ b/google/cloud/aiplatform_v1beta1/types/custom_job.py @@ -110,13 +110,13 @@ class CustomJobSpec(proto.Message): The Google Cloud Storage location to store the output of this CustomJob or HyperparameterTuningJob. For HyperparameterTuningJob, - [base_output_directory][CustomJob.job_spec.base_output_directory] + ``base_output_directory`` of each child CustomJob backing a Trial is set to a subdirectory of name - [id][google.cloud.aiplatform.v1beta1.Trial.id] under parent + ``id`` under parent HyperparameterTuningJob's - [base_output_directory][HyperparameterTuningJob.trial_job_spec.base_output_directory]. + ``base_output_directory``. Following AI Platform environment variables will be passed to containers or python modules when this field is set: diff --git a/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py b/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py index 9639bd070f..19da27e6a9 100644 --- a/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py +++ b/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py @@ -114,7 +114,7 @@ class DataLabelingJob(proto.Message): - "aiplatform.googleapis.com/schema": output only, its value is the - [inputs_schema][google.cloud.aiplatform.v1beta1.DataLabelingJob.inputs_schema_uri]'s + ``inputs_schema``'s title. specialist_pools (Sequence[str]): The SpecialistPools' resource names diff --git a/google/cloud/aiplatform_v1beta1/types/dataset.py b/google/cloud/aiplatform_v1beta1/types/dataset.py index 5f30adff8a..3675d8f42a 100644 --- a/google/cloud/aiplatform_v1beta1/types/dataset.py +++ b/google/cloud/aiplatform_v1beta1/types/dataset.py @@ -114,7 +114,7 @@ class ImportDataConfig(proto.Message): if their content bytes are identical (e.g. image bytes or pdf bytes). These labels will be overridden by Annotation labels specified inside index file refenced by - [import_schema_uri][google.cloud.aiplatform.v1beta1.ImportDataConfig.import_schema_uri], + ``import_schema_uri``, e.g. jsonl file. import_schema_uri (str): Required. Points to a YAML file stored on Google Cloud @@ -150,7 +150,7 @@ class ExportDataConfig(proto.Message): to-be-exported DataItems(specified by [data_items_filter][]) that match this filter will be exported. 
The filter syntax is the same as in - [ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. + ``ListAnnotations``. """ gcs_destination = proto.Field(proto.MESSAGE, number=1, message=io.GcsDestination,) diff --git a/google/cloud/aiplatform_v1beta1/types/dataset_service.py b/google/cloud/aiplatform_v1beta1/types/dataset_service.py index 2d4025b535..56b51a97cb 100644 --- a/google/cloud/aiplatform_v1beta1/types/dataset_service.py +++ b/google/cloud/aiplatform_v1beta1/types/dataset_service.py @@ -52,7 +52,7 @@ class CreateDatasetRequest(proto.Message): r"""Request message for - [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]. + ``DatasetService.CreateDataset``. Attributes: parent (str): @@ -69,7 +69,7 @@ class CreateDatasetRequest(proto.Message): class CreateDatasetOperationMetadata(proto.Message): r"""Runtime operation information for - [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]. + ``DatasetService.CreateDataset``. Attributes: generic_metadata (~.operation.GenericOperationMetadata): @@ -83,7 +83,7 @@ class CreateDatasetOperationMetadata(proto.Message): class GetDatasetRequest(proto.Message): r"""Request message for - [DatasetService.GetDataset][google.cloud.aiplatform.v1beta1.DatasetService.GetDataset]. + ``DatasetService.GetDataset``. Attributes: name (str): @@ -98,7 +98,7 @@ class GetDatasetRequest(proto.Message): class UpdateDatasetRequest(proto.Message): r"""Request message for - [DatasetService.UpdateDataset][google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset]. + ``DatasetService.UpdateDataset``. Attributes: dataset (~.gca_dataset.Dataset): @@ -108,8 +108,7 @@ class UpdateDatasetRequest(proto.Message): Required. The update mask applies to the resource. For the ``FieldMask`` definition, see - [FieldMask](https: - //tinyurl.com/dev-google-protobuf#google.protobuf.FieldMask). + [FieldMask](https://tinyurl.com/dev-google-protobuf#google.protobuf.FieldMask). Updatable fields: - ``display_name`` @@ -123,7 +122,7 @@ class UpdateDatasetRequest(proto.Message): class ListDatasetsRequest(proto.Message): r"""Request message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. + ``DatasetService.ListDatasets``. Attributes: parent (str): @@ -157,7 +156,7 @@ class ListDatasetsRequest(proto.Message): class ListDatasetsResponse(proto.Message): r"""Response message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. + ``DatasetService.ListDatasets``. Attributes: datasets (Sequence[~.gca_dataset.Dataset]): @@ -179,7 +178,7 @@ def raw_page(self): class DeleteDatasetRequest(proto.Message): r"""Request message for - [DatasetService.DeleteDataset][google.cloud.aiplatform.v1beta1.DatasetService.DeleteDataset]. + ``DatasetService.DeleteDataset``. Attributes: name (str): @@ -193,7 +192,7 @@ class DeleteDatasetRequest(proto.Message): class ImportDataRequest(proto.Message): r"""Request message for - [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. + ``DatasetService.ImportData``. Attributes: name (str): @@ -213,13 +212,13 @@ class ImportDataRequest(proto.Message): class ImportDataResponse(proto.Message): r"""Response message for - [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. + ``DatasetService.ImportData``. 
""" class ImportDataOperationMetadata(proto.Message): r"""Runtime operation information for - [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. + ``DatasetService.ImportData``. Attributes: generic_metadata (~.operation.GenericOperationMetadata): @@ -233,7 +232,7 @@ class ImportDataOperationMetadata(proto.Message): class ExportDataRequest(proto.Message): r"""Request message for - [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. + ``DatasetService.ExportData``. Attributes: name (str): @@ -251,7 +250,7 @@ class ExportDataRequest(proto.Message): class ExportDataResponse(proto.Message): r"""Response message for - [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. + ``DatasetService.ExportData``. Attributes: exported_files (Sequence[str]): @@ -264,7 +263,7 @@ class ExportDataResponse(proto.Message): class ExportDataOperationMetadata(proto.Message): r"""Runtime operation information for - [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. + ``DatasetService.ExportData``. Attributes: generic_metadata (~.operation.GenericOperationMetadata): @@ -283,7 +282,7 @@ class ExportDataOperationMetadata(proto.Message): class ListDataItemsRequest(proto.Message): r"""Request message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. + ``DatasetService.ListDataItems``. Attributes: parent (str): @@ -314,7 +313,7 @@ class ListDataItemsRequest(proto.Message): class ListDataItemsResponse(proto.Message): r"""Response message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. + ``DatasetService.ListDataItems``. Attributes: data_items (Sequence[~.data_item.DataItem]): @@ -336,7 +335,7 @@ def raw_page(self): class GetAnnotationSpecRequest(proto.Message): r"""Request message for - [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1beta1.DatasetService.GetAnnotationSpec]. + ``DatasetService.GetAnnotationSpec``. Attributes: name (str): @@ -353,7 +352,7 @@ class GetAnnotationSpecRequest(proto.Message): class ListAnnotationsRequest(proto.Message): r"""Request message for - [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. + ``DatasetService.ListAnnotations``. Attributes: parent (str): @@ -385,7 +384,7 @@ class ListAnnotationsRequest(proto.Message): class ListAnnotationsResponse(proto.Message): r"""Response message for - [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. + ``DatasetService.ListAnnotations``. Attributes: annotations (Sequence[~.annotation.Annotation]): diff --git a/google/cloud/aiplatform_v1beta1/types/endpoint.py b/google/cloud/aiplatform_v1beta1/types/endpoint.py index 0f9eac501c..315a9de179 100644 --- a/google/cloud/aiplatform_v1beta1/types/endpoint.py +++ b/google/cloud/aiplatform_v1beta1/types/endpoint.py @@ -45,9 +45,9 @@ class Endpoint(proto.Message): deployed_models (Sequence[~.endpoint.DeployedModel]): Output only. The models deployed in this Endpoint. To add or remove DeployedModels use - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel] + ``EndpointService.DeployModel`` and - [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel] + ``EndpointService.UndeployModel`` respectively. 
traffic_split (Sequence[~.endpoint.Endpoint.TrafficSplitEntry]): A map from a DeployedModel's ID to the @@ -124,17 +124,17 @@ class DeployedModel(proto.Message): Explanation configuration for this DeployedModel. When deploying a Model using - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel], + ``EndpointService.DeployModel``, this value overrides the value of - [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec]. + ``Model.explanation_spec``. All fields of - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + ``explanation_spec`` are optional in the request. If a field of - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + ``explanation_spec`` is not populated, the value of the same field of - [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] + ``Model.explanation_spec`` is inherited. The corresponding - [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] + ``Model.explanation_spec`` must be populated, otherwise explanation for this Model is not allowed. diff --git a/google/cloud/aiplatform_v1beta1/types/endpoint_service.py b/google/cloud/aiplatform_v1beta1/types/endpoint_service.py index 616cdf0eba..43e8eacdfb 100644 --- a/google/cloud/aiplatform_v1beta1/types/endpoint_service.py +++ b/google/cloud/aiplatform_v1beta1/types/endpoint_service.py @@ -45,7 +45,7 @@ class CreateEndpointRequest(proto.Message): r"""Request message for - [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint]. + ``EndpointService.CreateEndpoint``. Attributes: parent (str): @@ -62,7 +62,7 @@ class CreateEndpointRequest(proto.Message): class CreateEndpointOperationMetadata(proto.Message): r"""Runtime operation information for - [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint]. + ``EndpointService.CreateEndpoint``. Attributes: generic_metadata (~.operation.GenericOperationMetadata): @@ -76,7 +76,7 @@ class CreateEndpointOperationMetadata(proto.Message): class GetEndpointRequest(proto.Message): r"""Request message for - [EndpointService.GetEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint] + ``EndpointService.GetEndpoint`` Attributes: name (str): @@ -89,7 +89,7 @@ class GetEndpointRequest(proto.Message): class ListEndpointsRequest(proto.Message): r"""Request message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. + ``EndpointService.ListEndpoints``. Attributes: parent (str): @@ -125,9 +125,9 @@ class ListEndpointsRequest(proto.Message): page_token (str): Optional. The standard list page token. Typically obtained via - [ListEndpointsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListEndpointsResponse.next_page_token] + ``ListEndpointsResponse.next_page_token`` of the previous - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints] + ``EndpointService.ListEndpoints`` call. read_mask (~.field_mask.FieldMask): Optional. Mask specifying which fields to @@ -143,14 +143,14 @@ class ListEndpointsRequest(proto.Message): class ListEndpointsResponse(proto.Message): r"""Response message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. + ``EndpointService.ListEndpoints``. Attributes: endpoints (Sequence[~.gca_endpoint.Endpoint]): List of Endpoints in the requested page. 
next_page_token (str): A token to retrieve next page of results. Pass to - [ListEndpointsRequest.page_token][google.cloud.aiplatform.v1beta1.ListEndpointsRequest.page_token] + ``ListEndpointsRequest.page_token`` to obtain that page. """ @@ -166,7 +166,7 @@ def raw_page(self): class UpdateEndpointRequest(proto.Message): r"""Request message for - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. + ``EndpointService.UpdateEndpoint``. Attributes: endpoint (~.gca_endpoint.Endpoint): @@ -183,7 +183,7 @@ class UpdateEndpointRequest(proto.Message): class DeleteEndpointRequest(proto.Message): r"""Request message for - [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint]. + ``EndpointService.DeleteEndpoint``. Attributes: name (str): @@ -197,7 +197,7 @@ class DeleteEndpointRequest(proto.Message): class DeployModelRequest(proto.Message): r"""Request message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. + ``EndpointService.DeployModel``. Attributes: endpoint (str): @@ -207,17 +207,17 @@ class DeployModelRequest(proto.Message): deployed_model (~.gca_endpoint.DeployedModel): Required. The DeployedModel to be created within the Endpoint. Note that - [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + ``Endpoint.traffic_split`` must be updated for the DeployedModel to start receiving traffic, either as part of this call, or via - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. + ``EndpointService.UpdateEndpoint``. traffic_split (Sequence[~.endpoint_service.DeployModelRequest.TrafficSplitEntry]): A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. If this field is non-empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + ``traffic_split`` will be overwritten with it. To refer to the ID of the just being deployed Model, a "0" should be used, and the actual ID of the new DeployedModel will be filled in its place by @@ -225,7 +225,7 @@ class DeployModelRequest(proto.Message): 100. If this field is empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + ``traffic_split`` is not updated. """ @@ -238,7 +238,7 @@ class DeployModelRequest(proto.Message): class DeployModelResponse(proto.Message): r"""Response message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. + ``EndpointService.DeployModel``. Attributes: deployed_model (~.gca_endpoint.DeployedModel): @@ -253,7 +253,7 @@ class DeployModelResponse(proto.Message): class DeployModelOperationMetadata(proto.Message): r"""Runtime operation information for - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. + ``EndpointService.DeployModel``. Attributes: generic_metadata (~.operation.GenericOperationMetadata): @@ -267,7 +267,7 @@ class DeployModelOperationMetadata(proto.Message): class UndeployModelRequest(proto.Message): r"""Request message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. + ``EndpointService.UndeployModel``. Attributes: endpoint (str): @@ -279,7 +279,7 @@ class UndeployModelRequest(proto.Message): undeployed from the Endpoint. 
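A hedged sketch of the deploy/undeploy flow and the ``traffic_split`` convention described above, where the literal ``"0"`` key stands for the DeployedModel being created and the values must sum to 100; resource names and the resource configuration are placeholders::

    # Hypothetical DeployModel / UndeployModel calls; the resource names and
    # the automatic_resources settings are placeholders, not values from
    # this diff.
    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.EndpointServiceClient()
    endpoint = "projects/my-project/locations/us-central1/endpoints/123"

    # "0" refers to the DeployedModel created by this request; the values
    # in traffic_split must sum to 100.
    deploy_op = client.deploy_model(
        endpoint=endpoint,
        deployed_model={
            "model": "projects/my-project/locations/us-central1/models/456",
            "display_name": "my-deployed-model",
            "automatic_resources": {"min_replica_count": 1, "max_replica_count": 1},
        },
        traffic_split={"0": 100},
    )
    deployed_model_id = deploy_op.result().deployed_model.id

    # Undeploying the last DeployedModel leaves the Endpoint's traffic_split
    # empty when the operation completes.
    client.undeploy_model(
        endpoint=endpoint,
        deployed_model_id=deployed_model_id,
    ).result()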
traffic_split (Sequence[~.endpoint_service.UndeployModelRequest.TrafficSplitEntry]): If this field is provided, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + ``traffic_split`` will be overwritten with it. If last DeployedModel is being undeployed from the Endpoint, the [Endpoint.traffic_split] will always end up empty when this call returns. A @@ -295,13 +295,13 @@ class UndeployModelRequest(proto.Message): class UndeployModelResponse(proto.Message): r"""Response message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. + ``EndpointService.UndeployModel``. """ class UndeployModelOperationMetadata(proto.Message): r"""Runtime operation information for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. + ``EndpointService.UndeployModel``. Attributes: generic_metadata (~.operation.GenericOperationMetadata): diff --git a/google/cloud/aiplatform_v1beta1/types/explanation.py b/google/cloud/aiplatform_v1beta1/types/explanation.py index 6abc83ce3a..5e20ef2699 100644 --- a/google/cloud/aiplatform_v1beta1/types/explanation.py +++ b/google/cloud/aiplatform_v1beta1/types/explanation.py @@ -36,9 +36,9 @@ class Explanation(proto.Message): - r"""Explanation of a [prediction][ExplainResponse.predictions] produced + r"""Explanation of a ``prediction`` produced by the Model on a given - [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]. + ``instance``. Currently, only AutoML tabular Models support explanation. @@ -53,7 +53,7 @@ class Explanation(proto.Message): that predict multiple outputs, such as multiclass Models that predict multiple classes, each element explains one specific item. - [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] + ``Attribution.output_index`` can be used to identify which output this attribution is explaining. """ @@ -79,21 +79,21 @@ class ModelExplanation(proto.Message): that predict multiple outputs, such as multiclass Models that predict multiple classes, each element explains one specific item. - [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] + ``Attribution.output_index`` can be used to identify which output this attribution is explaining. The - [baselineOutputValue][google.cloud.aiplatform.v1beta1.Attribution.baseline_output_value], - [instanceOutputValue][google.cloud.aiplatform.v1beta1.Attribution.instance_output_value] + ``baselineOutputValue``, + ``instanceOutputValue`` and - [featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] + ``featureAttributions`` fields are averaged over the test data. NOTE: Currently AutoML tabular classification Models produce only one attribution, which averages attributions over all the classes it predicts. - [Attribution.approximation_error][google.cloud.aiplatform.v1beta1.Attribution.approximation_error] + ``Attribution.approximation_error`` is not populated. """ @@ -110,14 +110,14 @@ class Attribution(proto.Message): Output only. Model predicted output if the input instance is constructed from the baselines of all the features defined in - [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. + ``ExplanationMetadata.inputs``. The field name of the output is determined by the key in - [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs]. + ``ExplanationMetadata.outputs``. 
If the Model predicted output is a tensor value (for example, an ndarray), this is the value in the output located by - [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]. + ``output_index``. If there are multiple baselines, their output values are averaged. @@ -125,12 +125,12 @@ class Attribution(proto.Message): Output only. Model predicted output on the corresponding [explanation instance][ExplainRequest.instances]. The field name of the output is determined by the key in - [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs]. + ``ExplanationMetadata.outputs``. If the Model predicted output is a tensor value (for example, an ndarray), this is the value in the output located by - [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]. + ``output_index``. feature_attributions (~.struct.Value): Output only. Attributions of each explained feature. Features are extracted from the [prediction @@ -140,7 +140,7 @@ class Attribution(proto.Message): The value is a struct, whose keys are the name of the feature. The values are how much the feature in the - [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] + ``instance`` contributed to the predicted result. The format of the value is determined by the feature's input @@ -152,21 +152,21 @@ class Attribution(proto.Message): - If the feature is an array of scalar values, the attribution value is an - [array][google.protobuf.Value.list_value]. + ``array``. - If the feature is a struct, the attribution value is a - [struct][google.protobuf.Value.struct_value]. The keys in + ``struct``. The keys in the attribution value struct are the same as the keys in the feature struct. The formats of the values in the attribution struct are determined by the formats of the values in the feature struct. The - [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1beta1.ExplanationMetadata.feature_attributions_schema_uri] + ``ExplanationMetadata.feature_attributions_schema_uri`` field, pointed to by the - [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] + ``ExplanationSpec`` field of the - [Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] + ``Endpoint.deployed_models`` object, points to the schema file that describes the features and their attribution values (if it is populated). output_index (Sequence[int]): @@ -181,7 +181,7 @@ class Attribution(proto.Message): dimension of the output vector. Indexes start from 0. output_display_name (str): Output only. The display name of the output identified by - [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index], + ``output_index``, e.g. the predicted class name by a multi-classification Model. @@ -191,14 +191,14 @@ class Attribution(proto.Message): explained output, and can be located using output_index. approximation_error (float): Output only. Error of - [feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] + ``feature_attributions`` caused by approximation used in the explanation method. Lower value means more precise attributions. For Sampled Shapley - [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.sampled_shapley_attribution], + ``attribution``, increasing - [path_count][google.cloud.aiplatform.v1beta1.SampledShapleyAttribution.path_count] + ``path_count`` might reduce the error. 
""" diff --git a/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py b/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py index 12c7c4bc6f..1b9f005857 100644 --- a/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py +++ b/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py @@ -38,7 +38,7 @@ class ExplanationMetadata(proto.Message): An empty InputMetadata is valid. It describes a text feature which has the name specified as the key in - [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. + ``ExplanationMetadata.inputs``. The baseline of the empty feature is chosen by AI Platform. outputs (Sequence[~.explanation_metadata.ExplanationMetadata.OutputsEntry]): Required. Map from output names to output @@ -73,12 +73,12 @@ class InputMetadata(proto.Message): The element of the baselines must be in the same format as the feature's input in the - [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances][]. + ``instance``[]. The schema of any single instance may be specified via Endpoint's DeployedModels' [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. + ``instance_schema_uri``. """ input_baselines = proto.RepeatedField( @@ -102,9 +102,9 @@ class OutputMetadata(proto.Message): The shape of the value must be an n-dimensional array of strings. The number of dimentions must match that of the outputs to be explained. The - [Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name] + ``Attribution.output_display_name`` is populated by locating in the mapping with - [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]. + ``Attribution.output_index``. display_name_mapping_key (str): Specify a field name in the prediction to look for the display name. @@ -114,7 +114,7 @@ class OutputMetadata(proto.Message): The display names in the prediction must have the same shape of the outputs, so that it can be located by - [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] + ``Attribution.output_index`` for a specific output. """ diff --git a/google/cloud/aiplatform_v1beta1/types/job_service.py b/google/cloud/aiplatform_v1beta1/types/job_service.py index 302d909b08..98e80c19a2 100644 --- a/google/cloud/aiplatform_v1beta1/types/job_service.py +++ b/google/cloud/aiplatform_v1beta1/types/job_service.py @@ -64,7 +64,7 @@ class CreateCustomJobRequest(proto.Message): r"""Request message for - [JobService.CreateCustomJob][google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob]. + ``JobService.CreateCustomJob``. Attributes: parent (str): @@ -81,7 +81,7 @@ class CreateCustomJobRequest(proto.Message): class GetCustomJobRequest(proto.Message): r"""Request message for - [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob]. + ``JobService.GetCustomJob``. Attributes: name (str): @@ -94,7 +94,7 @@ class GetCustomJobRequest(proto.Message): class ListCustomJobsRequest(proto.Message): r"""Request message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs]. + ``JobService.ListCustomJobs``. Attributes: parent (str): @@ -123,9 +123,9 @@ class ListCustomJobsRequest(proto.Message): The standard list page size. page_token (str): The standard list page token. 
Typically obtained via - [ListCustomJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListCustomJobsResponse.next_page_token] + ``ListCustomJobsResponse.next_page_token`` of the previous - [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] + ``JobService.ListCustomJobs`` call. read_mask (~.field_mask.FieldMask): Mask specifying which fields to read. @@ -140,14 +140,14 @@ class ListCustomJobsRequest(proto.Message): class ListCustomJobsResponse(proto.Message): r"""Response message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] + ``JobService.ListCustomJobs`` Attributes: custom_jobs (Sequence[~.gca_custom_job.CustomJob]): List of CustomJobs in the requested page. next_page_token (str): A token to retrieve next page of results. Pass to - [ListCustomJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListCustomJobsRequest.page_token] + ``ListCustomJobsRequest.page_token`` to obtain that page. """ @@ -163,7 +163,7 @@ def raw_page(self): class DeleteCustomJobRequest(proto.Message): r"""Request message for - [JobService.DeleteCustomJob][google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob]. + ``JobService.DeleteCustomJob``. Attributes: name (str): @@ -177,7 +177,7 @@ class DeleteCustomJobRequest(proto.Message): class CancelCustomJobRequest(proto.Message): r"""Request message for - [JobService.CancelCustomJob][google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob]. + ``JobService.CancelCustomJob``. Attributes: name (str): @@ -270,7 +270,7 @@ class ListDataLabelingJobsRequest(proto.Message): class ListDataLabelingJobsResponse(proto.Message): r"""Response message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. + ``JobService.ListDataLabelingJobs``. Attributes: data_labeling_jobs (Sequence[~.gca_data_labeling_job.DataLabelingJob]): @@ -292,7 +292,7 @@ def raw_page(self): class DeleteDataLabelingJobRequest(proto.Message): r"""Request message for - [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob]. + ``JobService.DeleteDataLabelingJob``. Attributes: name (str): @@ -321,7 +321,7 @@ class CancelDataLabelingJobRequest(proto.Message): class CreateHyperparameterTuningJobRequest(proto.Message): r"""Request message for - [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob]. + ``JobService.CreateHyperparameterTuningJob``. Attributes: parent (str): @@ -343,7 +343,7 @@ class CreateHyperparameterTuningJobRequest(proto.Message): class GetHyperparameterTuningJobRequest(proto.Message): r"""Request message for - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob]. + ``JobService.GetHyperparameterTuningJob``. Attributes: name (str): @@ -358,7 +358,7 @@ class GetHyperparameterTuningJobRequest(proto.Message): class ListHyperparameterTuningJobsRequest(proto.Message): r"""Request message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]. + ``JobService.ListHyperparameterTuningJobs``. Attributes: parent (str): @@ -387,9 +387,9 @@ class ListHyperparameterTuningJobsRequest(proto.Message): The standard list page size. page_token (str): The standard list page token. 
Typically obtained via - [ListHyperparameterTuningJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListHyperparameterTuningJobsResponse.next_page_token] + ``ListHyperparameterTuningJobsResponse.next_page_token`` of the previous - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs] + ``JobService.ListHyperparameterTuningJobs`` call. read_mask (~.field_mask.FieldMask): Mask specifying which fields to read. @@ -404,16 +404,16 @@ class ListHyperparameterTuningJobsRequest(proto.Message): class ListHyperparameterTuningJobsResponse(proto.Message): r"""Response message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs] + ``JobService.ListHyperparameterTuningJobs`` Attributes: hyperparameter_tuning_jobs (Sequence[~.gca_hyperparameter_tuning_job.HyperparameterTuningJob]): List of HyperparameterTuningJobs in the requested page. - [HyperparameterTuningJob.trials][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.trials] + ``HyperparameterTuningJob.trials`` of the jobs will be not be returned. next_page_token (str): A token to retrieve next page of results. Pass to - [ListHyperparameterTuningJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListHyperparameterTuningJobsRequest.page_token] + ``ListHyperparameterTuningJobsRequest.page_token`` to obtain that page. """ @@ -431,7 +431,7 @@ def raw_page(self): class DeleteHyperparameterTuningJobRequest(proto.Message): r"""Request message for - [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob]. + ``JobService.DeleteHyperparameterTuningJob``. Attributes: name (str): @@ -446,7 +446,7 @@ class DeleteHyperparameterTuningJobRequest(proto.Message): class CancelHyperparameterTuningJobRequest(proto.Message): r"""Request message for - [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob]. + ``JobService.CancelHyperparameterTuningJob``. Attributes: name (str): @@ -461,7 +461,7 @@ class CancelHyperparameterTuningJobRequest(proto.Message): class CreateBatchPredictionJobRequest(proto.Message): r"""Request message for - [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob]. + ``JobService.CreateBatchPredictionJob``. Attributes: parent (str): @@ -480,7 +480,7 @@ class CreateBatchPredictionJobRequest(proto.Message): class GetBatchPredictionJobRequest(proto.Message): r"""Request message for - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]. + ``JobService.GetBatchPredictionJob``. Attributes: name (str): @@ -495,7 +495,7 @@ class GetBatchPredictionJobRequest(proto.Message): class ListBatchPredictionJobsRequest(proto.Message): r"""Request message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs]. + ``JobService.ListBatchPredictionJobs``. Attributes: parent (str): @@ -524,9 +524,9 @@ class ListBatchPredictionJobsRequest(proto.Message): The standard list page size. page_token (str): The standard list page token. 
Typically obtained via - [ListBatchPredictionJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListBatchPredictionJobsResponse.next_page_token] + ``ListBatchPredictionJobsResponse.next_page_token`` of the previous - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] + ``JobService.ListBatchPredictionJobs`` call. read_mask (~.field_mask.FieldMask): Mask specifying which fields to read. @@ -541,7 +541,7 @@ class ListBatchPredictionJobsRequest(proto.Message): class ListBatchPredictionJobsResponse(proto.Message): r"""Response message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] + ``JobService.ListBatchPredictionJobs`` Attributes: batch_prediction_jobs (Sequence[~.gca_batch_prediction_job.BatchPredictionJob]): @@ -549,7 +549,7 @@ class ListBatchPredictionJobsResponse(proto.Message): page. next_page_token (str): A token to retrieve next page of results. Pass to - [ListBatchPredictionJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListBatchPredictionJobsRequest.page_token] + ``ListBatchPredictionJobsRequest.page_token`` to obtain that page. """ @@ -565,7 +565,7 @@ def raw_page(self): class DeleteBatchPredictionJobRequest(proto.Message): r"""Request message for - [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob]. + ``JobService.DeleteBatchPredictionJob``. Attributes: name (str): @@ -580,7 +580,7 @@ class DeleteBatchPredictionJobRequest(proto.Message): class CancelBatchPredictionJobRequest(proto.Message): r"""Request message for - [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob]. + ``JobService.CancelBatchPredictionJob``. Attributes: name (str): diff --git a/google/cloud/aiplatform_v1beta1/types/machine_resources.py b/google/cloud/aiplatform_v1beta1/types/machine_resources.py index ff73b6db72..30b81e3efc 100644 --- a/google/cloud/aiplatform_v1beta1/types/machine_resources.py +++ b/google/cloud/aiplatform_v1beta1/types/machine_resources.py @@ -75,14 +75,14 @@ class MachineSpec(proto.Message): When used for [DeployedMode][] this field is optional and the default value is ``n1-standard-2``. If used for - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob] + ``BatchPredictionJob`` or as part of - [WorkerPoolSpec][google.cloud.aiplatform.v1beta1.WorkerPoolSpec] + ``WorkerPoolSpec`` this field is required. accelerator_type (~.gca_accelerator_type.AcceleratorType): Immutable. The type of accelerator(s) that may be attached to the machine as per - [accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count]. + ``accelerator_count``. accelerator_count (int): The number of accelerators to attach to the machine. @@ -110,10 +110,10 @@ class DedicatedResources(proto.Message): against it increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. Note: if - [machine_spec.accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count] + ``machine_spec.accelerator_count`` is above 0, currently the model will be always deployed precisely on - [min_replica_count][google.cloud.aiplatform.v1beta1.DedicatedResources.min_replica_count]. + ``min_replica_count``. max_replica_count (int): Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. 
If @@ -124,7 +124,7 @@ class DedicatedResources(proto.Message): beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, will use - [min_replica_count][google.cloud.aiplatform.v1beta1.DedicatedResources.min_replica_count] + ``min_replica_count`` as the default value. """ @@ -144,7 +144,7 @@ class AutomaticResources(proto.Message): Immutable. The minimum number of replicas this DeployedModel will be always deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to - [max_replica_count][google.cloud.aiplatform.v1beta1.AutomaticResources.max_replica_count], + ``max_replica_count``, and as traffic decreases, some of these extra replicas may be freed. If requested value is too large, the deployment will error. @@ -182,7 +182,7 @@ class BatchDedicatedResources(proto.Message): Immutable. The number of machine replicas used at the start of the batch operation. If not set, AI Platform decides starting number, not greater than - [max_replica_count][google.cloud.aiplatform.v1beta1.BatchDedicatedResources.max_replica_count] + ``max_replica_count`` max_replica_count (int): Immutable. The maximum number of machine replicas the batch operation may be scaled to. diff --git a/google/cloud/aiplatform_v1beta1/types/model.py b/google/cloud/aiplatform_v1beta1/types/model.py index e2a8d2995b..39cb44206e 100644 --- a/google/cloud/aiplatform_v1beta1/types/model.py +++ b/google/cloud/aiplatform_v1beta1/types/model.py @@ -46,9 +46,9 @@ class Model(proto.Message): predict_schemata (~.model.PredictSchemata): The schemata that describe formats of the Model's predictions and explanations as given and returned via - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] + ``PredictionService.Predict`` and - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + ``PredictionService.Explain``. metadata_schema_uri (str): Immutable. Points to a YAML file stored on Google Cloud Storage describing additional information about the Model, @@ -65,7 +65,7 @@ class Model(proto.Message): metadata (~.struct.Value): Immutable. An additional information about the Model; the schema of the metadata can be found in - [metadata_schema][google.cloud.aiplatform.v1beta1.Model.metadata_schema_uri]. + ``metadata_schema``. Unset if the Model does not have any additional information. supported_export_formats (Sequence[~.model.Model.ExportFormat]): Output only. The formats in which this Model @@ -79,7 +79,7 @@ class Model(proto.Message): Input only. The specification of the container that is to be used when deploying this Model. The specification is ingested upon - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], + ``ModelService.UploadModel``, and all binaries it contains are copied and stored internally by AI Platform. Not present for AutoML Models. artifact_uri (str): @@ -90,68 +90,68 @@ class Model(proto.Message): Output only. When this Model is deployed, its prediction resources are described by the ``prediction_resources`` field of the - [Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] + ``Endpoint.deployed_models`` object. Because not all Models support all resource configuration types, the configuration types this Model supports are listed here. 
If no configuration types are listed, the Model cannot be deployed to an - [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] and + ``Endpoint`` and does not support online predictions - ([PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] + (``PredictionService.Predict`` or - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]). + ``PredictionService.Explain``). Such a Model can serve predictions by using a - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob], + ``BatchPredictionJob``, if it has at least one entry each in - [supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats] + ``supported_input_storage_formats`` and - [supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats]. + ``supported_output_storage_formats``. supported_input_storage_formats (Sequence[str]): Output only. The formats this Model supports in - [BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. + ``BatchPredictionJob.input_config``. If - [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] + ``PredictSchemata.instance_schema_uri`` exists, the instances should be given as per that schema. The possible formats are: - ``jsonl`` The JSON Lines format, where each instance is a single line. Uses - [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. + ``GcsSource``. - ``csv`` The CSV format, where each instance is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses - [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. + ``GcsSource``. - ``tf-record`` The TFRecord format, where each instance is a single record in tfrecord syntax. Uses - [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. + ``GcsSource``. - ``tf-record-gzip`` Similar to ``tf-record``, but the file is gzipped. Uses - [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. + ``GcsSource``. - ``bigquery`` Each instance is a single row in BigQuery. Uses - [BigQuerySource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.bigquery_source]. + ``BigQuerySource``. If this Model doesn't support any of these formats it means it cannot be used with a - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. + ``BatchPredictionJob``. However, if it has - [supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types], + ``supported_deployment_resources_types``, it could serve online predictions by using - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] + ``PredictionService.Predict`` or - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + ``PredictionService.Explain``. supported_output_storage_formats (Sequence[str]): Output only. The formats this Model supports in - [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]. + ``BatchPredictionJob.output_config``. 
If both - [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] + ``PredictSchemata.instance_schema_uri`` and - [PredictSchemata.prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri] + ``PredictSchemata.prediction_schema_uri`` exist, the predictions are returned together with their instances. In other words, the prediction has the original instance data first, followed by the actual prediction @@ -161,27 +161,27 @@ class Model(proto.Message): - ``jsonl`` The JSON Lines format, where each prediction is a single line. Uses - [GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination]. + ``GcsDestination``. - ``csv`` The CSV format, where each prediction is a single comma-separated line. The first line in the file is the header, containing comma-separated field names. Uses - [GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination]. + ``GcsDestination``. - ``bigquery`` Each prediction is a single row in a BigQuery table, uses - [BigQueryDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.bigquery_destination] + ``BigQueryDestination`` . If this Model doesn't support any of these formats it means it cannot be used with a - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. + ``BatchPredictionJob``. However, if it has - [supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types], + ``supported_deployment_resources_types``, it could serve online predictions by using - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] + ``PredictionService.Predict`` or - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + ``PredictionService.Explain``. create_time (~.timestamp.Timestamp): Output only. Timestamp when this Model was uploaded into AI Platform. @@ -200,17 +200,17 @@ class Model(proto.Message): Model can be used for [requesting explanation][google.cloud.aiplatform.v1beta1.PredictionService.Explain] after being - [deployed][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel] + ``deployed`` iff it is populated. All fields of the explanation_spec can be overridden by - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + ``explanation_spec`` of - [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1beta1.DeployModelRequest.deployed_model]. + ``DeployModelRequest.deployed_model``. This field is populated only for tabular AutoML Models. Specifying it with - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] + ``ModelService.UploadModel`` is not supported. etag (str): Used to perform consistent read-modify-write @@ -308,20 +308,20 @@ class ExportableContent(proto.Enum): class PredictSchemata(proto.Message): r"""Contains the schemata used in Model's predictions and explanations via - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict], - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain] + ``PredictionService.Predict``, + ``PredictionService.Explain`` and - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. + ``BatchPredictionJob``. Attributes: instance_schema_uri (str): Immutable. 
Points to a YAML file stored on Google Cloud Storage describing the format of a single instance, which are used in - [PredictRequest.instances][google.cloud.aiplatform.v1beta1.PredictRequest.instances], - [ExplainRequest.instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] + ``PredictRequest.instances``, + ``ExplainRequest.instances`` and - [BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. + ``BatchPredictionJob.input_config``. The schema is defined as an OpenAPI 3.0.2 `Schema Object `__. AutoML Models always have this field populated by AI @@ -333,10 +333,10 @@ class PredictSchemata(proto.Message): Immutable. Points to a YAML file stored on Google Cloud Storage describing the parameters of prediction and explanation via - [PredictRequest.parameters][google.cloud.aiplatform.v1beta1.PredictRequest.parameters], - [ExplainRequest.parameters][google.cloud.aiplatform.v1beta1.ExplainRequest.parameters] + ``PredictRequest.parameters``, + ``ExplainRequest.parameters`` and - [BatchPredictionJob.model_parameters][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model_parameters]. + ``BatchPredictionJob.model_parameters``. The schema is defined as an OpenAPI 3.0.2 `Schema Object `__. AutoML Models always have this field populated by AI @@ -349,10 +349,10 @@ class PredictSchemata(proto.Message): Immutable. Points to a YAML file stored on Google Cloud Storage describing the format of a single prediction produced by this Model, which are returned via - [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions], - [ExplainResponse.explanations][google.cloud.aiplatform.v1beta1.ExplainResponse.explanations], + ``PredictResponse.predictions``, + ``ExplainResponse.explanations``, and - [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]. + ``BatchPredictionJob.output_config``. The schema is defined as an OpenAPI 3.0.2 `Schema Object `__. AutoML Models always have this field populated by AI @@ -377,7 +377,7 @@ class ModelContainerSpec(proto.Message): Required. Immutable. The URI of the Model serving container file in the Container Registry. The container image is ingested upon - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], + ``ModelService.UploadModel``, stored internally, and this original path is afterwards not used. command (Sequence[str]): diff --git a/google/cloud/aiplatform_v1beta1/types/model_evaluation.py b/google/cloud/aiplatform_v1beta1/types/model_evaluation.py index 5e54055a9e..13f49e963e 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_evaluation.py +++ b/google/cloud/aiplatform_v1beta1/types/model_evaluation.py @@ -40,23 +40,23 @@ class ModelEvaluation(proto.Message): metrics_schema_uri (str): Output only. Points to a YAML file stored on Google Cloud Storage describing the - [metrics][google.cloud.aiplatform.v1beta1.ModelEvaluation.metrics] + ``metrics`` of this ModelEvaluation. The schema is defined as an OpenAPI 3.0.2 `Schema Object `__. metrics (~.struct.Value): Output only. Evaluation metrics of the Model. The schema of the metrics is stored in - [metrics_schema_uri][google.cloud.aiplatform.v1beta1.ModelEvaluation.metrics_schema_uri] + ``metrics_schema_uri`` create_time (~.timestamp.Timestamp): Output only. Timestamp when this ModelEvaluation was created. slice_dimensions (Sequence[str]): Output only. 
All possible - [dimensions][ModelEvaluationSlice.slice.dimension] of + ``dimensions`` of ModelEvaluationSlices. The dimensions can be used as the filter of the - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices] + ``ModelService.ListModelEvaluationSlices`` request, in the form of ``slice.dimension = ``. model_explanation (~.explanation.ModelExplanation): Output only. Aggregated explanation metrics diff --git a/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py b/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py index 8f8125d51d..fe8dc19754 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py +++ b/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py @@ -42,14 +42,14 @@ class ModelEvaluationSlice(proto.Message): metrics_schema_uri (str): Output only. Points to a YAML file stored on Google Cloud Storage describing the - [metrics][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.metrics] + ``metrics`` of this ModelEvaluationSlice. The schema is defined as an OpenAPI 3.0.2 `Schema Object `__. metrics (~.struct.Value): Output only. Sliced evaluation metrics of the Model. The schema of the metrics is stored in - [metrics_schema_uri][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.metrics_schema_uri] + ``metrics_schema_uri`` create_time (~.timestamp.Timestamp): Output only. Timestamp when this ModelEvaluationSlice was created. @@ -65,9 +65,9 @@ class Slice(proto.Message): - ``annotationSpec``: This slice is on the test data that has either ground truth or prediction with - [AnnotationSpec.display_name][google.cloud.aiplatform.v1beta1.AnnotationSpec.display_name] + ``AnnotationSpec.display_name`` equals to - [value][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.Slice.value]. + ``value``. value (str): Output only. The value of the dimension in this slice. diff --git a/google/cloud/aiplatform_v1beta1/types/model_service.py b/google/cloud/aiplatform_v1beta1/types/model_service.py index 4a5978045e..42706490c1 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_service.py +++ b/google/cloud/aiplatform_v1beta1/types/model_service.py @@ -52,7 +52,7 @@ class UploadModelRequest(proto.Message): r"""Request message for - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]. + ``ModelService.UploadModel``. Attributes: parent (str): @@ -69,7 +69,7 @@ class UploadModelRequest(proto.Message): class UploadModelOperationMetadata(proto.Message): r"""Details of - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] + ``ModelService.UploadModel`` operation. Attributes: @@ -84,7 +84,7 @@ class UploadModelOperationMetadata(proto.Message): class UploadModelResponse(proto.Message): r"""Response message of - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] + ``ModelService.UploadModel`` operation. Attributes: @@ -98,7 +98,7 @@ class UploadModelResponse(proto.Message): class GetModelRequest(proto.Message): r"""Request message for - [ModelService.GetModel][google.cloud.aiplatform.v1beta1.ModelService.GetModel]. + ``ModelService.GetModel``. Attributes: name (str): @@ -111,7 +111,7 @@ class GetModelRequest(proto.Message): class ListModelsRequest(proto.Message): r"""Request message for - [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels]. + ``ModelService.ListModels``. 
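A minimal, hypothetical sketch of the ModelService upload and list calls covered above; the container image, artifact URI, and resource names are placeholders::

    # Hypothetical ModelService usage; all names and URIs are placeholders.
    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.ModelServiceClient()
    parent = "projects/my-project/locations/us-central1"

    # upload_model returns a long-running operation whose result carries the
    # resource name of the newly uploaded Model.
    response = client.upload_model(
        parent=parent,
        model={
            "display_name": "my-model",
            "container_spec": {"image_uri": "gcr.io/my-project/my-serving-image"},
            "artifact_uri": "gs://my-bucket/model/",
        },
    ).result()
    print(response.model)

    # list_models returns a pager; iteration resolves further pages via
    # ListModelsResponse.next_page_token behind the scenes.
    for model in client.list_models(parent=parent):
        print(model.name, model.display_name)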
Attributes: parent (str): @@ -124,9 +124,9 @@ class ListModelsRequest(proto.Message): The standard list page size. page_token (str): The standard list page token. Typically obtained via - [ListModelsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelsResponse.next_page_token] + ``ListModelsResponse.next_page_token`` of the previous - [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels] + ``ModelService.ListModels`` call. read_mask (~.field_mask.FieldMask): Mask specifying which fields to read. @@ -141,14 +141,14 @@ class ListModelsRequest(proto.Message): class ListModelsResponse(proto.Message): r"""Response message for - [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels] + ``ModelService.ListModels`` Attributes: models (Sequence[~.gca_model.Model]): List of Models in the requested page. next_page_token (str): A token to retrieve next page of results. Pass to - [ListModelsRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelsRequest.page_token] + ``ListModelsRequest.page_token`` to obtain that page. """ @@ -162,7 +162,7 @@ def raw_page(self): class UpdateModelRequest(proto.Message): r"""Request message for - [ModelService.UpdateModel][google.cloud.aiplatform.v1beta1.ModelService.UpdateModel]. + ``ModelService.UpdateModel``. Attributes: model (~.gca_model.Model): @@ -172,8 +172,7 @@ class UpdateModelRequest(proto.Message): Required. The update mask applies to the resource. For the ``FieldMask`` definition, see - [FieldMask](https: //developers.google.com/protocol-buffers - // /docs/reference/google.protobuf#fieldmask). + [FieldMask](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask). """ model = proto.Field(proto.MESSAGE, number=1, message=gca_model.Model,) @@ -182,7 +181,7 @@ class UpdateModelRequest(proto.Message): class DeleteModelRequest(proto.Message): r"""Request message for - [ModelService.DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel]. + ``ModelService.DeleteModel``. Attributes: name (str): @@ -196,7 +195,7 @@ class DeleteModelRequest(proto.Message): class ExportModelRequest(proto.Message): r"""Request message for - [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]. + ``ModelService.ExportModel``. Attributes: name (str): @@ -250,7 +249,7 @@ class OutputConfig(proto.Message): class ExportModelOperationMetadata(proto.Message): r"""Details of - [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] + ``ModelService.ExportModel`` operation. Attributes: @@ -263,7 +262,7 @@ class ExportModelOperationMetadata(proto.Message): class OutputInfo(proto.Message): r"""Further describes the output of the ExportModel. Supplements - [ExportModelRequest.OutputConfig][google.cloud.aiplatform.v1beta1.ExportModelRequest.OutputConfig]. + ``ExportModelRequest.OutputConfig``. Attributes: artifact_output_uri (str): @@ -289,14 +288,14 @@ class OutputInfo(proto.Message): class ExportModelResponse(proto.Message): r"""Response message of - [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] + ``ModelService.ExportModel`` operation. """ class GetModelEvaluationRequest(proto.Message): r"""Request message for - [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation]. + ``ModelService.GetModelEvaluation``. 
Attributes: name (str): @@ -310,7 +309,7 @@ class GetModelEvaluationRequest(proto.Message): class ListModelEvaluationsRequest(proto.Message): r"""Request message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. + ``ModelService.ListModelEvaluations``. Attributes: parent (str): @@ -323,9 +322,9 @@ class ListModelEvaluationsRequest(proto.Message): The standard list page size. page_token (str): The standard list page token. Typically obtained via - [ListModelEvaluationsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationsResponse.next_page_token] + ``ListModelEvaluationsResponse.next_page_token`` of the previous - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations] + ``ModelService.ListModelEvaluations`` call. read_mask (~.field_mask.FieldMask): Mask specifying which fields to read. @@ -340,7 +339,7 @@ class ListModelEvaluationsRequest(proto.Message): class ListModelEvaluationsResponse(proto.Message): r"""Response message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. + ``ModelService.ListModelEvaluations``. Attributes: model_evaluations (Sequence[~.model_evaluation.ModelEvaluation]): @@ -348,7 +347,7 @@ class ListModelEvaluationsResponse(proto.Message): page. next_page_token (str): A token to retrieve next page of results. Pass to - [ListModelEvaluationsRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationsRequest.page_token] + ``ListModelEvaluationsRequest.page_token`` to obtain that page. """ @@ -364,7 +363,7 @@ def raw_page(self): class GetModelEvaluationSliceRequest(proto.Message): r"""Request message for - [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice]. + ``ModelService.GetModelEvaluationSlice``. Attributes: name (str): @@ -379,7 +378,7 @@ class GetModelEvaluationSliceRequest(proto.Message): class ListModelEvaluationSlicesRequest(proto.Message): r"""Request message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. + ``ModelService.ListModelEvaluationSlices``. Attributes: parent (str): @@ -395,9 +394,9 @@ class ListModelEvaluationSlicesRequest(proto.Message): The standard list page size. page_token (str): The standard list page token. Typically obtained via - [ListModelEvaluationSlicesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationSlicesResponse.next_page_token] + ``ListModelEvaluationSlicesResponse.next_page_token`` of the previous - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices] + ``ModelService.ListModelEvaluationSlices`` call. read_mask (~.field_mask.FieldMask): Mask specifying which fields to read. @@ -412,7 +411,7 @@ class ListModelEvaluationSlicesRequest(proto.Message): class ListModelEvaluationSlicesResponse(proto.Message): r"""Response message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. + ``ModelService.ListModelEvaluationSlices``. Attributes: model_evaluation_slices (Sequence[~.model_evaluation_slice.ModelEvaluationSlice]): @@ -420,7 +419,7 @@ class ListModelEvaluationSlicesResponse(proto.Message): page. next_page_token (str): A token to retrieve next page of results. 
Pass to - [ListModelEvaluationSlicesRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationSlicesRequest.page_token] + ``ListModelEvaluationSlicesRequest.page_token`` to obtain that page. """ diff --git a/google/cloud/aiplatform_v1beta1/types/pipeline_service.py b/google/cloud/aiplatform_v1beta1/types/pipeline_service.py index 089d6185c8..7ba3638c51 100644 --- a/google/cloud/aiplatform_v1beta1/types/pipeline_service.py +++ b/google/cloud/aiplatform_v1beta1/types/pipeline_service.py @@ -39,7 +39,7 @@ class CreateTrainingPipelineRequest(proto.Message): r"""Request message for - [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline]. + ``PipelineService.CreateTrainingPipeline``. Attributes: parent (str): @@ -58,7 +58,7 @@ class CreateTrainingPipelineRequest(proto.Message): class GetTrainingPipelineRequest(proto.Message): r"""Request message for - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline]. + ``PipelineService.GetTrainingPipeline``. Attributes: name (str): @@ -72,7 +72,7 @@ class GetTrainingPipelineRequest(proto.Message): class ListTrainingPipelinesRequest(proto.Message): r"""Request message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines]. + ``PipelineService.ListTrainingPipelines``. Attributes: parent (str): @@ -99,9 +99,9 @@ class ListTrainingPipelinesRequest(proto.Message): The standard list page size. page_token (str): The standard list page token. Typically obtained via - [ListTrainingPipelinesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListTrainingPipelinesResponse.next_page_token] + ``ListTrainingPipelinesResponse.next_page_token`` of the previous - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] + ``PipelineService.ListTrainingPipelines`` call. read_mask (~.field_mask.FieldMask): Mask specifying which fields to read. @@ -116,7 +116,7 @@ class ListTrainingPipelinesRequest(proto.Message): class ListTrainingPipelinesResponse(proto.Message): r"""Response message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] + ``PipelineService.ListTrainingPipelines`` Attributes: training_pipelines (Sequence[~.gca_training_pipeline.TrainingPipeline]): @@ -124,7 +124,7 @@ class ListTrainingPipelinesResponse(proto.Message): page. next_page_token (str): A token to retrieve next page of results. Pass to - [ListTrainingPipelinesRequest.page_token][google.cloud.aiplatform.v1beta1.ListTrainingPipelinesRequest.page_token] + ``ListTrainingPipelinesRequest.page_token`` to obtain that page. """ @@ -140,7 +140,7 @@ def raw_page(self): class DeleteTrainingPipelineRequest(proto.Message): r"""Request message for - [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline]. + ``PipelineService.DeleteTrainingPipeline``. Attributes: name (str): @@ -155,7 +155,7 @@ class DeleteTrainingPipelineRequest(proto.Message): class CancelTrainingPipelineRequest(proto.Message): r"""Request message for - [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline]. + ``PipelineService.CancelTrainingPipeline``. 
Attributes: name (str): diff --git a/google/cloud/aiplatform_v1beta1/types/prediction_service.py b/google/cloud/aiplatform_v1beta1/types/prediction_service.py index efff997a32..e937a6d8e4 100644 --- a/google/cloud/aiplatform_v1beta1/types/prediction_service.py +++ b/google/cloud/aiplatform_v1beta1/types/prediction_service.py @@ -35,7 +35,7 @@ class PredictRequest(proto.Message): r"""Request message for - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. + ``PredictionService.Predict``. Attributes: endpoint (str): @@ -53,14 +53,14 @@ class PredictRequest(proto.Message): DeployedModels' [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. + ``instance_schema_uri``. parameters (~.struct.Value): The parameters that govern the prediction. The schema of the parameters may be specified via Endpoint's DeployedModels' [Model's ][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. + ``parameters_schema_uri``. """ endpoint = proto.Field(proto.STRING, number=1) @@ -70,7 +70,7 @@ class PredictRequest(proto.Message): class PredictResponse(proto.Message): r"""Response message for - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. + ``PredictionService.Predict``. Attributes: predictions (Sequence[~.struct.Value]): @@ -79,7 +79,7 @@ class PredictResponse(proto.Message): Endpoint's DeployedModels' [Model's ][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri]. + ``prediction_schema_uri``. deployed_model_id (str): ID of the Endpoint's DeployedModel that served this prediction. @@ -91,7 +91,7 @@ class PredictResponse(proto.Message): class ExplainRequest(proto.Message): r"""Request message for - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + ``PredictionService.Explain``. Attributes: endpoint (str): @@ -109,18 +109,18 @@ class ExplainRequest(proto.Message): DeployedModels' [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. + ``instance_schema_uri``. parameters (~.struct.Value): The parameters that govern the prediction. The schema of the parameters may be specified via Endpoint's DeployedModels' [Model's ][google.cloud.aiplatform.v1beta1.DeployedModel.model] [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. + ``parameters_schema_uri``. deployed_model_id (str): If specified, this ExplainRequest will be served by the chosen DeployedModel, overriding - [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]. + ``Endpoint.traffic_split``. 
""" endpoint = proto.Field(proto.STRING, number=1) @@ -131,7 +131,7 @@ class ExplainRequest(proto.Message): class ExplainResponse(proto.Message): r"""Response message for - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + ``PredictionService.Explain``. Attributes: explanations (Sequence[~.explanation.Explanation]): @@ -139,7 +139,7 @@ class ExplainResponse(proto.Message): predictions][PredictionResponse.predictions][]. It has the same number of elements as - [instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] + ``instances`` to be explained. deployed_model_id (str): ID of the Endpoint's DeployedModel that diff --git a/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py b/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py index fad6429b9a..02f0dac96f 100644 --- a/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py +++ b/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py @@ -40,7 +40,7 @@ class CreateSpecialistPoolRequest(proto.Message): r"""Request message for - [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool]. + ``SpecialistPoolService.CreateSpecialistPool``. Attributes: parent (str): @@ -59,7 +59,7 @@ class CreateSpecialistPoolRequest(proto.Message): class CreateSpecialistPoolOperationMetadata(proto.Message): r"""Runtime operation information for - [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool]. + ``SpecialistPoolService.CreateSpecialistPool``. Attributes: generic_metadata (~.operation.GenericOperationMetadata): @@ -73,7 +73,7 @@ class CreateSpecialistPoolOperationMetadata(proto.Message): class GetSpecialistPoolRequest(proto.Message): r"""Request message for - [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool]. + ``SpecialistPoolService.GetSpecialistPool``. Attributes: name (str): @@ -88,7 +88,7 @@ class GetSpecialistPoolRequest(proto.Message): class ListSpecialistPoolsRequest(proto.Message): r"""Request message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. + ``SpecialistPoolService.ListSpecialistPools``. Attributes: parent (str): @@ -98,9 +98,9 @@ class ListSpecialistPoolsRequest(proto.Message): The standard list page size. page_token (str): The standard list page token. Typically obtained by - [ListSpecialistPoolsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListSpecialistPoolsResponse.next_page_token] + ``ListSpecialistPoolsResponse.next_page_token`` of the previous - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools] + ``SpecialistPoolService.ListSpecialistPools`` call. Return first page if empty. read_mask (~.field_mask.FieldMask): Mask specifying which fields to read. @@ -115,7 +115,7 @@ class ListSpecialistPoolsRequest(proto.Message): class ListSpecialistPoolsResponse(proto.Message): r"""Response message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. + ``SpecialistPoolService.ListSpecialistPools``. 
Attributes: specialist_pools (Sequence[~.gca_specialist_pool.SpecialistPool]): @@ -137,7 +137,7 @@ def raw_page(self): class DeleteSpecialistPoolRequest(proto.Message): r"""Request message for - [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool]. + ``SpecialistPoolService.DeleteSpecialistPool``. Attributes: name (str): @@ -157,7 +157,7 @@ class DeleteSpecialistPoolRequest(proto.Message): class UpdateSpecialistPoolRequest(proto.Message): r"""Request message for - [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool]. + ``SpecialistPoolService.UpdateSpecialistPool``. Attributes: specialist_pool (~.gca_specialist_pool.SpecialistPool): @@ -176,7 +176,7 @@ class UpdateSpecialistPoolRequest(proto.Message): class UpdateSpecialistPoolOperationMetadata(proto.Message): r"""Runtime operation metadata for - [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool]. + ``SpecialistPoolService.UpdateSpecialistPool``. Attributes: specialist_pool (str): diff --git a/google/cloud/aiplatform_v1beta1/types/training_pipeline.py b/google/cloud/aiplatform_v1beta1/types/training_pipeline.py index cd637cde9e..bb32b7b787 100644 --- a/google/cloud/aiplatform_v1beta1/types/training_pipeline.py +++ b/google/cloud/aiplatform_v1beta1/types/training_pipeline.py @@ -44,7 +44,7 @@ class TrainingPipeline(proto.Message): Model. It always executes the training task, and optionally may also export data from AI Platform's Dataset which becomes the training input, - [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] + ``upload`` the Model to AI Platform, and evaluate the Model. Attributes: @@ -57,11 +57,11 @@ class TrainingPipeline(proto.Message): input_data_config (~.training_pipeline.InputDataConfig): Specifies AI Platform owned input data that may be used for training the Model. The TrainingPipeline's - [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition] + ``training_task_definition`` should make clear whether this config is used and if there are any special requirements on how it should be filled. If nothing about this config is mentioned in the - [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition], + ``training_task_definition``, then it should be assumed that the TrainingPipeline does not depend on this configuration. training_task_definition (str): @@ -80,27 +80,27 @@ class TrainingPipeline(proto.Message): training_task_inputs (~.struct.Value): Required. The training task's parameter(s), as specified in the - [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]'s + ``training_task_definition``'s ``inputs``. training_task_metadata (~.struct.Value): Output only. The metadata information as specified in the - [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]'s + ``training_task_definition``'s ``metadata``. This metadata is an auxiliary runtime and final information about the training task. While the pipeline is running this information is populated only at a best effort basis. Only present if the pipeline's - [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition] + ``training_task_definition`` contains ``metadata`` object. 
model_to_upload (~.model.Model): Describes the Model that may be uploaded (via [ModelService.UploadMode][]) by this TrainingPipeline. The TrainingPipeline's - [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition] + ``training_task_definition`` should make clear whether this Model description should be populated, and if there are any special requirements regarding how it should be filled. If nothing is mentioned in the - [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition], + ``training_task_definition``, then it should be assumed that this field should not be filled and the training task either uploads the Model without a need of this information, or that training task @@ -108,7 +108,7 @@ class TrainingPipeline(proto.Message): When the Pipeline's state becomes ``PIPELINE_STATE_SUCCEEDED`` and the trained Model had been uploaded into AI Platform, then the model_to_upload's - resource [name][google.cloud.aiplatform.v1beta1.Model.name] + resource ``name`` is populated. The Model is always uploaded into the Project and Location in which this pipeline is. state (~.pipeline_state.PipelineState): @@ -208,7 +208,7 @@ class InputDataConfig(proto.Message): the DataItem they are on (for the auto-assigned that role is decided by AI Platform). A filter with same syntax as the one used in - [ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations] + ``ListAnnotations`` may be used, but note here it filters across all Annotations of the Dataset, and not just within a single DataItem. annotation_schema_uri (str): @@ -216,16 +216,13 @@ class InputDataConfig(proto.Message): Google Cloud Storage URI points to a YAML file describing annotation schema. The schema is defined as an OpenAPI 3.0.2 - [Schema Object]( - - https: //github.com/OAI/OpenAPI-Specification/b // - lob/master/versions/3.0.2.md#schema-object) The schema files + [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#schema-object) The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the chosen schema must be consistent with - [metadata][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri] + ``metadata`` of the Dataset specified by - [dataset_id][google.cloud.aiplatform.v1beta1.InputDataConfig.dataset_id]. + ``dataset_id``. Only Annotations that both match this schema and belong to DataItems not ignored by the split method are used in @@ -233,11 +230,11 @@ class InputDataConfig(proto.Message): the role of the DataItem they are on. When used in conjunction with - [annotations_filter][google.cloud.aiplatform.v1beta1.InputDataConfig.annotations_filter], + ``annotations_filter``, the Annotations used for training are filtered by both - [annotations_filter][google.cloud.aiplatform.v1beta1.InputDataConfig.annotations_filter] + ``annotations_filter`` and - [annotation_schema_uri][google.cloud.aiplatform.v1beta1.InputDataConfig.annotation_schema_uri]. + ``annotation_schema_uri``. """ fraction_split = proto.Field(proto.MESSAGE, number=2, message="FractionSplit",) @@ -289,7 +286,7 @@ class FilterSplit(proto.Message): Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to train the Model. A filter with same syntax as the one used in - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems] + ``DatasetService.ListDataItems`` may be used. 
If a single DataItem is matched by more than one of the FilterSplit filters, then it will be assigned to the first set that applies to it in the training, @@ -298,7 +295,7 @@ class FilterSplit(proto.Message): Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to validate the Model. A filter with same syntax as the one used in - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems] + ``DatasetService.ListDataItems`` may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it will be assigned to the first set that applies to it in the training, @@ -307,7 +304,7 @@ class FilterSplit(proto.Message): Required. A filter on DataItems of the Dataset. DataItems that match this filter are used to test the Model. A filter with same syntax as the one used in - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems] + ``DatasetService.ListDataItems`` may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it will be assigned to the first set that applies to it in the training, diff --git a/noxfile.py b/noxfile.py index b203d2e28c..615e2c6793 100644 --- a/noxfile.py +++ b/noxfile.py @@ -26,9 +26,9 @@ BLACK_VERSION = "black==19.10b0" BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] -DEFAULT_PYTHON_VERSION = "" -SYSTEM_TEST_PYTHON_VERSIONS = [] -UNIT_TEST_PYTHON_VERSIONS = [] +DEFAULT_PYTHON_VERSION = "3.8" +SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] +UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8"] @nox.session(python=DEFAULT_PYTHON_VERSION) @@ -156,7 +156,6 @@ def docs(session): shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( "sphinx-build", - "-W", # warnings as errors "-T", # show full traceback on exception "-N", # no colors "-b", diff --git a/samples/requirements.txt b/samples/requirements.txt deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/synth.metadata b/synth.metadata index e5d0429c87..2f581d2593 100644 --- a/synth.metadata +++ b/synth.metadata @@ -4,14 +4,14 @@ "git": { "name": ".", "remote": "https://github.com/dizcology/python-aiplatform.git", - "sha": "42fa6fd761ad446872cd4bb1aca3d88abb8d7085" + "sha": "7e83ff65457e88aa155e68ddd959933a68da46af" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "da29da32b3a988457b49ae290112b74f14b713cc" + "sha": "487eba79f8260e34205d8ceb1ebcc65685085e19" } } ], diff --git a/synth.py b/synth.py index 7b92c5fd3b..31c3e11493 100644 --- a/synth.py +++ b/synth.py @@ -35,20 +35,18 @@ # version="v1beta1", # bazel_target="//google/cloud/aiplatform/v1beta1:aiplatform-v1beta1-py", # ) -library = gapic.py_library( - 'aiplatform', - 'v1beta1', - generator_version='0.20' -) +library = gapic.py_library("aiplatform", "v1beta1", generator_version="0.20") s.move( - library, - excludes=[ - "setup.py", - "README.rst", - "docs/index.rst", + library, + excludes=[ + ".kokoro", + "setup.py", + "README.rst", + "docs/index.rst", "google/cloud/aiplatform/__init__.py", - ] + "tests/unit/aiplatform_v1beta1/test_prediction_service.py", + ], ) # ---------------------------------------------------------------------------- @@ -56,32 +54,81 @@ # ---------------------------------------------------------------------------- # https://github.com/googleapis/gapic-generator-python/issues/336 -s.replace( - '**/client.py', - ' operation.from_gapic', - ' ga_operation.from_gapic' -) 
+s.replace("**/client.py", " operation.from_gapic", " ga_operation.from_gapic") s.replace( - '**/client.py', - 'client_options: ClientOptions = ', - 'client_options: ClientOptions.ClientOptions = ' + "**/client.py", + "client_options: ClientOptions = ", + "client_options: ClientOptions.ClientOptions = ", ) # https://github.com/googleapis/gapic-generator-python/issues/413 s.replace( - 'google/cloud/aiplatform_v1beta1/services/prediction_service/client.py', - 'request.instances = instances', - 'request.instances.extend(instances)' + "google/cloud/aiplatform_v1beta1/services/prediction_service/client.py", + "request.instances = instances", + "request.instances.extend(instances)", ) +# post processing to fix the generated reference doc +from synthtool import transforms as st +import re + +# https://github.com/googleapis/gapic-generator-python/issues/479 +paths = st._filter_files(st._expand_paths("google/cloud/**/*.py", ".")) + +pattern = r"(:\w+:``[^`]+``)" +expr = re.compile(pattern, flags=re.MULTILINE) +replaces = [] +for path in paths: + with path.open("r+") as fh: + content = fh.read() + matches = re.findall(expr, content) + if matches: + for match in matches: + before = match + after = match.replace("``", "`") + replaces.append((path, before, after)) + +for path, before, after in replaces: + s.replace([path], before, after) + + +# https://github.com/googleapis/gapic-generator-python/issues/483 +paths = st._filter_files(st._expand_paths("google/cloud/**/*.py", ".")) +pattern = r"(?P<full>\[(?P<first>[\w.]+)\]\[(?P<second>[\w.]+)\])" +expr = re.compile(pattern, flags=re.MULTILINE) +replaces = [] +for path in paths: + with path.open("r+") as fh: + content = fh.read() + for match in expr.finditer(content): + before = match.groupdict()["full"].replace("[", "\[").replace("]", "\]") + after = match.groupdict()["first"] + after = f"``{after}``" + replaces.append((path, before, after)) + +for path, before, after in replaces: + s.replace([path], before, after) + + +s.replace("google/cloud/**/*.py", "\]\(\n\n\s*", "](") + +s.replace("google/cloud/**/*.py", "\s*//\n\s*", "") + +s.replace("google/cloud/**/*.py", "https:[\n]*\s*//", "https://") + +s.replace("google/cloud/**/*.py", "[\n]*\s*//\s*/", "/") + # ---------------------------------------------------------------------------- # Add templated files # ---------------------------------------------------------------------------- templated_files = common.py_library(cov_level=99, microgenerator=True) s.move( - templated_files, excludes=[".coveragerc"] -) # the microgenerator has a good coveragerc file + templated_files, excludes=[".coveragerc"] +) # the microgenerator has a good coveragerc file + +# Don't treat docs warnings as errors +s.replace("noxfile.py", """["']-W["'], # warnings as errors""", "") s.shell.run(["nox", "-s", "blacken"], hide_output=False) diff --git a/tests/unit/aiplatform_v1beta1/test_dataset_service.py b/tests/unit/gapic/test_dataset_service.py similarity index 100% rename from tests/unit/aiplatform_v1beta1/test_dataset_service.py rename to tests/unit/gapic/test_dataset_service.py diff --git a/tests/unit/aiplatform_v1beta1/test_endpoint_service.py b/tests/unit/gapic/test_endpoint_service.py similarity index 100% rename from tests/unit/aiplatform_v1beta1/test_endpoint_service.py rename to tests/unit/gapic/test_endpoint_service.py diff --git a/tests/unit/aiplatform_v1beta1/test_job_service.py b/tests/unit/gapic/test_job_service.py similarity index 100% rename from tests/unit/aiplatform_v1beta1/test_job_service.py rename to
tests/unit/gapic/test_job_service.py index 027ba49f8e..92ed0d37e3 100644 --- a/tests/unit/aiplatform_v1beta1/test_job_service.py +++ b/tests/unit/gapic/test_job_service.py @@ -2074,16 +2074,16 @@ def test_custom_job_path(): assert expected == actual -def test_data_labeling_job_path(): +def test_batch_prediction_job_path(): project = "squid" location = "clam" - data_labeling_job = "whelk" + batch_prediction_job = "whelk" - expected = "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format( - project=project, location=location, data_labeling_job=data_labeling_job, + expected = "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format( + project=project, location=location, batch_prediction_job=batch_prediction_job, ) - actual = JobServiceClient.data_labeling_job_path( - project, location, data_labeling_job + actual = JobServiceClient.batch_prediction_job_path( + project, location, batch_prediction_job ) assert expected == actual @@ -2104,15 +2104,15 @@ def test_hyperparameter_tuning_job_path(): assert expected == actual -def test_batch_prediction_job_path(): +def test_data_labeling_job_path(): project = "squid" location = "clam" - batch_prediction_job = "whelk" + data_labeling_job = "whelk" - expected = "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format( - project=project, location=location, batch_prediction_job=batch_prediction_job, + expected = "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format( + project=project, location=location, data_labeling_job=data_labeling_job, ) - actual = JobServiceClient.batch_prediction_job_path( - project, location, batch_prediction_job + actual = JobServiceClient.data_labeling_job_path( + project, location, data_labeling_job ) assert expected == actual diff --git a/tests/unit/aiplatform_v1beta1/test_model_service.py b/tests/unit/gapic/test_model_service.py similarity index 100% rename from tests/unit/aiplatform_v1beta1/test_model_service.py rename to tests/unit/gapic/test_model_service.py diff --git a/tests/unit/aiplatform_v1beta1/test_pipeline_service.py b/tests/unit/gapic/test_pipeline_service.py similarity index 100% rename from tests/unit/aiplatform_v1beta1/test_pipeline_service.py rename to tests/unit/gapic/test_pipeline_service.py index 842564c259..c7c2db4449 100644 --- a/tests/unit/aiplatform_v1beta1/test_pipeline_service.py +++ b/tests/unit/gapic/test_pipeline_service.py @@ -649,27 +649,27 @@ def test_pipeline_service_grpc_lro_client(): assert transport.operations_client is transport.operations_client -def test_training_pipeline_path(): +def test_model_path(): project = "squid" location = "clam" - training_pipeline = "whelk" + model = "whelk" - expected = "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format( - project=project, location=location, training_pipeline=training_pipeline, - ) - actual = PipelineServiceClient.training_pipeline_path( - project, location, training_pipeline + expected = "projects/{project}/locations/{location}/models/{model}".format( + project=project, location=location, model=model, ) + actual = PipelineServiceClient.model_path(project, location, model) assert expected == actual -def test_model_path(): +def test_training_pipeline_path(): project = "squid" location = "clam" - model = "whelk" + training_pipeline = "whelk" - expected = "projects/{project}/locations/{location}/models/{model}".format( - project=project, location=location, model=model, + expected 
= "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format( + project=project, location=location, training_pipeline=training_pipeline, + ) + actual = PipelineServiceClient.training_pipeline_path( + project, location, training_pipeline ) - actual = PipelineServiceClient.model_path(project, location, model) assert expected == actual diff --git a/tests/unit/aiplatform_v1beta1/test_prediction_service.py b/tests/unit/gapic/test_prediction_service.py similarity index 100% rename from tests/unit/aiplatform_v1beta1/test_prediction_service.py rename to tests/unit/gapic/test_prediction_service.py diff --git a/tests/unit/aiplatform_v1beta1/test_specialist_pool_service.py b/tests/unit/gapic/test_specialist_pool_service.py similarity index 100% rename from tests/unit/aiplatform_v1beta1/test_specialist_pool_service.py rename to tests/unit/gapic/test_specialist_pool_service.py
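For reference, a minimal standalone sketch of the cross-reference rewrite that the synth.py post-processing above applies to the generated docstrings (the gapic-generator-python issue #483 workaround). This is illustrative only and not part of the patch; the sample string and helper name are assumptions made for demonstration.

import re

# Proto-style links such as [Name][full.proto.path] are rewritten to the
# RST literal ``Name``, mirroring the docstring changes throughout this diff.
CROSS_REF = re.compile(r"\[(?P<first>[\w.]+)\]\[[\w.]+\]")

def rewrite_cross_references(text: str) -> str:
    """Replace [Name][full.path] markers with ``Name``."""
    return CROSS_REF.sub(lambda m: f"``{m.group('first')}``", text)

sample = (
    "Request message for "
    "[DatasetService.CreateDataset]"
    "[google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]."
)
print(rewrite_cross_references(sample))
# -> Request message for ``DatasetService.CreateDataset``.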