🦉 Updates from OwlBot post-processor
gcf-owl-bot[bot] committed Sep 7, 2023
1 parent aebcb7e commit 22597ee
Showing 1,702 changed files with 134 additions and 705,654 deletions.
2 changes: 2 additions & 0 deletions google/cloud/aiplatform_v1beta1/__init__.py
@@ -534,6 +534,7 @@
 from .types.study import Study
 from .types.study import StudySpec
 from .types.study import Trial
+from .types.study import TrialContext
 from .types.tensorboard import Tensorboard
 from .types.tensorboard_data import Scalar
 from .types.tensorboard_data import TensorboardBlob
@@ -1164,6 +1165,7 @@
     "TrainingConfig",
     "TrainingPipeline",
     "Trial",
+    "TrialContext",
     "UndeployIndexOperationMetadata",
     "UndeployIndexRequest",
     "UndeployIndexResponse",
(file path not shown)
@@ -45,6 +45,7 @@
 from google.api_core import operation as gac_operation  # type: ignore
 from google.api_core import operation_async  # type: ignore
 from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import pagers
+from google.cloud.aiplatform_v1beta1.types import encryption_spec
 from google.cloud.aiplatform_v1beta1.types import index_endpoint
 from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint
 from google.cloud.aiplatform_v1beta1.types import index_endpoint_service
(file path not shown)
@@ -49,6 +49,7 @@
 from google.api_core import operation as gac_operation  # type: ignore
 from google.api_core import operation_async  # type: ignore
 from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import pagers
+from google.cloud.aiplatform_v1beta1.types import encryption_spec
 from google.cloud.aiplatform_v1beta1.types import index_endpoint
 from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint
 from google.cloud.aiplatform_v1beta1.types import index_endpoint_service
(file path not shown)
@@ -46,6 +46,7 @@
 from google.api_core import operation_async  # type: ignore
 from google.cloud.aiplatform_v1beta1.services.index_service import pagers
 from google.cloud.aiplatform_v1beta1.types import deployed_index_ref
+from google.cloud.aiplatform_v1beta1.types import encryption_spec
 from google.cloud.aiplatform_v1beta1.types import index
 from google.cloud.aiplatform_v1beta1.types import index as gca_index
 from google.cloud.aiplatform_v1beta1.types import index_service
(file path not shown)
@@ -50,6 +50,7 @@
 from google.api_core import operation_async  # type: ignore
 from google.cloud.aiplatform_v1beta1.services.index_service import pagers
 from google.cloud.aiplatform_v1beta1.types import deployed_index_ref
+from google.cloud.aiplatform_v1beta1.types import encryption_spec
 from google.cloud.aiplatform_v1beta1.types import index
 from google.cloud.aiplatform_v1beta1.types import index as gca_index
 from google.cloud.aiplatform_v1beta1.types import index_service
(file path not shown)
@@ -208,40 +208,40 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]:
     @staticmethod
     def dataset_path(
         project: str,
-        location: str,
         dataset: str,
     ) -> str:
         """Returns a fully-qualified dataset string."""
-        return "projects/{project}/locations/{location}/datasets/{dataset}".format(
+        return "projects/{project}/datasets/{dataset}".format(
             project=project,
-            location=location,
             dataset=dataset,
         )

     @staticmethod
     def parse_dataset_path(path: str) -> Dict[str, str]:
         """Parses a dataset path into its component segments."""
-        m = re.match(
-            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
-            path,
-        )
+        m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path)
         return m.groupdict() if m else {}

     @staticmethod
     def dataset_path(
         project: str,
+        location: str,
         dataset: str,
     ) -> str:
         """Returns a fully-qualified dataset string."""
-        return "projects/{project}/datasets/{dataset}".format(
+        return "projects/{project}/locations/{location}/datasets/{dataset}".format(
             project=project,
+            location=location,
             dataset=dataset,
         )

     @staticmethod
     def parse_dataset_path(path: str) -> Dict[str, str]:
         """Parses a dataset path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path)
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
+            path,
+        )
         return m.groupdict() if m else {}

     @staticmethod
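This hunk swaps the order of two same-named dataset_path helpers. Because Python keeps only the last binding of a repeated method name, the location-qualified pair now wins. A minimal sketch of the resulting round-trip, using standalone functions and placeholder names rather than the actual client class:

import re
from typing import Dict

def dataset_path(project: str, location: str, dataset: str) -> str:
    """Builds the fully-qualified, location-scoped dataset resource name."""
    return "projects/{project}/locations/{location}/datasets/{dataset}".format(
        project=project, location=location, dataset=dataset
    )

def parse_dataset_path(path: str) -> Dict[str, str]:
    """Recovers the path segments; returns {} when the path does not match."""
    m = re.match(
        r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
        path,
    )
    return m.groupdict() if m else {}

path = dataset_path("my-project", "us-central1", "my-dataset")
assert path == "projects/my-project/locations/us-central1/datasets/my-dataset"
assert parse_dataset_path(path) == {
    "project": "my-project",
    "location": "us-central1",
    "dataset": "my-dataset",
}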
2 changes: 2 additions & 0 deletions google/cloud/aiplatform_v1beta1/types/__init__.py
@@ -603,6 +603,7 @@
     Study,
     StudySpec,
     Trial,
+    TrialContext,
 )
 from .tensorboard import (
     Tensorboard,
@@ -1178,6 +1179,7 @@
     "Study",
     "StudySpec",
     "Trial",
+    "TrialContext",
     "Tensorboard",
     "Scalar",
     "TensorboardBlob",
11 changes: 11 additions & 0 deletions google/cloud/aiplatform_v1beta1/types/index.py
@@ -20,6 +20,7 @@
 import proto  # type: ignore

 from google.cloud.aiplatform_v1beta1.types import deployed_index_ref
+from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec
 from google.protobuf import struct_pb2  # type: ignore
 from google.protobuf import timestamp_pb2  # type: ignore

@@ -101,6 +102,11 @@ class Index(proto.Message):
         index_update_method (google.cloud.aiplatform_v1beta1.types.Index.IndexUpdateMethod):
             Immutable. The update method to use with this Index. If not
             set, BATCH_UPDATE will be used by default.
+        encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec):
+            Immutable. Customer-managed encryption key
+            spec for an Index. If set, this Index and all
+            sub-resources of this Index will be secured by
+            this key.
     """

     class IndexUpdateMethod(proto.Enum):
@@ -181,6 +187,11 @@ class IndexUpdateMethod(proto.Enum):
         number=16,
         enum=IndexUpdateMethod,
     )
+    encryption_spec: gca_encryption_spec.EncryptionSpec = proto.Field(
+        proto.MESSAGE,
+        number=17,
+        message=gca_encryption_spec.EncryptionSpec,
+    )


 class IndexDatapoint(proto.Message):
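The new field follows the usual customer-managed encryption key (CMEK) pattern, and IndexEndpoint below gains the identical field. A minimal sketch of attaching a key at Index construction time, assuming EncryptionSpec's standard kms_key_name field; the project, location, and key names are placeholders:

from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec
from google.cloud.aiplatform_v1beta1.types import index as gca_index

cmek_index = gca_index.Index(
    display_name="cmek-protected-index",  # placeholder name
    encryption_spec=gca_encryption_spec.EncryptionSpec(
        # Key that will protect this Index and all of its sub-resources.
        kms_key_name="projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key",
    ),
)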
11 changes: 11 additions & 0 deletions google/cloud/aiplatform_v1beta1/types/index_endpoint.py
@@ -19,6 +19,7 @@

 import proto  # type: ignore

+from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec
 from google.cloud.aiplatform_v1beta1.types import machine_resources
 from google.cloud.aiplatform_v1beta1.types import service_networking
 from google.protobuf import timestamp_pb2  # type: ignore
@@ -119,6 +120,11 @@ class IndexEndpoint(proto.Message):
             [public_endpoint_enabled][google.cloud.aiplatform.v1beta1.IndexEndpoint.public_endpoint_enabled]
             is true, this field will be populated with the domain name
             to use for this index endpoint.
+        encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec):
+            Immutable. Customer-managed encryption key
+            spec for an IndexEndpoint. If set, this
+            IndexEndpoint and all sub-resources of this
+            IndexEndpoint will be secured by this key.
     """

     name: str = proto.Field(
@@ -180,6 +186,11 @@ class IndexEndpoint(proto.Message):
         proto.STRING,
         number=14,
     )
+    encryption_spec: gca_encryption_spec.EncryptionSpec = proto.Field(
+        proto.MESSAGE,
+        number=15,
+        message=gca_encryption_spec.EncryptionSpec,
+    )


 class DeployedIndex(proto.Message):
33 changes: 33 additions & 0 deletions google/cloud/aiplatform_v1beta1/types/study.py
@@ -29,6 +29,7 @@
     manifest={
         "Study",
         "Trial",
+        "TrialContext",
         "StudySpec",
         "Measurement",
     },
@@ -288,6 +289,38 @@ class Parameter(proto.Message):
         )


+class TrialContext(proto.Message):
+    r"""Next ID: 3
+
+    Attributes:
+        description (str):
+            A human-readable field which can store a
+            description of this context. This will become
+            part of the resulting Trial's description field.
+        parameters (MutableSequence[google.cloud.aiplatform_v1beta1.types.Trial.Parameter]):
+            If/when a Trial is generated or selected from this Context,
+            its Parameters will match any parameters specified here.
+            (I.e. if this context specifies parameter name:'a'
+            int_value:3, then a resulting Trial will have int_value:3
+            for its parameter named 'a'.) Note that we first attempt to
+            match existing REQUESTED Trials with contexts, and if there
+            are no matches, we generate suggestions in the subspace
+            defined by the parameters specified here. NOTE: a Context
+            without any Parameters matches the entire feasible search
+            space.
+    """
+
+    description: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    parameters: MutableSequence["Trial.Parameter"] = proto.RepeatedField(
+        proto.MESSAGE,
+        number=2,
+        message="Trial.Parameter",
+    )
+
+
 class StudySpec(proto.Message):
     r"""Represents specification of a Study.
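A minimal sketch of constructing the new message. The parameter shape (a parameter_id plus a protobuf Value) mirrors the existing Trial.Parameter, and the concrete values are placeholders:

from google.protobuf import struct_pb2

from google.cloud.aiplatform_v1beta1.types import study

ctx = study.TrialContext(
    description="Tuesday traffic",  # placeholder description
    parameters=[
        study.Trial.Parameter(
            parameter_id="day_of_week",
            value=struct_pb2.Value(string_value="tuesday"),
        ),
    ],
)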
47 changes: 47 additions & 0 deletions google/cloud/aiplatform_v1beta1/types/vizier_service.py
@@ -215,6 +215,48 @@ class SuggestTrialsRequest(proto.Message):
             ``client_id``, the service will return the identical
             suggested Trial if the Trial is pending, and provide a new
             Trial if the last suggested Trial was completed.
+        contexts (MutableSequence[google.cloud.aiplatform_v1beta1.types.TrialContext]):
+            Optional. This allows you to specify the "context" for a
+            Trial; a context is a slice (a subspace) of the search
+            space.
+
+            Typical uses for contexts:
+
+            1) You are using Vizier to tune a server for best
+               performance, but there's a strong weekly cycle. The
+               context specifies the day-of-week. This allows Tuesday
+               to generalize from Wednesday without assuming that
+               everything is identical.
+            2) Imagine you're optimizing some medical treatment for
+               people. As they walk in the door, you know certain facts
+               about them (e.g. sex, weight, height, blood-pressure).
+               Put that information in the context, and Vizier will
+               adapt its suggestions to the patient.
+            3) You want to do a fair A/B test efficiently. Specify the
+               "A" and "B" conditions as contexts, and Vizier will
+               generalize between "A" and "B" conditions. If they are
+               similar, this will allow Vizier to converge to the
+               optimum faster than if "A" and "B" were separate
+               Studies.
+
+            NOTE: You can also enter contexts as REQUESTED Trials,
+            e.g. via the CreateTrial() RPC; that's the asynchronous
+            option where you don't need a close association between
+            contexts and suggestions.
+
+            NOTE: All the Parameters you set in a context MUST be
+            defined in the Study. NOTE: You must supply 0 or
+            $suggestion_count contexts. If you don't supply any
+            contexts, Vizier will make suggestions from the full search
+            space specified in the StudySpec; if you supply a full set
+            of contexts, each suggestion will match the corresponding
+            context. NOTE: A Context with no features set matches
+            anything, and allows suggestions from the full search space.
+            NOTE: Contexts MUST lie within the search space specified in
+            the StudySpec. It's an error if they don't. NOTE: Contexts
+            preferentially match ACTIVE then REQUESTED trials before new
+            suggestions are generated. NOTE: Generation of suggestions
+            involves a match between a Context and (optionally) a
+            REQUESTED trial; if that match is not fully specified, a
+            suggestion will be generated in the merged subspace.
     """

     parent: str = proto.Field(
@@ -229,6 +271,11 @@
         proto.STRING,
         number=3,
     )
+    contexts: MutableSequence[gca_study.TrialContext] = proto.RepeatedField(
+        proto.MESSAGE,
+        number=4,
+        message=gca_study.TrialContext,
+    )


 class SuggestTrialsResponse(proto.Message):
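A minimal sketch of passing contexts through SuggestTrials, supplying one context per requested suggestion per the 0-or-$suggestion_count rule above; the study name, client id, and context are placeholders:

from google.cloud.aiplatform_v1beta1 import VizierServiceClient
from google.cloud.aiplatform_v1beta1.types import study, vizier_service

client = VizierServiceClient()
request = vizier_service.SuggestTrialsRequest(
    parent="projects/my-project/locations/us-central1/studies/my-study",
    suggestion_count=1,
    client_id="worker-0",
    contexts=[study.TrialContext(description="Tuesday traffic")],
)
# SuggestTrials is a long-running operation; wait for the suggested Trials.
trials = client.suggest_trials(request=request).result().trials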
13 changes: 0 additions & 13 deletions owl-bot-staging/v1/.coveragerc

This file was deleted.

33 changes: 0 additions & 33 deletions owl-bot-staging/v1/.flake8

This file was deleted.

2 changes: 0 additions & 2 deletions owl-bot-staging/v1/MANIFEST.in

This file was deleted.

49 changes: 0 additions & 49 deletions owl-bot-staging/v1/README.rst

This file was deleted.

10 changes: 0 additions & 10 deletions owl-bot-staging/v1/docs/aiplatform_v1/dataset_service.rst

This file was deleted.

(remaining changed files not shown)
