From acaa3ff26ab0d317362e2be65ac5edcf803b13a1 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 14:32:16 -0400 Subject: [PATCH] feat: add fields and the BackupType proto for Hot Backups (#2300) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: add grpc-java dependencies to WORKSPACE and update gapic-generator-java to 2.43.0 PiperOrigin-RevId: 656487430 Source-Link: https://github.com/googleapis/googleapis/commit/cf16946acfefb6f5a33122802ebbaebf5bb45645 Source-Link: https://github.com/googleapis/googleapis-gen/commit/4b4c8ab484b34251b142ad17d14e25a33f32c1b0 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNGI0YzhhYjQ4NGIzNDI1MWIxNDJhZDE3ZDE0ZTI1YTMzZjMyYzFiMCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: add fields and the BackupType proto for Hot Backups docs: clarify comments and fix typos PiperOrigin-RevId: 658791576 Source-Link: https://github.com/googleapis/googleapis/commit/c93b54fa3060c7185f6dc724f0f9ec0c12bc44fc Source-Link: https://github.com/googleapis/googleapis-gen/commit/e52ba38a95a82f7588d0dd3a2284c98850dab9e1 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZTUyYmEzOGE5NWE4MmY3NTg4ZDBkZDNhMjI4NGM5ODg1MGRhYjllMSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../v2/BaseBigtableTableAdminClient.java | 14 +- .../BigtableInstanceAdminStubSettings.java | 2 + .../stub/BigtableTableAdminStubSettings.java | 2 + .../data/v2/stub/BigtableStubSettings.java | 2 + .../reflect-config.json | 9 + .../v2/BaseBigtableTableAdminClientTest.java | 9 + .../admin/v2/BigtableTableAdminGrpc.java | 8 +- .../com/google/bigtable/admin/v2/Backup.java | 838 ++++++++++++++++-- .../google/bigtable/admin/v2/BackupInfo.java | 21 +- .../admin/v2/BackupInfoOrBuilder.java | 6 +- .../bigtable/admin/v2/BackupOrBuilder.java | 123 ++- .../bigtable/admin/v2/CopyBackupRequest.java | 14 +- .../admin/v2/CopyBackupRequestOrBuilder.java | 4 +- .../google/bigtable/admin/v2/TableProto.java | 52 +- .../admin/v2/bigtable_table_admin.proto | 4 +- .../google/bigtable/admin/v2/table.proto | 48 +- 16 files changed, 1011 insertions(+), 145 deletions(-) diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/BaseBigtableTableAdminClient.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/BaseBigtableTableAdminClient.java index 799aebf58e..cb4787b2e4 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/BaseBigtableTableAdminClient.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/BaseBigtableTableAdminClient.java @@ -4068,7 +4068,7 @@ public final UnaryCallable listBackupsC * Create a new table by restoring from a completed backup. The returned table [long-running * operation][google.longrunning.Operation] can be used to track the progress of the operation, * and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is - * [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The + * [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. The * [response][google.longrunning.Operation.response] type is * [Table][google.bigtable.admin.v2.Table], if successful. 
* @@ -4104,7 +4104,7 @@ public final OperationFuture restoreTableAsync( * Create a new table by restoring from a completed backup. The returned table [long-running * operation][google.longrunning.Operation] can be used to track the progress of the operation, * and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is - * [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The + * [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. The * [response][google.longrunning.Operation.response] type is * [Table][google.bigtable.admin.v2.Table], if successful. * @@ -4140,7 +4140,7 @@ public final OperationFuture restoreTableAsync( * Create a new table by restoring from a completed backup. The returned table [long-running * operation][google.longrunning.Operation] can be used to track the progress of the operation, * and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is - * [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The + * [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. The * [response][google.longrunning.Operation.response] type is * [Table][google.bigtable.admin.v2.Table], if successful. * @@ -4197,7 +4197,7 @@ public final UnaryCallable restoreTableCallable( * } * * @param parent Required. The name of the destination cluster that will contain the backup copy. - * The cluster must already exists. Values are of the form: + * The cluster must already exist. Values are of the form: * `projects/{project}/instances/{instance}/clusters/{cluster}`. * @param backupId Required. The id of the new backup. The `backup_id` along with `parent` are * combined as {parent}/backups/{backup_id} to create the full backup name, of the form: @@ -4255,7 +4255,7 @@ public final OperationFuture copyBackupAsync( * } * * @param parent Required. The name of the destination cluster that will contain the backup copy. - * The cluster must already exists. Values are of the form: + * The cluster must already exist. Values are of the form: * `projects/{project}/instances/{instance}/clusters/{cluster}`. * @param backupId Required. The id of the new backup. The `backup_id` along with `parent` are * combined as {parent}/backups/{backup_id} to create the full backup name, of the form: @@ -4312,7 +4312,7 @@ public final OperationFuture copyBackupAsync( * } * * @param parent Required. The name of the destination cluster that will contain the backup copy. - * The cluster must already exists. Values are of the form: + * The cluster must already exist. Values are of the form: * `projects/{project}/instances/{instance}/clusters/{cluster}`. * @param backupId Required. The id of the new backup. The `backup_id` along with `parent` are * combined as {parent}/backups/{backup_id} to create the full backup name, of the form: @@ -4370,7 +4370,7 @@ public final OperationFuture copyBackupAsync( * } * * @param parent Required. The name of the destination cluster that will contain the backup copy. - * The cluster must already exists. Values are of the form: + * The cluster must already exist. Values are of the form: * `projects/{project}/instances/{instance}/clusters/{cluster}`. * @param backupId Required. The id of the new backup. 
The `backup_id` along with `parent` are * combined as {parent}/backups/{backup_id} to create the full backup name, of the form: diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/stub/BigtableInstanceAdminStubSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/stub/BigtableInstanceAdminStubSettings.java index 588e64bc99..7caeef5761 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/stub/BigtableInstanceAdminStubSettings.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/stub/BigtableInstanceAdminStubSettings.java @@ -21,6 +21,7 @@ import com.google.api.core.ApiFunction; import com.google.api.core.ApiFuture; +import com.google.api.core.ObsoleteApi; import com.google.api.gax.core.GaxProperties; import com.google.api.gax.core.GoogleCredentialsProvider; import com.google.api.gax.core.InstantiatingExecutorProvider; @@ -467,6 +468,7 @@ public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuild } /** Returns the default service endpoint. */ + @ObsoleteApi("Use getEndpoint() instead") public static String getDefaultEndpoint() { return "bigtableadmin.googleapis.com:443"; } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/stub/BigtableTableAdminStubSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/stub/BigtableTableAdminStubSettings.java index b5d9e94af5..a9619f7b16 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/stub/BigtableTableAdminStubSettings.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/stub/BigtableTableAdminStubSettings.java @@ -23,6 +23,7 @@ import com.google.api.core.ApiFunction; import com.google.api.core.ApiFuture; +import com.google.api.core.ObsoleteApi; import com.google.api.gax.core.GaxProperties; import com.google.api.gax.core.GoogleCredentialsProvider; import com.google.api.gax.core.InstantiatingExecutorProvider; @@ -683,6 +684,7 @@ public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuild } /** Returns the default service endpoint. */ + @ObsoleteApi("Use getEndpoint() instead") public static String getDefaultEndpoint() { return "bigtableadmin.googleapis.com:443"; } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStubSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStubSettings.java index 22d394fe57..f3897f802d 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStubSettings.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableStubSettings.java @@ -18,6 +18,7 @@ import com.google.api.core.ApiFunction; import com.google.api.core.InternalApi; +import com.google.api.core.ObsoleteApi; import com.google.api.gax.core.GaxProperties; import com.google.api.gax.core.GoogleCredentialsProvider; import com.google.api.gax.core.InstantiatingExecutorProvider; @@ -180,6 +181,7 @@ public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuild } /** Returns the default service endpoint. 
*/ + @ObsoleteApi("Use getEndpoint() instead") public static String getDefaultEndpoint() { return "bigtable.googleapis.com:443"; } diff --git a/google-cloud-bigtable/src/main/resources/META-INF/native-image/com.google.cloud.bigtable.admin.v2/reflect-config.json b/google-cloud-bigtable/src/main/resources/META-INF/native-image/com.google.cloud.bigtable.admin.v2/reflect-config.json index f85ef97ad0..942551b0d3 100644 --- a/google-cloud-bigtable/src/main/resources/META-INF/native-image/com.google.cloud.bigtable.admin.v2/reflect-config.json +++ b/google-cloud-bigtable/src/main/resources/META-INF/native-image/com.google.cloud.bigtable.admin.v2/reflect-config.json @@ -593,6 +593,15 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.bigtable.admin.v2.Backup$BackupType", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.bigtable.admin.v2.Backup$Builder", "queryAllDeclaredConstructors": true, diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/BaseBigtableTableAdminClientTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/BaseBigtableTableAdminClientTest.java index 44e2f63211..95cd5022ff 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/BaseBigtableTableAdminClientTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/BaseBigtableTableAdminClientTest.java @@ -2096,6 +2096,7 @@ public void createBackupTest() throws Exception { .setEndTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .setHotToStandardTime(Timestamp.newBuilder().build()) .build(); Operation resultOperation = Operation.newBuilder() @@ -2155,6 +2156,7 @@ public void createBackupTest2() throws Exception { .setEndTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .setHotToStandardTime(Timestamp.newBuilder().build()) .build(); Operation resultOperation = Operation.newBuilder() @@ -2214,6 +2216,7 @@ public void getBackupTest() throws Exception { .setEndTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .setHotToStandardTime(Timestamp.newBuilder().build()) .build(); mockBigtableTableAdmin.addResponse(expectedResponse); @@ -2259,6 +2262,7 @@ public void getBackupTest2() throws Exception { .setEndTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .setHotToStandardTime(Timestamp.newBuilder().build()) .build(); mockBigtableTableAdmin.addResponse(expectedResponse); @@ -2304,6 +2308,7 @@ public void updateBackupTest() throws Exception { .setEndTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .setHotToStandardTime(Timestamp.newBuilder().build()) .build(); mockBigtableTableAdmin.addResponse(expectedResponse); @@ -2569,6 +2574,7 @@ public void copyBackupTest() throws Exception { .setEndTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .setHotToStandardTime(Timestamp.newBuilder().build()) .build(); Operation resultOperation = Operation.newBuilder() @@ -2632,6 +2638,7 @@ public void copyBackupTest2() 
throws Exception { .setEndTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .setHotToStandardTime(Timestamp.newBuilder().build()) .build(); Operation resultOperation = Operation.newBuilder() @@ -2695,6 +2702,7 @@ public void copyBackupTest3() throws Exception { .setEndTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .setHotToStandardTime(Timestamp.newBuilder().build()) .build(); Operation resultOperation = Operation.newBuilder() @@ -2758,6 +2766,7 @@ public void copyBackupTest4() throws Exception { .setEndTime(Timestamp.newBuilder().build()) .setSizeBytes(-1796325715) .setEncryptionInfo(EncryptionInfo.newBuilder().build()) + .setHotToStandardTime(Timestamp.newBuilder().build()) .build(); Operation resultOperation = Operation.newBuilder() diff --git a/grpc-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BigtableTableAdminGrpc.java b/grpc-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BigtableTableAdminGrpc.java index 7620d7e496..8e49c828c2 100644 --- a/grpc-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BigtableTableAdminGrpc.java +++ b/grpc-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BigtableTableAdminGrpc.java @@ -1834,7 +1834,7 @@ default void listBackups( * returned table [long-running operation][google.longrunning.Operation] can * be used to track the progress of the operation, and to cancel it. The * [metadata][google.longrunning.Operation.metadata] field type is - * [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The + * [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. The * [response][google.longrunning.Operation.response] type is * [Table][google.bigtable.admin.v2.Table], if successful. * @@ -2402,7 +2402,7 @@ public void listBackups( * returned table [long-running operation][google.longrunning.Operation] can * be used to track the progress of the operation, and to cancel it. The * [metadata][google.longrunning.Operation.metadata] field type is - * [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The + * [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. The * [response][google.longrunning.Operation.response] type is * [Table][google.bigtable.admin.v2.Table], if successful. * @@ -2887,7 +2887,7 @@ public com.google.bigtable.admin.v2.ListBackupsResponse listBackups( * returned table [long-running operation][google.longrunning.Operation] can * be used to track the progress of the operation, and to cancel it. The * [metadata][google.longrunning.Operation.metadata] field type is - * [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The + * [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. The * [response][google.longrunning.Operation.response] type is * [Table][google.bigtable.admin.v2.Table], if successful. * @@ -3364,7 +3364,7 @@ protected BigtableTableAdminFutureStub build( * returned table [long-running operation][google.longrunning.Operation] can * be used to track the progress of the operation, and to cancel it. The * [metadata][google.longrunning.Operation.metadata] field type is - * [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The + * [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. 
The * [response][google.longrunning.Operation.response] type is * [Table][google.bigtable.admin.v2.Table], if successful. * diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/Backup.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/Backup.java index 21cf46c668..428505de48 100644 --- a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/Backup.java +++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/Backup.java @@ -43,6 +43,7 @@ private Backup() { sourceTable_ = ""; sourceBackup_ = ""; state_ = 0; + backupType_ = 0; } @java.lang.Override @@ -225,6 +226,173 @@ private State(int value) { // @@protoc_insertion_point(enum_scope:google.bigtable.admin.v2.Backup.State) } + /** + * + * + *
+   * The type of the backup.
+   * 
+ * + * Protobuf enum {@code google.bigtable.admin.v2.Backup.BackupType} + */ + public enum BackupType implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+     * Not specified.
+     * 
+ * + * BACKUP_TYPE_UNSPECIFIED = 0; + */ + BACKUP_TYPE_UNSPECIFIED(0), + /** + * + * + *
+     * The default type for Cloud Bigtable managed backups. Supported for
+     * backups created in both HDD and SSD instances. Requires optimization when
+     * restored to a table in an SSD instance.
+     * 
+ * + * STANDARD = 1; + */ + STANDARD(1), + /** + * + * + *
+     * A backup type with faster restore to SSD performance. Only supported for
+     * backups created in SSD instances. A new SSD table restored from a hot
+     * backup reaches production performance more quickly than a standard
+     * backup.
+     * 
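Together with the STANDARD entry above, this is the whole decision a client needs to make when inspecting a backup. A minimal sketch of branching on the new enum, assuming only the generated accessors added in this change (the helper class name is illustrative):

    import com.google.bigtable.admin.v2.Backup;

    final class BackupTypeCheck {
      // True when this is a hot backup, i.e. an SSD-only backup that restores
      // to production performance more quickly than a standard backup.
      static boolean isHot(Backup backup) {
        switch (backup.getBackupType()) {
          case HOT:
            return true;
          case STANDARD:
          case BACKUP_TYPE_UNSPECIFIED:
            return false;
          default:
            // UNRECOGNIZED: the service sent a value this client version does not know about.
            return false;
        }
      }
    }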
+ * + * HOT = 2; + */ + HOT(2), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+     * Not specified.
+     * 
+ * + * BACKUP_TYPE_UNSPECIFIED = 0; + */ + public static final int BACKUP_TYPE_UNSPECIFIED_VALUE = 0; + /** + * + * + *
+     * The default type for Cloud Bigtable managed backups. Supported for
+     * backups created in both HDD and SSD instances. Requires optimization when
+     * restored to a table in an SSD instance.
+     * 
+ * + * STANDARD = 1; + */ + public static final int STANDARD_VALUE = 1; + /** + * + * + *
+     * A backup type with faster restore to SSD performance. Only supported for
+     * backups created in SSD instances. A new SSD table restored from a hot
+     * backup reaches production performance more quickly than a standard
+     * backup.
+     * 
+ * + * HOT = 2; + */ + public static final int HOT_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static BackupType valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static BackupType forNumber(int value) { + switch (value) { + case 0: + return BACKUP_TYPE_UNSPECIFIED; + case 1: + return STANDARD; + case 2: + return HOT; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public BackupType findValueByNumber(int number) { + return BackupType.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.bigtable.admin.v2.Backup.getDescriptor().getEnumTypes().get(1); + } + + private static final BackupType[] VALUES = values(); + + public static BackupType valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private BackupType(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.bigtable.admin.v2.Backup.BackupType) + } + private int bitField0_; public static final int NAME_FIELD_NUMBER = 1; @@ -364,7 +532,8 @@ public com.google.protobuf.ByteString getSourceTableBytes() { *
    * Output only. Name of the backup from which this backup was copied. If a
    * backup is not created by copying a backup, this field will be empty. Values
-   * are of the form: projects/<project>/instances/<instance>/backups/<backup>.
+   * are of the form:
+   * projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>
    * 
* * string source_backup = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -389,7 +558,8 @@ public java.lang.String getSourceBackup() { *
    * Output only. Name of the backup from which this backup was copied. If a
    * backup is not created by copying a backup, this field will be empty. Values
-   * are of the form: projects/<project>/instances/<instance>/backups/<backup>.
+   * are of the form:
+   * projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>
    * 
* * string source_backup = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -415,11 +585,13 @@ public com.google.protobuf.ByteString getSourceBackupBytes() { * * *
-   * Required. The expiration time of the backup, with microseconds
-   * granularity that must be at least 6 hours and at most 90 days
-   * from the time the request is received. Once the `expire_time`
-   * has passed, Cloud Bigtable will delete the backup and free the
-   * resources used by the backup.
+   * Required. The expiration time of the backup.
+   * When creating a backup or updating its `expire_time`, the value must be
+   * greater than the backup creation time by:
+   * - At least 6 hours
+   * - At most 90 days
+   *
+   * Once the `expire_time` has passed, Cloud Bigtable will delete the backup.
    * 
* * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = REQUIRED]; @@ -435,11 +607,13 @@ public boolean hasExpireTime() { * * *
-   * Required. The expiration time of the backup, with microseconds
-   * granularity that must be at least 6 hours and at most 90 days
-   * from the time the request is received. Once the `expire_time`
-   * has passed, Cloud Bigtable will delete the backup and free the
-   * resources used by the backup.
+   * Required. The expiration time of the backup.
+   * When creating a backup or updating its `expire_time`, the value must be
+   * greater than the backup creation time by:
+   * - At least 6 hours
+   * - At most 90 days
+   *
+   * Once the `expire_time` has passed, Cloud Bigtable will delete the backup.
    * 
* * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = REQUIRED]; @@ -455,11 +629,13 @@ public com.google.protobuf.Timestamp getExpireTime() { * * *
-   * Required. The expiration time of the backup, with microseconds
-   * granularity that must be at least 6 hours and at most 90 days
-   * from the time the request is received. Once the `expire_time`
-   * has passed, Cloud Bigtable will delete the backup and free the
-   * resources used by the backup.
+   * Required. The expiration time of the backup.
+   * When creating a backup or updating its `expire_time`, the value must be
+   * greater than the backup creation time by:
+   * - At least 6 hours
+   * - At most 90 days
+   *
+   * Once the `expire_time` has passed, Cloud Bigtable will delete the backup.
    * 
* * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = REQUIRED]; @@ -696,6 +872,112 @@ public com.google.bigtable.admin.v2.EncryptionInfoOrBuilder getEncryptionInfoOrB : encryptionInfo_; } + public static final int BACKUP_TYPE_FIELD_NUMBER = 11; + private int backupType_ = 0; + /** + * + * + *
+   * Indicates the backup type of the backup.
+   * 
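Both the raw wire value and the typed enum are exposed for this field. A short sketch of how they relate, assuming a Backup message in hand; the generated getBackupType() accessor already performs exactly this mapping, so the helper only spells it out:

    import com.google.bigtable.admin.v2.Backup;

    final class BackupTypeWire {
      static Backup.BackupType typeOf(Backup backup) {
        int wire = backup.getBackupTypeValue(); // numeric value as sent on the wire
        Backup.BackupType known = Backup.BackupType.forNumber(wire); // null if this client does not know the value
        return known == null ? Backup.BackupType.UNRECOGNIZED : known;
      }
    }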
+ * + * .google.bigtable.admin.v2.Backup.BackupType backup_type = 11; + * + * @return The enum numeric value on the wire for backupType. + */ + @java.lang.Override + public int getBackupTypeValue() { + return backupType_; + } + /** + * + * + *
+   * Indicates the backup type of the backup.
+   * 
+ * + * .google.bigtable.admin.v2.Backup.BackupType backup_type = 11; + * + * @return The backupType. + */ + @java.lang.Override + public com.google.bigtable.admin.v2.Backup.BackupType getBackupType() { + com.google.bigtable.admin.v2.Backup.BackupType result = + com.google.bigtable.admin.v2.Backup.BackupType.forNumber(backupType_); + return result == null ? com.google.bigtable.admin.v2.Backup.BackupType.UNRECOGNIZED : result; + } + + public static final int HOT_TO_STANDARD_TIME_FIELD_NUMBER = 12; + private com.google.protobuf.Timestamp hotToStandardTime_; + /** + * + * + *
+   * The time at which the hot backup will be converted to a standard backup.
+   * Once the `hot_to_standard_time` has passed, Cloud Bigtable will convert the
+   * hot backup to a standard backup. This value must be greater than the backup
+   * creation time by:
+   * - At least 24 hours
+   *
+   * This field only applies for hot backups. When creating or updating a
+   * standard backup, attempting to set this field will fail the request.
+   * 
+ * + * .google.protobuf.Timestamp hot_to_standard_time = 12; + * + * @return Whether the hotToStandardTime field is set. + */ + @java.lang.Override + public boolean hasHotToStandardTime() { + return ((bitField0_ & 0x00000010) != 0); + } + /** + * + * + *
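A minimal sketch of populating a hot backup that is scheduled to convert to a standard backup, assuming the generated builder setters added in this change; the 36-hour offset, helper name, and parameters are illustrative, and the 24-hour minimum is enforced by the service relative to the backup's creation time:

    import com.google.bigtable.admin.v2.Backup;
    import com.google.protobuf.Timestamp;
    import java.time.Instant;
    import java.time.temporal.ChronoUnit;

    final class HotBackupSpec {
      // A hot backup that converts to a standard backup after roughly 36 hours,
      // comfortably past the 24-hour minimum counted from creation.
      static Backup hotThenStandard(String sourceTableName, Timestamp expireTime) {
        Instant convertAt = Instant.now().plus(36, ChronoUnit.HOURS);
        return Backup.newBuilder()
            .setSourceTable(sourceTableName)
            .setExpireTime(expireTime)
            .setBackupType(Backup.BackupType.HOT)
            .setHotToStandardTime(
                Timestamp.newBuilder()
                    .setSeconds(convertAt.getEpochSecond())
                    .setNanos(convertAt.getNano())
                    .build())
            .build();
      }
    }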
+   * The time at which the hot backup will be converted to a standard backup.
+   * Once the `hot_to_standard_time` has passed, Cloud Bigtable will convert the
+   * hot backup to a standard backup. This value must be greater than the backup
+   * creation time by:
+   * - At least 24 hours
+   *
+   * This field only applies for hot backups. When creating or updating a
+   * standard backup, attempting to set this field will fail the request.
+   * 
+ * + * .google.protobuf.Timestamp hot_to_standard_time = 12; + * + * @return The hotToStandardTime. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getHotToStandardTime() { + return hotToStandardTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : hotToStandardTime_; + } + /** + * + * + *
+   * The time at which the hot backup will be converted to a standard backup.
+   * Once the `hot_to_standard_time` has passed, Cloud Bigtable will convert the
+   * hot backup to a standard backup. This value must be greater than the backup
+   * creation time by:
+   * - At least 24 hours
+   *
+   * This field only applies for hot backups. When creating or updating a
+   * standard backup, attempting to set this field will fail the request.
+   * 
+ * + * .google.protobuf.Timestamp hot_to_standard_time = 12; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getHotToStandardTimeOrBuilder() { + return hotToStandardTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : hotToStandardTime_; + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -737,6 +1019,13 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(sourceBackup_)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 10, sourceBackup_); } + if (backupType_ + != com.google.bigtable.admin.v2.Backup.BackupType.BACKUP_TYPE_UNSPECIFIED.getNumber()) { + output.writeEnum(11, backupType_); + } + if (((bitField0_ & 0x00000010) != 0)) { + output.writeMessage(12, getHotToStandardTime()); + } getUnknownFields().writeTo(output); } @@ -773,6 +1062,13 @@ public int getSerializedSize() { if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(sourceBackup_)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(10, sourceBackup_); } + if (backupType_ + != com.google.bigtable.admin.v2.Backup.BackupType.BACKUP_TYPE_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(11, backupType_); + } + if (((bitField0_ & 0x00000010) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(12, getHotToStandardTime()); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -809,6 +1105,11 @@ public boolean equals(final java.lang.Object obj) { if (hasEncryptionInfo()) { if (!getEncryptionInfo().equals(other.getEncryptionInfo())) return false; } + if (backupType_ != other.backupType_) return false; + if (hasHotToStandardTime() != other.hasHotToStandardTime()) return false; + if (hasHotToStandardTime()) { + if (!getHotToStandardTime().equals(other.getHotToStandardTime())) return false; + } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @@ -846,6 +1147,12 @@ public int hashCode() { hash = (37 * hash) + ENCRYPTION_INFO_FIELD_NUMBER; hash = (53 * hash) + getEncryptionInfo().hashCode(); } + hash = (37 * hash) + BACKUP_TYPE_FIELD_NUMBER; + hash = (53 * hash) + backupType_; + if (hasHotToStandardTime()) { + hash = (37 * hash) + HOT_TO_STANDARD_TIME_FIELD_NUMBER; + hash = (53 * hash) + getHotToStandardTime().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -989,6 +1296,7 @@ private void maybeForceBuilderInitialization() { getStartTimeFieldBuilder(); getEndTimeFieldBuilder(); getEncryptionInfoFieldBuilder(); + getHotToStandardTimeFieldBuilder(); } } @@ -1021,6 +1329,12 @@ public Builder clear() { encryptionInfoBuilder_.dispose(); encryptionInfoBuilder_ = null; } + backupType_ = 0; + hotToStandardTime_ = null; + if (hotToStandardTimeBuilder_ != null) { + hotToStandardTimeBuilder_.dispose(); + hotToStandardTimeBuilder_ = null; + } return this; } @@ -1089,6 +1403,16 @@ private void buildPartial0(com.google.bigtable.admin.v2.Backup result) { encryptionInfoBuilder_ == null ? encryptionInfo_ : encryptionInfoBuilder_.build(); to_bitField0_ |= 0x00000008; } + if (((from_bitField0_ & 0x00000200) != 0)) { + result.backupType_ = backupType_; + } + if (((from_bitField0_ & 0x00000400) != 0)) { + result.hotToStandardTime_ = + hotToStandardTimeBuilder_ == null + ? 
hotToStandardTime_ + : hotToStandardTimeBuilder_.build(); + to_bitField0_ |= 0x00000010; + } result.bitField0_ |= to_bitField0_; } @@ -1170,6 +1494,12 @@ public Builder mergeFrom(com.google.bigtable.admin.v2.Backup other) { if (other.hasEncryptionInfo()) { mergeEncryptionInfo(other.getEncryptionInfo()); } + if (other.backupType_ != 0) { + setBackupTypeValue(other.getBackupTypeValue()); + } + if (other.hasHotToStandardTime()) { + mergeHotToStandardTime(other.getHotToStandardTime()); + } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; @@ -1250,6 +1580,19 @@ public Builder mergeFrom( bitField0_ |= 0x00000004; break; } // case 82 + case 88: + { + backupType_ = input.readEnum(); + bitField0_ |= 0x00000200; + break; + } // case 88 + case 98: + { + input.readMessage( + getHotToStandardTimeFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000400; + break; + } // case 98 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { @@ -1553,7 +1896,8 @@ public Builder setSourceTableBytes(com.google.protobuf.ByteString value) { *
      * Output only. Name of the backup from which this backup was copied. If a
      * backup is not created by copying a backup, this field will be empty. Values
-     * are of the form: projects/<project>/instances/<instance>/backups/<backup>.
+     * are of the form:
+     * projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>
      * 
* * string source_backup = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -1577,7 +1921,8 @@ public java.lang.String getSourceBackup() { *
      * Output only. Name of the backup from which this backup was copied. If a
      * backup is not created by copying a backup, this field will be empty. Values
-     * are of the form: projects/<project>/instances/<instance>/backups/<backup>.
+     * are of the form:
+     * projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>
      * 
* * string source_backup = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -1601,7 +1946,8 @@ public com.google.protobuf.ByteString getSourceBackupBytes() { *
      * Output only. Name of the backup from which this backup was copied. If a
      * backup is not created by copying a backup, this field will be empty. Values
-     * are of the form: projects/<project>/instances/<instance>/backups/<backup>.
+     * are of the form:
+     * projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>
      * 
* * string source_backup = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -1624,7 +1970,8 @@ public Builder setSourceBackup(java.lang.String value) { *
      * Output only. Name of the backup from which this backup was copied. If a
      * backup is not created by copying a backup, this field will be empty. Values
-     * are of the form: projects/<project>/instances/<instance>/backups/<backup>.
+     * are of the form:
+     * projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>
      * 
* * string source_backup = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -1643,7 +1990,8 @@ public Builder clearSourceBackup() { *
      * Output only. Name of the backup from which this backup was copied. If a
      * backup is not created by copying a backup, this field will be empty. Values
-     * are of the form: projects/<project>/instances/<instance>/backups/<backup>.
+     * are of the form:
+     * projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>
      * 
* * string source_backup = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -1672,11 +2020,13 @@ public Builder setSourceBackupBytes(com.google.protobuf.ByteString value) { * * *
-     * Required. The expiration time of the backup, with microseconds
-     * granularity that must be at least 6 hours and at most 90 days
-     * from the time the request is received. Once the `expire_time`
-     * has passed, Cloud Bigtable will delete the backup and free the
-     * resources used by the backup.
+     * Required. The expiration time of the backup.
+     * When creating a backup or updating its `expire_time`, the value must be
+     * greater than the backup creation time by:
+     * - At least 6 hours
+     * - At most 90 days
+     *
+     * Once the `expire_time` has passed, Cloud Bigtable will delete the backup.
      * 
* * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = REQUIRED]; @@ -1691,11 +2041,13 @@ public boolean hasExpireTime() { * * *
-     * Required. The expiration time of the backup, with microseconds
-     * granularity that must be at least 6 hours and at most 90 days
-     * from the time the request is received. Once the `expire_time`
-     * has passed, Cloud Bigtable will delete the backup and free the
-     * resources used by the backup.
+     * Required. The expiration time of the backup.
+     * When creating a backup or updating its `expire_time`, the value must be
+     * greater than the backup creation time by:
+     * - At least 6 hours
+     * - At most 90 days
+     *
+     * Once the `expire_time` has passed, Cloud Bigtable will delete the backup.
      * 
* * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = REQUIRED]; @@ -1716,11 +2068,13 @@ public com.google.protobuf.Timestamp getExpireTime() { * * *
-     * Required. The expiration time of the backup, with microseconds
-     * granularity that must be at least 6 hours and at most 90 days
-     * from the time the request is received. Once the `expire_time`
-     * has passed, Cloud Bigtable will delete the backup and free the
-     * resources used by the backup.
+     * Required. The expiration time of the backup.
+     * When creating a backup or updating its `expire_time`, the value must be
+     * greater than the backup creation time by:
+     * - At least 6 hours
+     * - At most 90 days
+     *
+     * Once the `expire_time` has passed, Cloud Bigtable will delete the backup.
      * 
* * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = REQUIRED]; @@ -1743,11 +2097,13 @@ public Builder setExpireTime(com.google.protobuf.Timestamp value) { * * *
-     * Required. The expiration time of the backup, with microseconds
-     * granularity that must be at least 6 hours and at most 90 days
-     * from the time the request is received. Once the `expire_time`
-     * has passed, Cloud Bigtable will delete the backup and free the
-     * resources used by the backup.
+     * Required. The expiration time of the backup.
+     * When creating a backup or updating its `expire_time`, the value must be
+     * greater than the backup creation time by:
+     * - At least 6 hours
+     * - At most 90 days
+     *
+     * Once the `expire_time` has passed, Cloud Bigtable will delete the backup.
      * 
* * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = REQUIRED]; @@ -1767,11 +2123,13 @@ public Builder setExpireTime(com.google.protobuf.Timestamp.Builder builderForVal * * *
-     * Required. The expiration time of the backup, with microseconds
-     * granularity that must be at least 6 hours and at most 90 days
-     * from the time the request is received. Once the `expire_time`
-     * has passed, Cloud Bigtable will delete the backup and free the
-     * resources used by the backup.
+     * Required. The expiration time of the backup.
+     * When creating a backup or updating its `expire_time`, the value must be
+     * greater than the backup creation time by:
+     * - At least 6 hours
+     * - At most 90 days
+     *
+     * Once the `expire_time` has passed, Cloud Bigtable will delete the backup.
      * 
* * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = REQUIRED]; @@ -1799,11 +2157,13 @@ public Builder mergeExpireTime(com.google.protobuf.Timestamp value) { * * *
-     * Required. The expiration time of the backup, with microseconds
-     * granularity that must be at least 6 hours and at most 90 days
-     * from the time the request is received. Once the `expire_time`
-     * has passed, Cloud Bigtable will delete the backup and free the
-     * resources used by the backup.
+     * Required. The expiration time of the backup.
+     * When creating a backup or updating its `expire_time`, the value must be
+     * greater than the backup creation time by:
+     * - At least 6 hours
+     * - At most 90 days
+     *
+     * Once the `expire_time` has passed, Cloud Bigtable will delete the backup.
      * 
* * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = REQUIRED]; @@ -1823,11 +2183,13 @@ public Builder clearExpireTime() { * * *
-     * Required. The expiration time of the backup, with microseconds
-     * granularity that must be at least 6 hours and at most 90 days
-     * from the time the request is received. Once the `expire_time`
-     * has passed, Cloud Bigtable will delete the backup and free the
-     * resources used by the backup.
+     * Required. The expiration time of the backup.
+     * When creating a backup or updating its `expire_time`, the value must be
+     * greater than the backup creation time by:
+     * - At least 6 hours
+     * - At most 90 days
+     *
+     * Once the `expire_time` has passed, Cloud Bigtable will delete the backup.
      * 
* * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = REQUIRED]; @@ -1842,11 +2204,13 @@ public com.google.protobuf.Timestamp.Builder getExpireTimeBuilder() { * * *
-     * Required. The expiration time of the backup, with microseconds
-     * granularity that must be at least 6 hours and at most 90 days
-     * from the time the request is received. Once the `expire_time`
-     * has passed, Cloud Bigtable will delete the backup and free the
-     * resources used by the backup.
+     * Required. The expiration time of the backup.
+     * When creating a backup or updating its `expire_time`, the value must be
+     * greater than the backup creation time by:
+     * - At least 6 hours
+     * - At most 90 days
+     *
+     * Once the `expire_time` has passed, Cloud Bigtable will delete the backup.
      * 
* * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = REQUIRED]; @@ -1865,11 +2229,13 @@ public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { * * *
-     * Required. The expiration time of the backup, with microseconds
-     * granularity that must be at least 6 hours and at most 90 days
-     * from the time the request is received. Once the `expire_time`
-     * has passed, Cloud Bigtable will delete the backup and free the
-     * resources used by the backup.
+     * Required. The expiration time of the backup.
+     * When creating a backup or updating its `expire_time`, the value must be
+     * greater than the backup creation time by:
+     * - At least 6 hours
+     * - At most 90 days
+     *
+     * Once the `expire_time` has passed, Cloud Bigtable will delete the backup.
      * 
* * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = REQUIRED]; @@ -2675,6 +3041,344 @@ public com.google.bigtable.admin.v2.EncryptionInfoOrBuilder getEncryptionInfoOrB return encryptionInfoBuilder_; } + private int backupType_ = 0; + /** + * + * + *
+     * Indicates the backup type of the backup.
+     * 
+ * + * .google.bigtable.admin.v2.Backup.BackupType backup_type = 11; + * + * @return The enum numeric value on the wire for backupType. + */ + @java.lang.Override + public int getBackupTypeValue() { + return backupType_; + } + /** + * + * + *
+     * Indicates the backup type of the backup.
+     * 
+ * + * .google.bigtable.admin.v2.Backup.BackupType backup_type = 11; + * + * @param value The enum numeric value on the wire for backupType to set. + * @return This builder for chaining. + */ + public Builder setBackupTypeValue(int value) { + backupType_ = value; + bitField0_ |= 0x00000200; + onChanged(); + return this; + } + /** + * + * + *
+     * Indicates the backup type of the backup.
+     * 
+ * + * .google.bigtable.admin.v2.Backup.BackupType backup_type = 11; + * + * @return The backupType. + */ + @java.lang.Override + public com.google.bigtable.admin.v2.Backup.BackupType getBackupType() { + com.google.bigtable.admin.v2.Backup.BackupType result = + com.google.bigtable.admin.v2.Backup.BackupType.forNumber(backupType_); + return result == null ? com.google.bigtable.admin.v2.Backup.BackupType.UNRECOGNIZED : result; + } + /** + * + * + *
+     * Indicates the backup type of the backup.
+     * 
+ * + * .google.bigtable.admin.v2.Backup.BackupType backup_type = 11; + * + * @param value The backupType to set. + * @return This builder for chaining. + */ + public Builder setBackupType(com.google.bigtable.admin.v2.Backup.BackupType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000200; + backupType_ = value.getNumber(); + onChanged(); + return this; + } + /** + * + * + *
+     * Indicates the backup type of the backup.
+     * 
+ * + * .google.bigtable.admin.v2.Backup.BackupType backup_type = 11; + * + * @return This builder for chaining. + */ + public Builder clearBackupType() { + bitField0_ = (bitField0_ & ~0x00000200); + backupType_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp hotToStandardTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + hotToStandardTimeBuilder_; + /** + * + * + *
+     * The time at which the hot backup will be converted to a standard backup.
+     * Once the `hot_to_standard_time` has passed, Cloud Bigtable will convert the
+     * hot backup to a standard backup. This value must be greater than the backup
+     * creation time by:
+     * - At least 24 hours
+     *
+     * This field only applies for hot backups. When creating or updating a
+     * standard backup, attempting to set this field will fail the request.
+     * 
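The same constraint applies when the field travels on an update. A sketch of assembling such a request, assuming the existing UpdateBackupRequest message and that the field-mask path follows the proto field name; whether a particular change is accepted is decided by the service rules above, not by this snippet:

    import com.google.bigtable.admin.v2.Backup;
    import com.google.bigtable.admin.v2.UpdateBackupRequest;
    import com.google.protobuf.FieldMask;
    import com.google.protobuf.Timestamp;

    final class HotToStandardUpdate {
      // Moves the conversion time of an existing hot backup; the mask path
      // "hot_to_standard_time" is assumed from the proto field name.
      static UpdateBackupRequest moveConversionTime(Backup existing, Timestamp newTime) {
        return UpdateBackupRequest.newBuilder()
            .setBackup(existing.toBuilder().setHotToStandardTime(newTime).build())
            .setUpdateMask(FieldMask.newBuilder().addPaths("hot_to_standard_time").build())
            .build();
      }
    }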
+ * + * .google.protobuf.Timestamp hot_to_standard_time = 12; + * + * @return Whether the hotToStandardTime field is set. + */ + public boolean hasHotToStandardTime() { + return ((bitField0_ & 0x00000400) != 0); + } + /** + * + * + *
+     * The time at which the hot backup will be converted to a standard backup.
+     * Once the `hot_to_standard_time` has passed, Cloud Bigtable will convert the
+     * hot backup to a standard backup. This value must be greater than the backup
+     * creation time by:
+     * - At least 24 hours
+     *
+     * This field only applies for hot backups. When creating or updating a
+     * standard backup, attempting to set this field will fail the request.
+     * 
+ * + * .google.protobuf.Timestamp hot_to_standard_time = 12; + * + * @return The hotToStandardTime. + */ + public com.google.protobuf.Timestamp getHotToStandardTime() { + if (hotToStandardTimeBuilder_ == null) { + return hotToStandardTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : hotToStandardTime_; + } else { + return hotToStandardTimeBuilder_.getMessage(); + } + } + /** + * + * + *
+     * The time at which the hot backup will be converted to a standard backup.
+     * Once the `hot_to_standard_time` has passed, Cloud Bigtable will convert the
+     * hot backup to a standard backup. This value must be greater than the backup
+     * creation time by:
+     * - At least 24 hours
+     *
+     * This field only applies for hot backups. When creating or updating a
+     * standard backup, attempting to set this field will fail the request.
+     * 
+ * + * .google.protobuf.Timestamp hot_to_standard_time = 12; + */ + public Builder setHotToStandardTime(com.google.protobuf.Timestamp value) { + if (hotToStandardTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + hotToStandardTime_ = value; + } else { + hotToStandardTimeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + /** + * + * + *
+     * The time at which the hot backup will be converted to a standard backup.
+     * Once the `hot_to_standard_time` has passed, Cloud Bigtable will convert the
+     * hot backup to a standard backup. This value must be greater than the backup
+     * creation time by:
+     * - At least 24 hours
+     *
+     * This field only applies for hot backups. When creating or updating a
+     * standard backup, attempting to set this field will fail the request.
+     * 
+ * + * .google.protobuf.Timestamp hot_to_standard_time = 12; + */ + public Builder setHotToStandardTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (hotToStandardTimeBuilder_ == null) { + hotToStandardTime_ = builderForValue.build(); + } else { + hotToStandardTimeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000400; + onChanged(); + return this; + } + /** + * + * + *
+     * The time at which the hot backup will be converted to a standard backup.
+     * Once the `hot_to_standard_time` has passed, Cloud Bigtable will convert the
+     * hot backup to a standard backup. This value must be greater than the backup
+     * creation time by:
+     * - At least 24 hours
+     *
+     * This field only applies for hot backups. When creating or updating a
+     * standard backup, attempting to set this field will fail the request.
+     * 
+ * + * .google.protobuf.Timestamp hot_to_standard_time = 12; + */ + public Builder mergeHotToStandardTime(com.google.protobuf.Timestamp value) { + if (hotToStandardTimeBuilder_ == null) { + if (((bitField0_ & 0x00000400) != 0) + && hotToStandardTime_ != null + && hotToStandardTime_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getHotToStandardTimeBuilder().mergeFrom(value); + } else { + hotToStandardTime_ = value; + } + } else { + hotToStandardTimeBuilder_.mergeFrom(value); + } + if (hotToStandardTime_ != null) { + bitField0_ |= 0x00000400; + onChanged(); + } + return this; + } + /** + * + * + *
+     * The time at which the hot backup will be converted to a standard backup.
+     * Once the `hot_to_standard_time` has passed, Cloud Bigtable will convert the
+     * hot backup to a standard backup. This value must be greater than the backup
+     * creation time by:
+     * - At least 24 hours
+     *
+     * This field only applies for hot backups. When creating or updating a
+     * standard backup, attempting to set this field will fail the request.
+     * 
+ * + * .google.protobuf.Timestamp hot_to_standard_time = 12; + */ + public Builder clearHotToStandardTime() { + bitField0_ = (bitField0_ & ~0x00000400); + hotToStandardTime_ = null; + if (hotToStandardTimeBuilder_ != null) { + hotToStandardTimeBuilder_.dispose(); + hotToStandardTimeBuilder_ = null; + } + onChanged(); + return this; + } + /** + * + * + *
+     * The time at which the hot backup will be converted to a standard backup.
+     * Once the `hot_to_standard_time` has passed, Cloud Bigtable will convert the
+     * hot backup to a standard backup. This value must be greater than the backup
+     * creation time by:
+     * - At least 24 hours
+     *
+     * This field only applies for hot backups. When creating or updating a
+     * standard backup, attempting to set this field will fail the request.
+     * 
+ * + * .google.protobuf.Timestamp hot_to_standard_time = 12; + */ + public com.google.protobuf.Timestamp.Builder getHotToStandardTimeBuilder() { + bitField0_ |= 0x00000400; + onChanged(); + return getHotToStandardTimeFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * The time at which the hot backup will be converted to a standard backup.
+     * Once the `hot_to_standard_time` has passed, Cloud Bigtable will convert the
+     * hot backup to a standard backup. This value must be greater than the backup
+     * creation time by:
+     * - At least 24 hours
+     *
+     * This field only applies for hot backups. When creating or updating a
+     * standard backup, attempting to set this field will fail the request.
+     * 
+ * + * .google.protobuf.Timestamp hot_to_standard_time = 12; + */ + public com.google.protobuf.TimestampOrBuilder getHotToStandardTimeOrBuilder() { + if (hotToStandardTimeBuilder_ != null) { + return hotToStandardTimeBuilder_.getMessageOrBuilder(); + } else { + return hotToStandardTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : hotToStandardTime_; + } + } + /** + * + * + *
+     * The time at which the hot backup will be converted to a standard backup.
+     * Once the `hot_to_standard_time` has passed, Cloud Bigtable will convert the
+     * hot backup to a standard backup. This value must be greater than the backup
+     * creation time by:
+     * - At least 24 hours
+     *
+     * This field only applies for hot backups. When creating or updating a
+     * standard backup, attempting to set this field will fail the request.
+     * 
+ * + * .google.protobuf.Timestamp hot_to_standard_time = 12; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getHotToStandardTimeFieldBuilder() { + if (hotToStandardTimeBuilder_ == null) { + hotToStandardTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getHotToStandardTime(), getParentForChildren(), isClean()); + hotToStandardTime_ = null; + } + return hotToStandardTimeBuilder_; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BackupInfo.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BackupInfo.java index 18d0819ca9..39c98ad78b 100644 --- a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BackupInfo.java +++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BackupInfo.java @@ -282,7 +282,8 @@ public com.google.protobuf.ByteString getSourceTableBytes() { *
    * Output only. Name of the backup from which this backup was copied. If a
    * backup is not created by copying a backup, this field will be empty. Values
-   * are of the form: projects/<project>/instances/<instance>/backups/<backup>.
+   * are of the form:
+   * projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>
    * 
* * string source_backup = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -307,7 +308,8 @@ public java.lang.String getSourceBackup() { *
    * Output only. Name of the backup from which this backup was copied. If a
    * backup is not created by copying a backup, this field will be empty. Values
-   * are of the form: projects/<project>/instances/<instance>/backups/<backup>.
+   * are of the form:
+   * projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>
    * 
* * string source_backup = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -1408,7 +1410,8 @@ public Builder setSourceTableBytes(com.google.protobuf.ByteString value) { *
      * Output only. Name of the backup from which this backup was copied. If a
      * backup is not created by copying a backup, this field will be empty. Values
-     * are of the form: projects/<project>/instances/<instance>/backups/<backup>.
+     * are of the form:
+     * projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>
      * 
* * string source_backup = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -1432,7 +1435,8 @@ public java.lang.String getSourceBackup() { *
      * Output only. Name of the backup from which this backup was copied. If a
      * backup is not created by copying a backup, this field will be empty. Values
-     * are of the form: projects/<project>/instances/<instance>/backups/<backup>.
+     * are of the form:
+     * projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>
      * 
* * string source_backup = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -1456,7 +1460,8 @@ public com.google.protobuf.ByteString getSourceBackupBytes() { *
      * Output only. Name of the backup from which this backup was copied. If a
      * backup is not created by copying a backup, this field will be empty. Values
-     * are of the form: projects/<project>/instances/<instance>/backups/<backup>.
+     * are of the form:
+     * projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>
      * 
* * string source_backup = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -1479,7 +1484,8 @@ public Builder setSourceBackup(java.lang.String value) { *
      * Output only. Name of the backup from which this backup was copied. If a
      * backup is not created by copying a backup, this field will be empty. Values
-     * are of the form: projects/<project>/instances/<instance>/backups/<backup>.
+     * are of the form:
+     * projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>
      * 
* * string source_backup = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -1498,7 +1504,8 @@ public Builder clearSourceBackup() { *
      * Output only. Name of the backup from which this backup was copied. If a
      * backup is not created by copying a backup, this field will be empty. Values
-     * are of the form: projects/<project>/instances/<instance>/backups/<backup>.
+     * are of the form:
+     * projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>
      * 
* * string source_backup = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BackupInfoOrBuilder.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BackupInfoOrBuilder.java index 6a93a024bd..ff3349eea5 100644 --- a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BackupInfoOrBuilder.java +++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BackupInfoOrBuilder.java @@ -162,7 +162,8 @@ public interface BackupInfoOrBuilder *
    * Output only. Name of the backup from which this backup was copied. If a
    * backup is not created by copying a backup, this field will be empty. Values
-   * are of the form: projects/<project>/instances/<instance>/backups/<backup>.
+   * are of the form:
+   * projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>
    * 
* * string source_backup = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -176,7 +177,8 @@ public interface BackupInfoOrBuilder *
    * Output only. Name of the backup from which this backup was copied. If a
    * backup is not created by copying a backup, this field will be empty. Values
-   * are of the form: projects/<project>/instances/<instance>/backups/<backup>.
+   * are of the form:
+   * projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>
    * 
* * string source_backup = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BackupOrBuilder.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BackupOrBuilder.java index 4f122ca46d..3ef01fdc1e 100644 --- a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BackupOrBuilder.java +++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BackupOrBuilder.java @@ -106,7 +106,8 @@ public interface BackupOrBuilder *
    * Output only. Name of the backup from which this backup was copied. If a
    * backup is not created by copying a backup, this field will be empty. Values
-   * are of the form: projects/<project>/instances/<instance>/backups/<backup>.
+   * are of the form:
+   * projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>
    * 
* * string source_backup = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -120,7 +121,8 @@ public interface BackupOrBuilder *
    * Output only. Name of the backup from which this backup was copied. If a
    * backup is not created by copying a backup, this field will be empty. Values
-   * are of the form: projects/<project>/instances/<instance>/backups/<backup>.
+   * are of the form:
+   * projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>
    * 
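For reference, a small sketch of the documented shape of a copied backup's source_backup value; every ID below is a hypothetical placeholder, not a value taken from this change:

class SourceBackupNameSketch {
  // Illustrative only: shows the resource-name shape described above for a
  // backup that was created by CopyBackup. An empty value means the backup
  // was not created by copying another backup.
  static String exampleSourceBackupName() {
    return String.format(
        "projects/%s/instances/%s/clusters/%s/backups/%s",
        "my-project", "my-instance", "my-cluster", "my-original-backup");
  }
}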
* * string source_backup = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; @@ -133,11 +135,13 @@ public interface BackupOrBuilder * * *
-   * Required. The expiration time of the backup, with microseconds
-   * granularity that must be at least 6 hours and at most 90 days
-   * from the time the request is received. Once the `expire_time`
-   * has passed, Cloud Bigtable will delete the backup and free the
-   * resources used by the backup.
+   * Required. The expiration time of the backup.
+   * When creating a backup or updating its `expire_time`, the value must be
+   * greater than the backup creation time by:
+   * - At least 6 hours
+   * - At most 90 days
+   *
+   * Once the `expire_time` has passed, Cloud Bigtable will delete the backup.
    * 
* * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = REQUIRED]; @@ -150,11 +154,13 @@ public interface BackupOrBuilder * * *
-   * Required. The expiration time of the backup, with microseconds
-   * granularity that must be at least 6 hours and at most 90 days
-   * from the time the request is received. Once the `expire_time`
-   * has passed, Cloud Bigtable will delete the backup and free the
-   * resources used by the backup.
+   * Required. The expiration time of the backup.
+   * When creating a backup or updating its `expire_time`, the value must be
+   * greater than the backup creation time by:
+   * - At least 6 hours
+   * - At most 90 days
+   *
+   * Once the `expire_time` has passed, Cloud Bigtable will delete the backup.
    * 
* * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = REQUIRED]; @@ -167,11 +173,13 @@ public interface BackupOrBuilder * * *
-   * Required. The expiration time of the backup, with microseconds
-   * granularity that must be at least 6 hours and at most 90 days
-   * from the time the request is received. Once the `expire_time`
-   * has passed, Cloud Bigtable will delete the backup and free the
-   * resources used by the backup.
+   * Required. The expiration time of the backup.
+   * When creating a backup or updating its `expire_time`, the value must be
+   * greater than the backup creation time by:
+   * - At least 6 hours
+   * - At most 90 days
+   *
+   * Once the `expire_time` has passed, Cloud Bigtable will delete the backup.
    * 
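As a rough sketch of satisfying this bound from client code, assuming the protobuf-java-util artifact is on the classpath; the 7-day window is an arbitrary illustrative choice inside the 6-hour-to-90-day range:

import com.google.protobuf.Timestamp;
import com.google.protobuf.util.Durations;
import com.google.protobuf.util.Timestamps;

class ExpireTimeSketch {
  // 7 days from now: more than 6 hours and less than 90 days after the
  // backup's creation time, so it satisfies the documented bounds.
  static Timestamp sevenDaysFromNow() {
    return Timestamps.add(Timestamps.now(), Durations.fromDays(7));
  }
}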
* * .google.protobuf.Timestamp expire_time = 3 [(.google.api.field_behavior) = REQUIRED]; @@ -352,4 +360,85 @@ public interface BackupOrBuilder * */ com.google.bigtable.admin.v2.EncryptionInfoOrBuilder getEncryptionInfoOrBuilder(); + + /** + * + * + *
+   * Indicates the backup type of the backup.
+   * 
+ * + * .google.bigtable.admin.v2.Backup.BackupType backup_type = 11; + * + * @return The enum numeric value on the wire for backupType. + */ + int getBackupTypeValue(); + /** + * + * + *
+   * Indicates the backup type of the backup.
+   * 
+ * + * .google.bigtable.admin.v2.Backup.BackupType backup_type = 11; + * + * @return The backupType. + */ + com.google.bigtable.admin.v2.Backup.BackupType getBackupType(); + + /** + * + * + *
+   * The time at which the hot backup will be converted to a standard backup.
+   * Once the `hot_to_standard_time` has passed, Cloud Bigtable will convert the
+   * hot backup to a standard backup. This value must be greater than the backup
+   * creation time by:
+   * - At least 24 hours
+   *
+   * This field only applies for hot backups. When creating or updating a
+   * standard backup, attempting to set this field will fail the request.
+   * 
+ * + * .google.protobuf.Timestamp hot_to_standard_time = 12; + * + * @return Whether the hotToStandardTime field is set. + */ + boolean hasHotToStandardTime(); + /** + * + * + *
+   * The time at which the hot backup will be converted to a standard backup.
+   * Once the `hot_to_standard_time` has passed, Cloud Bigtable will convert the
+   * hot backup to a standard backup. This value must be greater than the backup
+   * creation time by:
+   * - At least 24 hours
+   *
+   * This field only applies for hot backups. When creating or updating a
+   * standard backup, attempting to set this field will fail the request.
+   * 
+ * + * .google.protobuf.Timestamp hot_to_standard_time = 12; + * + * @return The hotToStandardTime. + */ + com.google.protobuf.Timestamp getHotToStandardTime(); + /** + * + * + *
+   * The time at which the hot backup will be converted to a standard backup.
+   * Once the `hot_to_standard_time` has passed, Cloud Bigtable will convert the
+   * hot backup to a standard backup. This value must be greater than the backup
+   * creation time by:
+   * - At least 24 hours
+   *
+   * This field only applies for hot backups. When creating or updating a
+   * standard backup, attempting to set this field will fail the request.
+   * 
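A minimal sketch of populating the two hot-backup fields on the message builder; the 48-hour delay is an arbitrary illustrative choice that clears the 24-hour minimum, and hot_to_standard_time is set only because the backup type is HOT:

import com.google.bigtable.admin.v2.Backup;
import com.google.protobuf.util.Durations;
import com.google.protobuf.util.Timestamps;

class HotBackupFieldsSketch {
  // Marks the backup as HOT and schedules its conversion to a standard
  // backup 48 hours from now (at least 24 hours after creation, as required).
  static Backup.Builder withHotFields(Backup.Builder builder) {
    return builder
        .setBackupType(Backup.BackupType.HOT)
        .setHotToStandardTime(Timestamps.add(Timestamps.now(), Durations.fromHours(48)));
  }
}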
+ * + * .google.protobuf.Timestamp hot_to_standard_time = 12; + */ + com.google.protobuf.TimestampOrBuilder getHotToStandardTimeOrBuilder(); } diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/CopyBackupRequest.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/CopyBackupRequest.java index 69d472c190..37e5b666d0 100644 --- a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/CopyBackupRequest.java +++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/CopyBackupRequest.java @@ -76,7 +76,7 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * *
    * Required. The name of the destination cluster that will contain the backup
-   * copy. The cluster must already exists. Values are of the form:
+   * copy. The cluster must already exist. Values are of the form:
    * `projects/{project}/instances/{instance}/clusters/{cluster}`.
    * 
* @@ -103,7 +103,7 @@ public java.lang.String getParent() { * *
    * Required. The name of the destination cluster that will contain the backup
-   * copy. The cluster must already exists. Values are of the form:
+   * copy. The cluster must already exist. Values are of the form:
    * `projects/{project}/instances/{instance}/clusters/{cluster}`.
    * 
* @@ -754,7 +754,7 @@ public Builder mergeFrom( * *
      * Required. The name of the destination cluster that will contain the backup
-     * copy. The cluster must already exists. Values are of the form:
+     * copy. The cluster must already exist. Values are of the form:
      * `projects/{project}/instances/{instance}/clusters/{cluster}`.
      * 
* @@ -780,7 +780,7 @@ public java.lang.String getParent() { * *
      * Required. The name of the destination cluster that will contain the backup
-     * copy. The cluster must already exists. Values are of the form:
+     * copy. The cluster must already exist. Values are of the form:
      * `projects/{project}/instances/{instance}/clusters/{cluster}`.
      * 
* @@ -806,7 +806,7 @@ public com.google.protobuf.ByteString getParentBytes() { * *
      * Required. The name of the destination cluster that will contain the backup
-     * copy. The cluster must already exists. Values are of the form:
+     * copy. The cluster must already exist. Values are of the form:
      * `projects/{project}/instances/{instance}/clusters/{cluster}`.
      * 
* @@ -831,7 +831,7 @@ public Builder setParent(java.lang.String value) { * *
      * Required. The name of the destination cluster that will contain the backup
-     * copy. The cluster must already exists. Values are of the form:
+     * copy. The cluster must already exist. Values are of the form:
      * `projects/{project}/instances/{instance}/clusters/{cluster}`.
      * 
* @@ -852,7 +852,7 @@ public Builder clearParent() { * *
      * Required. The name of the destination cluster that will contain the backup
-     * copy. The cluster must already exists. Values are of the form:
+     * copy. The cluster must already exist. Values are of the form:
      * `projects/{project}/instances/{instance}/clusters/{cluster}`.
      * 
* diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/CopyBackupRequestOrBuilder.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/CopyBackupRequestOrBuilder.java index 43ca118837..34c46b495c 100644 --- a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/CopyBackupRequestOrBuilder.java +++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/CopyBackupRequestOrBuilder.java @@ -29,7 +29,7 @@ public interface CopyBackupRequestOrBuilder * *
    * Required. The name of the destination cluster that will contain the backup
-   * copy. The cluster must already exists. Values are of the form:
+   * copy. The cluster must already exist. Values are of the form:
    * `projects/{project}/instances/{instance}/clusters/{cluster}`.
    * 
* @@ -45,7 +45,7 @@ public interface CopyBackupRequestOrBuilder * *
    * Required. The name of the destination cluster that will contain the backup
-   * copy. The cluster must already exists. Values are of the form:
+   * copy. The cluster must already exist. Values are of the form:
    * `projects/{project}/instances/{instance}/clusters/{cluster}`.
    * 
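For orientation, a hedged sketch of assembling such a request; every resource ID and the 10-day expiration below are placeholders chosen for illustration, not values taken from this change:

import com.google.bigtable.admin.v2.CopyBackupRequest;
import com.google.protobuf.util.Durations;
import com.google.protobuf.util.Timestamps;

class CopyBackupRequestSketch {
  // Copies an existing backup into the destination cluster named by `parent`;
  // the copy gets its own expire_time, here 10 days from now.
  static CopyBackupRequest request() {
    return CopyBackupRequest.newBuilder()
        .setParent("projects/my-project/instances/my-instance/clusters/my-cluster")
        .setBackupId("my-backup-copy")
        .setSourceBackup(
            "projects/my-project/instances/my-instance/clusters/other-cluster/backups/my-backup")
        .setExpireTime(Timestamps.add(Timestamps.now(), Durations.fromDays(10)))
        .build();
  }
}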
* diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TableProto.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TableProto.java index 42dcaf2d64..f54afaa543 100644 --- a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TableProto.java +++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TableProto.java @@ -213,7 +213,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "\014\n\010CREATING\020\002:{\352Ax\n%bigtableadmin.google" + "apis.com/Snapshot\022Oprojects/{project}/in" + "stances/{instance}/clusters/{cluster}/sn" - + "apshots/{snapshot}\"\273\004\n\006Backup\022\014\n\004name\030\001 " + + "apshots/{snapshot}\"\371\005\n\006Backup\022\014\n\004name\030\001 " + "\001(\t\022\034\n\014source_table\030\002 \001(\tB\006\340A\005\340A\002\022\032\n\rsou" + "rce_backup\030\n \001(\tB\003\340A\003\0224\n\013expire_time\030\003 \001" + "(\0132\032.google.protobuf.TimestampB\003\340A\002\0223\n\ns" @@ -223,28 +223,32 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "\003\340A\003\022:\n\005state\030\007 \001(\0162&.google.bigtable.ad" + "min.v2.Backup.StateB\003\340A\003\022F\n\017encryption_i" + "nfo\030\t \001(\0132(.google.bigtable.admin.v2.Enc" - + "ryptionInfoB\003\340A\003\"7\n\005State\022\025\n\021STATE_UNSPE" - + "CIFIED\020\000\022\014\n\010CREATING\020\001\022\t\n\005READY\020\002:u\352Ar\n#" - + "bigtableadmin.googleapis.com/Backup\022Kpro" - + "jects/{project}/instances/{instance}/clu" - + "sters/{cluster}/backups/{backup}\"\300\001\n\nBac" - + "kupInfo\022\023\n\006backup\030\001 \001(\tB\003\340A\003\0223\n\nstart_ti" - + "me\030\002 \001(\0132\032.google.protobuf.TimestampB\003\340A" - + "\003\0221\n\010end_time\030\003 \001(\0132\032.google.protobuf.Ti" - + "mestampB\003\340A\003\022\031\n\014source_table\030\004 \001(\tB\003\340A\003\022" - + "\032\n\rsource_backup\030\n \001(\tB\003\340A\003*D\n\021RestoreSo" - + "urceType\022#\n\037RESTORE_SOURCE_TYPE_UNSPECIF" - + "IED\020\000\022\n\n\006BACKUP\020\001B\367\002\n\034com.google.bigtabl" - + "e.admin.v2B\nTableProtoP\001Z8cloud.google.c" - + "om/go/bigtable/admin/apiv2/adminpb;admin" - + "pb\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Go" - + "ogle\\Cloud\\Bigtable\\Admin\\V2\352\002\"Google::C" - + "loud::Bigtable::Admin::V2\352A\246\001\n(cloudkms." - + "googleapis.com/CryptoKeyVersion\022zproject" - + "s/{project}/locations/{location}/keyRing" - + "s/{key_ring}/cryptoKeys/{crypto_key}/cry" - + "ptoKeyVersions/{crypto_key_version}b\006pro" - + "to3" + + "ryptionInfoB\003\340A\003\022@\n\013backup_type\030\013 \001(\0162+." 
+ + "google.bigtable.admin.v2.Backup.BackupTy" + + "pe\0228\n\024hot_to_standard_time\030\014 \001(\0132\032.googl" + + "e.protobuf.Timestamp\"7\n\005State\022\025\n\021STATE_U" + + "NSPECIFIED\020\000\022\014\n\010CREATING\020\001\022\t\n\005READY\020\002\"@\n" + + "\nBackupType\022\033\n\027BACKUP_TYPE_UNSPECIFIED\020\000" + + "\022\014\n\010STANDARD\020\001\022\007\n\003HOT\020\002:u\352Ar\n#bigtablead" + + "min.googleapis.com/Backup\022Kprojects/{pro" + + "ject}/instances/{instance}/clusters/{clu" + + "ster}/backups/{backup}\"\300\001\n\nBackupInfo\022\023\n" + + "\006backup\030\001 \001(\tB\003\340A\003\0223\n\nstart_time\030\002 \001(\0132\032" + + ".google.protobuf.TimestampB\003\340A\003\0221\n\010end_t" + + "ime\030\003 \001(\0132\032.google.protobuf.TimestampB\003\340" + + "A\003\022\031\n\014source_table\030\004 \001(\tB\003\340A\003\022\032\n\rsource_" + + "backup\030\n \001(\tB\003\340A\003*D\n\021RestoreSourceType\022#" + + "\n\037RESTORE_SOURCE_TYPE_UNSPECIFIED\020\000\022\n\n\006B" + + "ACKUP\020\001B\367\002\n\034com.google.bigtable.admin.v2" + + "B\nTableProtoP\001Z8cloud.google.com/go/bigt" + + "able/admin/apiv2/adminpb;adminpb\252\002\036Googl" + + "e.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud" + + "\\Bigtable\\Admin\\V2\352\002\"Google::Cloud::Bigt" + + "able::Admin::V2\352A\246\001\n(cloudkms.googleapis" + + ".com/CryptoKeyVersion\022zprojects/{project" + + "}/locations/{location}/keyRings/{key_rin" + + "g}/cryptoKeys/{crypto_key}/cryptoKeyVers" + + "ions/{crypto_key_version}b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -424,6 +428,8 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "SizeBytes", "State", "EncryptionInfo", + "BackupType", + "HotToStandardTime", }); internal_static_google_bigtable_admin_v2_BackupInfo_descriptor = getDescriptor().getMessageTypes().get(9); diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/bigtable_table_admin.proto b/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/bigtable_table_admin.proto index 8614750afa..1fdcefa4a0 100644 --- a/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/bigtable_table_admin.proto +++ b/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/bigtable_table_admin.proto @@ -360,7 +360,7 @@ service BigtableTableAdmin { // returned table [long-running operation][google.longrunning.Operation] can // be used to track the progress of the operation, and to cancel it. The // [metadata][google.longrunning.Operation.metadata] field type is - // [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The + // [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. The // [response][google.longrunning.Operation.response] type is // [Table][google.bigtable.admin.v2.Table], if successful. rpc RestoreTable(RestoreTableRequest) returns (google.longrunning.Operation) { @@ -1219,7 +1219,7 @@ message ListBackupsResponse { // [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. message CopyBackupRequest { // Required. The name of the destination cluster that will contain the backup - // copy. The cluster must already exists. Values are of the form: + // copy. The cluster must already exist. Values are of the form: // `projects/{project}/instances/{instance}/clusters/{cluster}`. 
string parent = 1 [ (google.api.field_behavior) = REQUIRED, diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/table.proto b/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/table.proto index bdd53b6c54..b2ef458c83 100644 --- a/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/table.proto +++ b/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/table.proto @@ -456,6 +456,23 @@ message Backup { READY = 2; } + // The type of the backup. + enum BackupType { + // Not specified. + BACKUP_TYPE_UNSPECIFIED = 0; + + // The default type for Cloud Bigtable managed backups. Supported for + // backups created in both HDD and SSD instances. Requires optimization when + // restored to a table in an SSD instance. + STANDARD = 1; + + // A backup type with faster restore to SSD performance. Only supported for + // backups created in SSD instances. A new SSD table restored from a hot + // backup reaches production performance more quickly than a standard + // backup. + HOT = 2; + } + // A globally unique identifier for the backup which cannot be // changed. Values are of the form // `projects/{project}/instances/{instance}/clusters/{cluster}/ @@ -478,14 +495,17 @@ message Backup { // Output only. Name of the backup from which this backup was copied. If a // backup is not created by copying a backup, this field will be empty. Values - // are of the form: projects//instances//backups/. + // are of the form: + // projects//instances//clusters//backups/ string source_backup = 10 [(google.api.field_behavior) = OUTPUT_ONLY]; - // Required. The expiration time of the backup, with microseconds - // granularity that must be at least 6 hours and at most 90 days - // from the time the request is received. Once the `expire_time` - // has passed, Cloud Bigtable will delete the backup and free the - // resources used by the backup. + // Required. The expiration time of the backup. + // When creating a backup or updating its `expire_time`, the value must be + // greater than the backup creation time by: + // - At least 6 hours + // - At most 90 days + // + // Once the `expire_time` has passed, Cloud Bigtable will delete the backup. google.protobuf.Timestamp expire_time = 3 [(google.api.field_behavior) = REQUIRED]; @@ -511,6 +531,19 @@ message Backup { // Output only. The encryption information for the backup. EncryptionInfo encryption_info = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Indicates the backup type of the backup. + BackupType backup_type = 11; + + // The time at which the hot backup will be converted to a standard backup. + // Once the `hot_to_standard_time` has passed, Cloud Bigtable will convert the + // hot backup to a standard backup. This value must be greater than the backup + // creation time by: + // - At least 24 hours + // + // This field only applies for hot backups. When creating or updating a + // standard backup, attempting to set this field will fail the request. + google.protobuf.Timestamp hot_to_standard_time = 12; } // Information about a backup. @@ -533,7 +566,8 @@ message BackupInfo { // Output only. Name of the backup from which this backup was copied. If a // backup is not created by copying a backup, this field will be empty. Values - // are of the form: projects//instances//backups/. + // are of the form: + // projects//instances//clusters//backups/ string source_backup = 10 [(google.api.field_behavior) = OUTPUT_ONLY]; }
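Tying the proto additions above back to the generated client, here is a hedged end-to-end sketch of creating a hot backup that auto-converts to a standard backup after two days; all resource IDs are placeholders, and the call shape follows the existing createBackupAsync surface rather than anything introduced by this change:

import com.google.bigtable.admin.v2.Backup;
import com.google.bigtable.admin.v2.ClusterName;
import com.google.bigtable.admin.v2.CreateBackupRequest;
import com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient;
import com.google.protobuf.util.Durations;
import com.google.protobuf.util.Timestamps;

class CreateHotBackupSketch {
  public static void main(String[] args) throws Exception {
    try (BaseBigtableTableAdminClient client = BaseBigtableTableAdminClient.create()) {
      // Backup message using the new backup_type and hot_to_standard_time fields.
      Backup backup =
          Backup.newBuilder()
              .setSourceTable("projects/my-project/instances/my-instance/tables/my-table")
              .setBackupType(Backup.BackupType.HOT)
              .setHotToStandardTime(Timestamps.add(Timestamps.now(), Durations.fromDays(2)))
              .setExpireTime(Timestamps.add(Timestamps.now(), Durations.fromDays(14)))
              .build();
      CreateBackupRequest request =
          CreateBackupRequest.newBuilder()
              .setParent(ClusterName.of("my-project", "my-instance", "my-cluster").toString())
              .setBackupId("my-hot-backup")
              .setBackup(backup)
              .build();
      // Long-running operation; blocks here until the backup is created.
      Backup created = client.createBackupAsync(request).get();
      System.out.println("Created backup: " + created.getName());
    }
  }
}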