diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java
index 229e0490f5dc..694c54cf13a6 100644
--- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java
+++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java
@@ -120,7 +120,7 @@ public class HiveIcebergMetaHook implements HiveMetaHook {
   static final EnumSet<AlterTableType> SUPPORTED_ALTER_OPS = EnumSet.of(
       AlterTableType.ADDCOLS, AlterTableType.REPLACE_COLUMNS, AlterTableType.RENAME_COLUMN,
       AlterTableType.ADDPROPS, AlterTableType.DROPPROPS, AlterTableType.SETPARTITIONSPEC,
-      AlterTableType.UPDATE_COLUMNS, AlterTableType.RENAME, AlterTableType.EXECUTE);
+      AlterTableType.UPDATE_COLUMNS, AlterTableType.RENAME, AlterTableType.EXECUTE, AlterTableType.CREATE_BRANCH);
   private static final List<String> MIGRATION_ALLOWED_SOURCE_FORMATS = ImmutableList.of(
       FileFormat.PARQUET.name().toLowerCase(),
       FileFormat.ORC.name().toLowerCase(),
diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java
index 9ee6874dcbd4..59af674bbc6f 100644
--- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java
+++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java
@@ -76,6 +76,7 @@
 import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
 import org.apache.hadoop.hive.ql.metadata.HiveStoragePredicateHandler;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
+import org.apache.hadoop.hive.ql.parse.AlterTableBranchSpec;
 import org.apache.hadoop.hive.ql.parse.AlterTableExecuteSpec;
 import org.apache.hadoop.hive.ql.parse.PartitionTransform;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -716,6 +717,28 @@ private static ExecutorService getDeleteExecutorService(String completeName, int
     });
   }
 
+  @Override
+  public void alterTableBranchOperation(org.apache.hadoop.hive.ql.metadata.Table hmsTable,
+      AlterTableBranchSpec alterBranchSpec) {
+    TableDesc tableDesc = Utilities.getTableDesc(hmsTable);
+    Table icebergTable = IcebergTableUtil.getTable(conf, tableDesc.getProperties());
+    Optional.ofNullable(icebergTable.currentSnapshot()).orElseThrow(() ->
+        new UnsupportedOperationException(String.format("Cannot alter branch on iceberg table" +
+            " %s.%s which has no snapshot", hmsTable.getDbName(), hmsTable.getTableName())));
+
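+    // Dispatch on the requested branch operation; CREATE_BRANCH is the only type supported so far.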
+    switch (alterBranchSpec.getOperationType()) {
+      case CREATE_BRANCH:
+        AlterTableBranchSpec.CreateBranchSpec createBranchSpec =
+            (AlterTableBranchSpec.CreateBranchSpec) alterBranchSpec.getOperationParams();
+        IcebergBranchExec.createBranch(icebergTable, createBranchSpec);
+        break;
+      default:
+        throw new UnsupportedOperationException(
+            String.format("Operation type %s is not supported", alterBranchSpec.getOperationType().name()));
+    }
+  }
+
   @Override
   public boolean isValidMetadataTable(String metaTableName) {
     return IcebergMetadataTables.isValidMetaTable(metaTableName);
diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/IcebergBranchExec.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/IcebergBranchExec.java
new file mode 100644
index 000000000000..ef910b14b4b3
--- /dev/null
+++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/IcebergBranchExec.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.mr.hive;
+
+import org.apache.hadoop.hive.ql.parse.AlterTableBranchSpec;
+import org.apache.iceberg.ManageSnapshots;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.util.SnapshotUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class IcebergBranchExec {
+
+  private static final Logger LOG = LoggerFactory.getLogger(IcebergBranchExec.class);
+
+  private IcebergBranchExec() {
+  }
+
+  /**
+   * Create a branch on the Iceberg table.
+   * @param table the Iceberg table
+   * @param createBranchSpec the parameters describing the branch to create
+   */
+  public static void createBranch(Table table, AlterTableBranchSpec.CreateBranchSpec createBranchSpec) {
+    String branchName = createBranchSpec.getBranchName();
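+    // Resolve the snapshot the branch should point at: an explicit snapshot id wins,
+    // then a SYSTEM_TIME timestamp (resolved via SnapshotUtil), otherwise fall back
+    // to the table's current snapshot.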
+    Long snapshotId;
+    if (createBranchSpec.getSnapshotId() != null) {
+      snapshotId = createBranchSpec.getSnapshotId();
+    } else if (createBranchSpec.getAsOfTime() != null) {
+      snapshotId = SnapshotUtil.snapshotIdAsOfTime(table, createBranchSpec.getAsOfTime());
+    } else {
+      snapshotId = table.currentSnapshot().snapshotId();
+    }
+    LOG.info("Creating branch {} on iceberg table {} with snapshotId {}", branchName, table.name(), snapshotId);
+    ManageSnapshots manageSnapshots = table.manageSnapshots();
+    manageSnapshots.createBranch(branchName, snapshotId);
+    if (createBranchSpec.getMaxRefAgeMs() != null) {
+      manageSnapshots.setMaxRefAgeMs(branchName, createBranchSpec.getMaxRefAgeMs());
+    }
+    if (createBranchSpec.getMinSnapshotsToKeep() != null) {
+      manageSnapshots.setMinSnapshotsToKeep(branchName, createBranchSpec.getMinSnapshotsToKeep());
+    }
+    if (createBranchSpec.getMaxSnapshotAgeMs() != null) {
+      manageSnapshots.setMaxSnapshotAgeMs(branchName, createBranchSpec.getMaxSnapshotAgeMs());
+    }
+
+    manageSnapshots.commit();
+  }
+}
diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergBranchOperation.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergBranchOperation.java
new file mode 100644
index 000000000000..ef4b94f29453
--- /dev/null
+++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergBranchOperation.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.mr.hive;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+import org.apache.iceberg.SnapshotRef;
+import org.apache.iceberg.Table;
+import org.junit.Assert;
+import org.junit.Test;
+
+import static org.apache.iceberg.mr.hive.HiveIcebergTestUtils.timestampAfterSnapshot;
+
+public class TestHiveIcebergBranchOperation extends HiveIcebergStorageHandlerWithEngineBase {
+
+  @Test
+  public void testCreateBranchWithDefaultConfig() throws InterruptedException, IOException {
+    Table table =
+        testTables.createTableWithVersions(shell, "customers", HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA,
+            fileFormat, HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, 2);
+
+    String branchName = "test_branch_1";
+    shell.executeStatement(String.format("ALTER TABLE customers CREATE BRANCH %s", branchName));
+    table.refresh();
+    SnapshotRef ref = table.refs().get(branchName);
+    Assert.assertEquals(table.currentSnapshot().snapshotId(), ref.snapshotId());
+    Assert.assertNull(ref.minSnapshotsToKeep());
+    Assert.assertNull(ref.maxSnapshotAgeMs());
+    Assert.assertNull(ref.maxRefAgeMs());
+
+    // creating a branch which already exists should fail
+    try {
+      shell.executeStatement(String.format("ALTER TABLE customers CREATE BRANCH %s", branchName));
+      Assert.fail("Creating a branch that already exists should have thrown");
+    } catch (Throwable e) {
+      while (e.getCause() != null) {
+        e = e.getCause();
+      }
+      Assert.assertTrue(e.getMessage().contains("Ref test_branch_1 already exists"));
+    }
+  }
+
+  @Test
+  public void testCreateBranchWithSnapshotId() throws InterruptedException, IOException {
+    Table table =
+        testTables.createTableWithVersions(shell, "customers", HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA,
+            fileFormat, HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, 2);
+
+    String branchName = "test_branch_1";
+    Long snapshotId = table.history().get(0).snapshotId();
+    shell.executeStatement(String.format("ALTER TABLE customers CREATE BRANCH %s FOR SYSTEM_VERSION AS OF %d",
+        branchName, snapshotId));
+    table.refresh();
+    SnapshotRef ref = table.refs().get(branchName);
+    Assert.assertEquals(snapshotId.longValue(), ref.snapshotId());
+    Assert.assertNull(ref.minSnapshotsToKeep());
+    Assert.assertNull(ref.maxSnapshotAgeMs());
+    Assert.assertNull(ref.maxRefAgeMs());
+  }
+
+  @Test
+  public void testCreateBranchWithTimeStamp() throws InterruptedException, IOException {
+    Table table =
+        testTables.createTableWithVersions(shell, "customers", HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA,
+            fileFormat, HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, 2);
+
+    String branchName = "test_branch_1";
+    Long snapshotId = table.history().get(0).snapshotId();
+
+    shell.executeStatement(String.format("ALTER TABLE customers CREATE BRANCH %s FOR SYSTEM_TIME AS OF '%s'",
+        branchName, timestampAfterSnapshot(table, 0)));
+    table.refresh();
+    SnapshotRef ref = table.refs().get(branchName);
+    Assert.assertEquals(snapshotId.longValue(), ref.snapshotId());
+  }
+
"customers", HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, + fileFormat, HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, 2); + + String branchName = "test_branch_1"; + long maxRefAge = 5L; + shell.executeStatement(String.format("ALTER TABLE customers CREATE BRANCH %s RETAIN %d DAYS", + branchName, maxRefAge)); + table.refresh(); + SnapshotRef ref = table.refs().get(branchName); + Assert.assertEquals(table.currentSnapshot().snapshotId(), ref.snapshotId()); + Assert.assertNull(ref.minSnapshotsToKeep()); + Assert.assertNull(ref.maxSnapshotAgeMs()); + Assert.assertEquals(TimeUnit.DAYS.toMillis(maxRefAge), ref.maxRefAgeMs().longValue()); + } + + @Test + public void testCreateBranchWithMinSnapshotsToKeep() throws InterruptedException, IOException { + Table table = + testTables.createTableWithVersions(shell, "customers", HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, + fileFormat, HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, 2); + + String branchName = "test_branch_1"; + Integer minSnapshotsToKeep = 2; + shell.executeStatement(String.format("ALTER TABLE customers CREATE BRANCH %s WITH SNAPSHOT RETENTION %d SNAPSHOTS", + branchName, minSnapshotsToKeep)); + table.refresh(); + SnapshotRef ref = table.refs().get(branchName); + Assert.assertEquals(table.currentSnapshot().snapshotId(), ref.snapshotId()); + Assert.assertEquals(minSnapshotsToKeep, ref.minSnapshotsToKeep()); + Assert.assertNull(ref.maxSnapshotAgeMs()); + Assert.assertNull(ref.maxRefAgeMs()); + } + + @Test + public void testCreateBranchWithMinSnapshotsToKeepAndMaxSnapshotAge() throws InterruptedException, IOException { + Table table = + testTables.createTableWithVersions(shell, "customers", HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, + fileFormat, HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, 2); + + String branchName = "test_branch_1"; + Integer minSnapshotsToKeep = 2; + long maxSnapshotAge = 2L; + shell.executeStatement(String.format("ALTER TABLE customers CREATE BRANCH %s WITH SNAPSHOT RETENTION %d SNAPSHOTS" + + " %d DAYS", branchName, minSnapshotsToKeep, maxSnapshotAge)); + table.refresh(); + SnapshotRef ref = table.refs().get(branchName); + Assert.assertEquals(table.currentSnapshot().snapshotId(), ref.snapshotId()); + Assert.assertEquals(minSnapshotsToKeep, ref.minSnapshotsToKeep()); + Assert.assertEquals(TimeUnit.DAYS.toMillis(maxSnapshotAge), ref.maxSnapshotAgeMs().longValue()); + Assert.assertNull(ref.maxRefAgeMs()); + } + + @Test + public void testCreateBranchWithAllCustomConfig() throws IOException, InterruptedException { + Table table = + testTables.createTableWithVersions(shell, "customers", HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, + fileFormat, HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, 2); + + String branchName = "test_branch_1"; + Long snapshotId = table.history().get(0).snapshotId(); + Integer minSnapshotsToKeep = 2; + long maxSnapshotAge = 2L; + long maxRefAge = 5L; + shell.executeStatement(String.format("ALTER TABLE customers CREATE BRANCH %s FOR SYSTEM_VERSION AS OF %d RETAIN" + + " %d DAYS WITH SNAPSHOT RETENTION %d SNAPSHOTS %d days", + branchName, snapshotId, maxRefAge, minSnapshotsToKeep, maxSnapshotAge)); + table.refresh(); + SnapshotRef ref = table.refs().get(branchName); + Assert.assertEquals(snapshotId.longValue(), ref.snapshotId()); + Assert.assertEquals(minSnapshotsToKeep, ref.minSnapshotsToKeep()); + Assert.assertEquals(TimeUnit.DAYS.toMillis(maxSnapshotAge), ref.maxSnapshotAgeMs().longValue()); + 
+    Assert.assertEquals(TimeUnit.DAYS.toMillis(maxRefAge), ref.maxRefAgeMs().longValue());
+  }
+
+  @Test
+  public void testCreateBranchWithNonIcebergTable() {
+    shell.executeStatement("create table nonice_tbl (id int, name string)");
+
+    String branchName = "test_branch_1";
+    try {
+      shell.executeStatement(String.format("ALTER TABLE nonice_tbl CREATE BRANCH %s", branchName));
+      Assert.fail("Creating a branch on a non-Iceberg table should have thrown");
+    } catch (Throwable e) {
+      while (e.getCause() != null) {
+        e = e.getCause();
+      }
+      Assert.assertTrue(e.getMessage().contains("Not an iceberg table"));
+    }
+  }
+}
diff --git a/iceberg/iceberg-handler/src/test/queries/negative/alter_table_create_branch_negative.q b/iceberg/iceberg-handler/src/test/queries/negative/alter_table_create_branch_negative.q
new file mode 100644
index 000000000000..45078a252b38
--- /dev/null
+++ b/iceberg/iceberg-handler/src/test/queries/negative/alter_table_create_branch_negative.q
@@ -0,0 +1,4 @@
+create table ice_tbl (id int, name string) Stored by Iceberg;
+
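+-- the freshly created table has no snapshot yet, so creating a branch must fail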
+alter table ice_tbl create branch test_branch_1;
diff --git a/iceberg/iceberg-handler/src/test/queries/positive/alter_table_create_branch.q b/iceberg/iceberg-handler/src/test/queries/positive/alter_table_create_branch.q
new file mode 100644
index 000000000000..4eee6ecf1143
--- /dev/null
+++ b/iceberg/iceberg-handler/src/test/queries/positive/alter_table_create_branch.q
@@ -0,0 +1,34 @@
+-- SORT_QUERY_RESULTS
+set hive.explain.user=false;
+
+create table iceTbl (id int, name string) Stored by Iceberg;
+
+-- creating a branch requires the table to have a current snapshot, so insert some values to generate one
+insert into iceTbl values(1, 'jack');
+
+-- create a branch test_branch_1 with default values based on the current snapshotId
+explain alter table iceTbl create branch test_branch_1;
+alter table iceTbl create branch test_branch_1;
+-- check the values, one value
+select * from iceTbl for system_version as of 'test_branch_1';
+
+-- create a branch test_branch_2 which is retained for 5 days, based on the current snapshotId
+insert into iceTbl values(2, 'bob');
+explain alter table iceTbl create branch test_branch_2 retain 5 days;
+alter table iceTbl create branch test_branch_2 retain 5 days;
+-- check the values, two values
+select * from iceTbl for system_version as of 'test_branch_2';
+
+-- create a branch test_branch_3 that keeps at least 5 snapshots, based on the current snapshotId
+insert into iceTbl values(3, 'tom');
+explain alter table iceTbl create branch test_branch_3 with snapshot retention 5 snapshots;
+alter table iceTbl create branch test_branch_3 with snapshot retention 5 snapshots;
+-- check the values, three values
+select * from iceTbl for system_version as of 'test_branch_3';
+
+-- create a branch test_branch_4 based on the current snapshotId, keeping at least 5 snapshots, each retained for 5 days
+insert into iceTbl values(4, 'lisa');
+explain alter table iceTbl create branch test_branch_4 with snapshot retention 5 snapshots 5 days;
+alter table iceTbl create branch test_branch_4 with snapshot retention 5 snapshots 5 days;
+-- check the values, four values
+select * from iceTbl for system_version as of 'test_branch_4';
diff --git a/iceberg/iceberg-handler/src/test/results/negative/alter_table_create_branch_negative.q.out b/iceberg/iceberg-handler/src/test/results/negative/alter_table_create_branch_negative.q.out
new file mode 100644
index 000000000000..4d5e2812e4b3
--- /dev/null
+++ b/iceberg/iceberg-handler/src/test/results/negative/alter_table_create_branch_negative.q.out
@@ -0,0 +1,12 @@
+PREHOOK: query: create table ice_tbl (id int, name string) Stored by Iceberg
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@ice_tbl
+POSTHOOK: query: create table ice_tbl (id int, name string) Stored by Iceberg
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@ice_tbl
+PREHOOK: query: alter table ice_tbl create branch test_branch_1
+PREHOOK: type: ALTERTABLE_CREATEBRANCH
+PREHOOK: Input: default@ice_tbl
+FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.UnsupportedOperationException: Cannot alter branch on iceberg table default.ice_tbl which has no snapshot
diff --git a/iceberg/iceberg-handler/src/test/results/positive/alter_table_create_branch.q.out b/iceberg/iceberg-handler/src/test/results/positive/alter_table_create_branch.q.out
new file mode 100644
index 000000000000..190f190ec893
--- /dev/null
+++ b/iceberg/iceberg-handler/src/test/results/positive/alter_table_create_branch.q.out
@@ -0,0 +1,166 @@
+PREHOOK: query: create table iceTbl (id int, name string) Stored by Iceberg
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@iceTbl
+POSTHOOK: query: create table iceTbl (id int, name string) Stored by Iceberg
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@iceTbl
+PREHOOK: query: insert into iceTbl values(1, 'jack')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@icetbl
+POSTHOOK: query: insert into iceTbl values(1, 'jack')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@icetbl
+PREHOOK: query: explain alter table iceTbl create branch test_branch_1
+PREHOOK: type: ALTERTABLE_CREATEBRANCH
+PREHOOK: Input: default@icetbl
+POSTHOOK: query: explain alter table iceTbl create branch test_branch_1
+POSTHOOK: type: ALTERTABLE_CREATEBRANCH
+POSTHOOK: Input: default@icetbl
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    CreateBranch operation
+      table name: default.iceTbl
+      spec: AlterTableBranchSpec{operationType=CREATE_BRANCH, operationParams=CreateBranchSpec{branchName=test_branch_1, snapshotId=null, asOfTime=null, maxRefAgeMs=null, minSnapshotsToKeep=null, maxSnapshotAgeMs=null}}
+
+PREHOOK: query: alter table iceTbl create branch test_branch_1
+PREHOOK: type: ALTERTABLE_CREATEBRANCH
+PREHOOK: Input: default@icetbl
+POSTHOOK: query: alter table iceTbl create branch test_branch_1
+POSTHOOK: type: ALTERTABLE_CREATEBRANCH
+POSTHOOK: Input: default@icetbl
+PREHOOK: query: select * from iceTbl for system_version as of 'test_branch_1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@icetbl
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from iceTbl for system_version as of 'test_branch_1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@icetbl
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1	jack
+PREHOOK: query: insert into iceTbl values(2, 'bob')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@icetbl
+POSTHOOK: query: insert into iceTbl values(2, 'bob')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@icetbl
+PREHOOK: query: explain alter table iceTbl create branch test_branch_2 retain 5 days
+PREHOOK: type: ALTERTABLE_CREATEBRANCH
+PREHOOK: Input: default@icetbl
+POSTHOOK: query: explain alter table iceTbl create branch test_branch_2 retain 5 days
+POSTHOOK: type: ALTERTABLE_CREATEBRANCH
+POSTHOOK: Input: default@icetbl
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    CreateBranch operation
+      table name: default.iceTbl
+      spec: AlterTableBranchSpec{operationType=CREATE_BRANCH, operationParams=CreateBranchSpec{branchName=test_branch_2, snapshotId=null, asOfTime=null, maxRefAgeMs=432000000, minSnapshotsToKeep=null, maxSnapshotAgeMs=null}}
+
+PREHOOK: query: alter table iceTbl create branch test_branch_2 retain 5 days
+PREHOOK: type: ALTERTABLE_CREATEBRANCH
+PREHOOK: Input: default@icetbl
+POSTHOOK: query: alter table iceTbl create branch test_branch_2 retain 5 days
+POSTHOOK: type: ALTERTABLE_CREATEBRANCH
+POSTHOOK: Input: default@icetbl
+PREHOOK: query: select * from iceTbl for system_version as of 'test_branch_2'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@icetbl
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from iceTbl for system_version as of 'test_branch_2'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@icetbl
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1	jack
+2	bob
+PREHOOK: query: insert into iceTbl values(3, 'tom')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@icetbl
+POSTHOOK: query: insert into iceTbl values(3, 'tom')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@icetbl
+PREHOOK: query: explain alter table iceTbl create branch test_branch_3 with snapshot retention 5 snapshots
+PREHOOK: type: ALTERTABLE_CREATEBRANCH
+PREHOOK: Input: default@icetbl
+POSTHOOK: query: explain alter table iceTbl create branch test_branch_3 with snapshot retention 5 snapshots
+POSTHOOK: type: ALTERTABLE_CREATEBRANCH
+POSTHOOK: Input: default@icetbl
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    CreateBranch operation
+      table name: default.iceTbl
+      spec: AlterTableBranchSpec{operationType=CREATE_BRANCH, operationParams=CreateBranchSpec{branchName=test_branch_3, snapshotId=null, asOfTime=null, maxRefAgeMs=null, minSnapshotsToKeep=5, maxSnapshotAgeMs=null}}
+
+PREHOOK: query: alter table iceTbl create branch test_branch_3 with snapshot retention 5 snapshots
+PREHOOK: type: ALTERTABLE_CREATEBRANCH
+PREHOOK: Input: default@icetbl
+POSTHOOK: query: alter table iceTbl create branch test_branch_3 with snapshot retention 5 snapshots
+POSTHOOK: type: ALTERTABLE_CREATEBRANCH
+POSTHOOK: Input: default@icetbl
+PREHOOK: query: select * from iceTbl for system_version as of 'test_branch_3'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@icetbl
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from iceTbl for system_version as of 'test_branch_3'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@icetbl
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1	jack
+2	bob
+3	tom
+PREHOOK: query: insert into iceTbl values(4, 'lisa')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@icetbl
+POSTHOOK: query: insert into iceTbl values(4, 'lisa')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@icetbl
+PREHOOK: query: explain alter table iceTbl create branch test_branch_4 with snapshot retention 5 snapshots 5 days
+PREHOOK: type: ALTERTABLE_CREATEBRANCH
+PREHOOK: Input: default@icetbl
+POSTHOOK: query: explain alter table iceTbl create branch test_branch_4 with snapshot retention 5 snapshots 5 days
+POSTHOOK: type: ALTERTABLE_CREATEBRANCH
+POSTHOOK: Input: default@icetbl
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    CreateBranch operation
+      table name: default.iceTbl
+      spec: AlterTableBranchSpec{operationType=CREATE_BRANCH, operationParams=CreateBranchSpec{branchName=test_branch_4, snapshotId=null, asOfTime=null, maxRefAgeMs=null, minSnapshotsToKeep=5, maxSnapshotAgeMs=432000000}}
+
+PREHOOK: query: alter table iceTbl create branch test_branch_4 with snapshot retention 5 snapshots 5 days
+PREHOOK: type: ALTERTABLE_CREATEBRANCH
+PREHOOK: Input: default@icetbl
+POSTHOOK: query: alter table iceTbl create branch test_branch_4 with snapshot retention 5 snapshots 5 days
+POSTHOOK: type: ALTERTABLE_CREATEBRANCH
+POSTHOOK: Input: default@icetbl
+PREHOOK: query: select * from iceTbl for system_version as of 'test_branch_4'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@icetbl
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from iceTbl for system_version as of 'test_branch_4'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@icetbl
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1	jack
+2	bob
+3	tom
+4	lisa
diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/AlterClauseParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/AlterClauseParser.g
index 9b276bafc6bc..f785b583be9b 100644
--- a/parser/src/java/org/apache/hadoop/hive/ql/parse/AlterClauseParser.g
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/AlterClauseParser.g
@@ -74,6 +74,7 @@ alterTableStatementSuffix
     | alterStatementSuffixSetOwner
     | alterStatementSuffixSetPartSpec
     | alterStatementSuffixExecute
+    | alterStatementSuffixCreateBranch
     | alterStatementSuffixConvert
     ;
@@ -477,6 +478,39 @@ alterStatementSuffixExecute
     -> ^(TOK_ALTERTABLE_EXECUTE KW_SET_CURRENT_SNAPSHOT $snapshotParam)
     ;
 
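+// Examples accepted by the rule below (every clause after the branch name is optional):
+//   ALTER TABLE tbl CREATE BRANCH b1 FOR SYSTEM_VERSION AS OF 123 RETAIN 5 DAYS WITH SNAPSHOT RETENTION 2 SNAPSHOTS 3 DAYS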
+alterStatementSuffixCreateBranch
+@init { gParent.pushMsg("alter table create branch", state); }
+@after { gParent.popMsg(state); }
+    : KW_CREATE KW_BRANCH branchName=identifier snapshotIdOfBranch? branchRetain? retentionOfSnapshots?
+    -> ^(TOK_ALTERTABLE_CREATE_BRANCH $branchName snapshotIdOfBranch? branchRetain? retentionOfSnapshots?)
+    ;
+
+snapshotIdOfBranch
+@init { gParent.pushMsg("alter table create branch as of version", state); }
+@after { gParent.popMsg(state); }
+    : KW_FOR KW_SYSTEM_VERSION KW_AS KW_OF snapshotId=Number
+    -> ^(TOK_AS_OF_VERSION $snapshotId)
+    |
+    (KW_FOR KW_SYSTEM_TIME KW_AS KW_OF asOfTime=StringLiteral)
+    -> ^(TOK_AS_OF_TIME $asOfTime)
+    ;
+
+branchRetain
+@init { gParent.pushMsg("alter table create branch RETAIN", state); }
+@after { gParent.popMsg(state); }
+    : KW_RETAIN maxRefAge=Number timeUnit=timeUnitQualifiers
+    -> ^(TOK_RETAIN $maxRefAge $timeUnit)
+    ;
+
+retentionOfSnapshots
+@init { gParent.pushMsg("alter table create branch WITH SNAPSHOT RETENTION", state); }
+@after { gParent.popMsg(state); }
+    : (KW_WITH KW_SNAPSHOT KW_RETENTION minSnapshotsToKeep=Number KW_SNAPSHOTS (maxSnapshotAge=Number timeUnit=timeUnitQualifiers)?)
+    -> ^(TOK_WITH_SNAPSHOT_RETENTION $minSnapshotsToKeep ($maxSnapshotAge $timeUnit)?)
+    ;
+
 fileFormat
 @init { gParent.pushMsg("file format specification", state); }
 @after { gParent.popMsg(state); }
diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexerParent.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexerParent.g
index b07ba782d86b..c7cbc73222c5 100644
--- a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexerParent.g
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexerParent.g
@@ -392,7 +392,10 @@
 KW_SYSTEM_TIME: 'SYSTEM_TIME';
 KW_SYSTEM_VERSION: 'SYSTEM_VERSION';
 KW_EXPIRE_SNAPSHOTS: 'EXPIRE_SNAPSHOTS';
 KW_SET_CURRENT_SNAPSHOT: 'SET_CURRENT_SNAPSHOT';
-
+KW_BRANCH: 'BRANCH';
+KW_SNAPSHOTS: 'SNAPSHOTS';
+KW_RETAIN: 'RETAIN';
+KW_RETENTION: 'RETENTION';
 
 // Operators
 // NOTE: if you add a new function/operator, add it to sysFuncNames so that describe function _FUNC_ will work.
diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index 8a974e278b86..0ca781526419 100644
--- a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -219,6 +219,9 @@ TOK_ALTERTABLE_UPDATECOLUMNS;
 TOK_ALTERTABLE_OWNER;
 TOK_ALTERTABLE_SETPARTSPEC;
 TOK_ALTERTABLE_EXECUTE;
+TOK_ALTERTABLE_CREATE_BRANCH;
+TOK_RETAIN;
+TOK_WITH_SNAPSHOT_RETENTION;
 TOK_ALTERTABLE_CONVERT;
 TOK_MSCK;
 TOK_SHOWDATABASES;
diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
index 2e7d12309e8e..e6e0ae58ca5d 100644
--- a/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
@@ -408,6 +408,15 @@ timeQualifiers
     | KW_SECOND -> Identifier["second"]
     ;
 
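+// time units accepted by RETAIN and WITH SNAPSHOT RETENTION; each keyword is rewritten to an
+// identifier matching a java.util.concurrent.TimeUnit constant (days/hours/minutes)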
+timeUnitQualifiers
+    :
+    KW_DAY -> Identifier["days"]
+    | KW_HOUR -> Identifier["hours"]
+    | KW_MINUTE -> Identifier["minutes"]
+    ;
+
 constant
 @init { gParent.pushMsg("constant", state); }
 @after { gParent.popMsg(state); }
@@ -977,6 +984,7 @@ nonReserved
     | KW_SYSTEM_TIME | KW_SYSTEM_VERSION | KW_EXPIRE_SNAPSHOTS | KW_SET_CURRENT_SNAPSHOT
+    | KW_BRANCH | KW_SNAPSHOTS | KW_RETAIN | KW_RETENTION
     ;
 
 //The following SQL2011 reserved keywords are used as function name only, but not as identifiers.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLUtils.java
index 0b9830a7b504..b8048da525db 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/DDLUtils.java
@@ -27,6 +27,7 @@
 import org.apache.hadoop.hive.common.TableName;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.HiveMetaHook;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -217,4 +218,13 @@ public static void setColumnsAndStorePartitionTransformSpecOfTable(
       partCols.ifPresent(tbl::setPartCols);
     }
   }
+
+  public static void validateTableIsIceberg(org.apache.hadoop.hive.ql.metadata.Table table)
+      throws SemanticException {
+    String tableType = table.getParameters().get(HiveMetaHook.TABLE_TYPE);
+    if (!HiveMetaHook.ICEBERG.equalsIgnoreCase(tableType)) {
+      throw new SemanticException(String.format("Not an iceberg table: %s (type=%s)",
+          table.getFullTableName(), tableType));
+    }
+  }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableType.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableType.java
index df5ba186b69a..7d6164d6e07b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableType.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AlterTableType.java
@@ -40,6 +40,7 @@ public enum AlterTableType {
   ALTERPARTITION("alter partition"), // Note: this is never used in AlterTableDesc.
   SETPARTITIONSPEC("set partition spec"),
   EXECUTE("execute"),
+  CREATE_BRANCH("create branch"),
   // constraint
   ADD_CONSTRAINT("add constraint"),
   DROP_CONSTRAINT("drop constraint"),
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/branch/create/AlterTableCreateBranchAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/branch/create/AlterTableCreateBranchAnalyzer.java
new file mode 100644
index 000000000000..a742d39b84fb
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/branch/create/AlterTableCreateBranchAnalyzer.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.branch.create;
+
+import java.time.ZoneId;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.common.type.TimestampTZ;
+import org.apache.hadoop.hive.common.type.TimestampTZUtil;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory;
+import org.apache.hadoop.hive.ql.ddl.DDLUtils;
+import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableAnalyzer;
+import org.apache.hadoop.hive.ql.ddl.table.AlterTableType;
+import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.hooks.ReadEntity;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.AlterTableBranchSpec;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.session.SessionState;
+
+import static org.apache.hadoop.hive.ql.parse.AlterTableBranchSpec.AlterBranchOperationType.CREATE_BRANCH;
+
+@DDLSemanticAnalyzerFactory.DDLType(types = HiveParser.TOK_ALTERTABLE_CREATE_BRANCH)
+public class AlterTableCreateBranchAnalyzer extends AbstractAlterTableAnalyzer {
+
+  public AlterTableCreateBranchAnalyzer(QueryState queryState) throws SemanticException {
+    super(queryState);
+  }
+
+  @Override
+  protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command)
+      throws SemanticException {
+    Table table = getTable(tableName);
+    validateAlterTableType(table, AlterTableType.CREATE_BRANCH, false);
+    DDLUtils.validateTableIsIceberg(table);
+    inputs.add(new ReadEntity(table));
+
+    String branchName = command.getChild(0).getText();
+    Long snapshotId = null;
+    Long asOfTime = null;
+    Long maxRefAgeMs = null;
+    Integer minSnapshotsToKeep = null;
+    Long maxSnapshotAgeMs = null;
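+    // Optional children of TOK_ALTERTABLE_CREATE_BRANCH, in grammar order:
+    // TOK_AS_OF_VERSION or TOK_AS_OF_TIME, then TOK_RETAIN, then TOK_WITH_SNAPSHOT_RETENTION.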
+    for (int i = 1; i < command.getChildCount(); i++) {
+      ASTNode childNode = (ASTNode) command.getChild(i);
+      switch (childNode.getToken().getType()) {
+        case HiveParser.TOK_AS_OF_VERSION:
+          snapshotId = Long.parseLong(childNode.getChild(0).getText());
+          break;
+        case HiveParser.TOK_AS_OF_TIME:
+          ZoneId timeZone = SessionState.get() == null ? new HiveConf().getLocalTimeZone() :
+              SessionState.get().getConf().getLocalTimeZone();
+          TimestampTZ ts = TimestampTZUtil.parse(stripQuotes(childNode.getChild(0).getText()), timeZone);
+          asOfTime = ts.toEpochMilli();
+          break;
+        case HiveParser.TOK_RETAIN:
+          String maxRefAge = childNode.getChild(0).getText();
+          String timeUnitOfBranchRetain = childNode.getChild(1).getText();
+          maxRefAgeMs = TimeUnit.valueOf(timeUnitOfBranchRetain.toUpperCase(Locale.ENGLISH))
+              .toMillis(Long.parseLong(maxRefAge));
+          break;
+        case HiveParser.TOK_WITH_SNAPSHOT_RETENTION:
+          minSnapshotsToKeep = Integer.valueOf(childNode.getChild(0).getText());
+          if (childNode.getChildren().size() > 1) {
+            String maxSnapshotAge = childNode.getChild(1).getText();
+            String timeUnitOfSnapshotsRetention = childNode.getChild(2).getText();
+            maxSnapshotAgeMs = TimeUnit.valueOf(timeUnitOfSnapshotsRetention.toUpperCase(Locale.ENGLISH))
+                .toMillis(Long.parseLong(maxSnapshotAge));
+          }
+          break;
+        default:
+          throw new SemanticException("Unrecognized token in ALTER TABLE CREATE BRANCH statement");
+      }
+    }
+
+    AlterTableBranchSpec.CreateBranchSpec createBranchSpec =
+        new AlterTableBranchSpec.CreateBranchSpec(branchName, snapshotId, asOfTime,
+            maxRefAgeMs, minSnapshotsToKeep, maxSnapshotAgeMs);
+    AlterTableBranchSpec alterTableBranchSpec = new AlterTableBranchSpec(CREATE_BRANCH, createBranchSpec);
+    AlterTableCreateBranchDesc desc = new AlterTableCreateBranchDesc(tableName, alterTableBranchSpec);
+    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
+  }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/branch/create/AlterTableCreateBranchDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/branch/create/AlterTableCreateBranchDesc.java
new file mode 100644
index 000000000000..43ef199b30cb
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/branch/create/AlterTableCreateBranchDesc.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.branch.create;
+
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc;
+import org.apache.hadoop.hive.ql.ddl.table.AlterTableType;
+import org.apache.hadoop.hive.ql.parse.AlterTableBranchSpec;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.Explain;
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+
+@Explain(displayName = "CreateBranch operation", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class AlterTableCreateBranchDesc extends AbstractAlterTableDesc {
+  private static final long serialVersionUID = 1L;
+
+  private final AlterTableBranchSpec alterTableBranchSpec;
+
+  public AlterTableCreateBranchDesc(TableName tableName, AlterTableBranchSpec alterTableBranchSpec)
+      throws SemanticException {
+    super(AlterTableType.CREATE_BRANCH, tableName, null, null, false, false, null);
+    this.alterTableBranchSpec = alterTableBranchSpec;
+  }
+
+  public AlterTableBranchSpec getAlterTableBranchSpec() {
+    return alterTableBranchSpec;
+  }
+
+  @Explain(displayName = "spec", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public String getExplainOutput() {
+    return alterTableBranchSpec.toString();
+  }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/branch/create/AlterTableCreateBranchOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/branch/create/AlterTableCreateBranchOperation.java
new file mode 100644
index 000000000000..46fc6f37d459
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/branch/create/AlterTableCreateBranchOperation.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.table.branch.create;
+
+import org.apache.hadoop.hive.ql.ddl.DDLOperation;
+import org.apache.hadoop.hive.ql.ddl.DDLOperationContext;
+import org.apache.hadoop.hive.ql.metadata.Table;
+
+public class AlterTableCreateBranchOperation extends DDLOperation<AlterTableCreateBranchDesc> {
+
+  public AlterTableCreateBranchOperation(DDLOperationContext context, AlterTableCreateBranchDesc desc) {
+    super(context, desc);
+  }
+
+  @Override
+  public int execute() throws Exception {
+    Table table = context.getDb().getTable(desc.getFullTableName());
+    context.getDb().alterTableBranchOperation(table, desc.getAlterTableBranchSpec());
+    return 0;
+  }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index f6dbf3c79293..8377dc4b766a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -219,6 +219,7 @@
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewUtils;
 import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.AlterTableBranchSpec;
 import org.apache.hadoop.hive.ql.parse.AlterTableExecuteSpec;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
@@ -6714,6 +6715,15 @@ public void alterTableExecuteOperation(Table table, AlterTableExecuteSpec execut
     }
   }
 
+  public void alterTableBranchOperation(Table table, AlterTableBranchSpec createBranchSpec) throws HiveException {
+    try {
+      HiveStorageHandler storageHandler = createStorageHandler(table.getTTable());
+      storageHandler.alterTableBranchOperation(table, createBranchSpec);
+    } catch (Exception e) {
+      throw new HiveException(e);
+    }
+  }
+
   public AbortCompactResponse abortCompactions(AbortCompactionRequest request) throws HiveException {
     try {
       return getMSC().abortCompactions(request);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
index ef976feb54cc..fb5a1066e1c5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
@@ -42,6 +42,7 @@
 import org.apache.hadoop.hive.ql.ddl.table.create.like.CreateTableLikeDesc;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.io.StorageFormatDescriptor;
+import org.apache.hadoop.hive.ql.parse.AlterTableBranchSpec;
 import org.apache.hadoop.hive.ql.parse.AlterTableExecuteSpec;
 import org.apache.hadoop.hive.ql.parse.TransformSpec;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -576,6 +577,14 @@ default void validateSinkDesc(FileSinkDesc sinkDesc) throws SemanticException {
   default void executeOperation(org.apache.hadoop.hive.ql.metadata.Table table, AlterTableExecuteSpec executeSpec) {
   }
 
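+  /**
+   * Invoked for ALTER TABLE ... CREATE BRANCH; a no-op by default. Storage handlers that
+   * support branch operations (e.g. the Iceberg handler) are expected to override it.
+   */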
+  default void alterTableBranchOperation(org.apache.hadoop.hive.ql.metadata.Table table,
+      AlterTableBranchSpec alterBranchSpec) {
+  }
+
   /**
    * Gets whether this storage handler supports snapshots.
    * @return true means snapshots are supported false otherwise
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/AlterTableBranchSpec.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/AlterTableBranchSpec.java
new file mode 100644
index 000000000000..a842218cf5eb
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/AlterTableBranchSpec.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.parse;
+
+import com.google.common.base.MoreObjects;
+
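+/**
+ * Captures the parameters of an ALTER TABLE branch statement: the operation type plus an
+ * operation-specific parameter object (currently only {@link CreateBranchSpec}).
+ */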
+public class AlterTableBranchSpec<T> {
+
+  public enum AlterBranchOperationType {
+    CREATE_BRANCH
+  }
+
+  private final AlterBranchOperationType operationType;
+  private final T operationParams;
+
+  public AlterTableBranchSpec(AlterBranchOperationType type, T value) {
+    this.operationType = type;
+    this.operationParams = value;
+  }
+
+  public AlterBranchOperationType getOperationType() {
+    return operationType;
+  }
+
+  public T getOperationParams() {
+    return operationParams;
+  }
+
+  @Override
+  public String toString() {
+    return MoreObjects.toStringHelper(this).add("operationType", operationType.name())
+        .add("operationParams", operationParams).toString();
+  }
+
+  public static class CreateBranchSpec {
+
+    private final String branchName;
+    private final Long snapshotId;
+    private final Long asOfTime;
+    private final Long maxRefAgeMs;
+    private final Integer minSnapshotsToKeep;
+    private final Long maxSnapshotAgeMs;
+
+    public String getBranchName() {
+      return branchName;
+    }
+
+    public Long getSnapshotId() {
+      return snapshotId;
+    }
+
+    public Long getAsOfTime() {
+      return asOfTime;
+    }
+
+    public Long getMaxRefAgeMs() {
+      return maxRefAgeMs;
+    }
+
+    public Integer getMinSnapshotsToKeep() {
+      return minSnapshotsToKeep;
+    }
+
+    public Long getMaxSnapshotAgeMs() {
+      return maxSnapshotAgeMs;
+    }
+
+    public CreateBranchSpec(String branchName, Long snapshotId, Long asOfTime, Long maxRefAgeMs,
+        Integer minSnapshotsToKeep, Long maxSnapshotAgeMs) {
+      this.branchName = branchName;
+      this.snapshotId = snapshotId;
+      this.asOfTime = asOfTime;
+      this.maxRefAgeMs = maxRefAgeMs;
+      this.minSnapshotsToKeep = minSnapshotsToKeep;
+      this.maxSnapshotAgeMs = maxSnapshotAgeMs;
+    }
+
+    public String toString() {
+      return MoreObjects.toStringHelper(this).add("branchName", branchName).add("snapshotId", snapshotId)
+          .add("asOfTime", asOfTime).add("maxRefAgeMs", maxRefAgeMs).add("minSnapshotsToKeep", minSnapshotsToKeep)
+          .add("maxSnapshotAgeMs", maxSnapshotAgeMs).toString();
+    }
+  }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
index 8cb4c2ab7358..d83bba55da36 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
@@ -77,6 +77,7 @@ public enum HiveOperation {
   ALTERTABLE_OWNER("ALTERTABLE_OWNER", HiveParser.TOK_ALTERTABLE_OWNER, null, null),
   ALTERTABLE_SETPARTSPEC("ALTERTABLE_SETPARTSPEC", HiveParser.TOK_ALTERTABLE_SETPARTSPEC, null, null),
   ALTERTABLE_EXECUTE("ALTERTABLE_EXECUTE", HiveParser.TOK_ALTERTABLE_EXECUTE, null, null),
+  ALTERTABLE_CREATEBRANCH("ALTERTABLE_CREATEBRANCH", HiveParser.TOK_ALTERTABLE_CREATE_BRANCH, null, null),
   ALTERTABLE_CONVERT("ALTERTABLE_CONVERT", HiveParser.TOK_ALTERTABLE_CONVERT, null, null),
   ALTERTABLE_SERIALIZER("ALTERTABLE_SERIALIZER", HiveParser.TOK_ALTERTABLE_SERIALIZER,
       new Privilege[]{Privilege.ALTER_METADATA}, null),
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java
index 15b5c59057c4..a2279d9737ff 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java
@@ -139,6 +139,7 @@ public enum HiveOperationType {
   ALTER_MATERIALIZED_VIEW_REBUILD,
   ALTERTABLE_COMPACT,
   ALTERTABLE_UPDATECOLUMNS,
+  ALTERTABLE_CREATEBRANCH,
   SHOW_COMPACTIONS,
   SHOW_TRANSACTIONS,
   ABORT_TRANSACTIONS,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
index 4d47d10e2d82..75abbf754e8b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
@@ -240,6 +240,8 @@ public HivePrivilegeObjectType getObjectType() {
         PrivRequirement.newIOPrivRequirement(OWNER_PRIV_AR, OWNER_PRIV_AR));
     op2Priv.put(HiveOperationType.ALTERTABLE_ADDCONSTRAINT,
         PrivRequirement.newIOPrivRequirement(OWNER_PRIV_AR, OWNER_PRIV_AR));
+    op2Priv.put(HiveOperationType.ALTERTABLE_CREATEBRANCH,
+        PrivRequirement.newIOPrivRequirement(OWNER_PRIV_AR, OWNER_PRIV_AR));
     // require view ownership for alter/drop view
     op2Priv.put(HiveOperationType.ALTERVIEW_PROPERTIES,