@@ -171,11 +171,21 @@ public void rollbackDropTable(org.apache.hadoop.hive.metastore.api.Table hmsTable
   @Override
   public void commitDropTable(org.apache.hadoop.hive.metastore.api.Table hmsTable, boolean deleteData) {
     if (deleteData && deleteIcebergTable) {
-      if (!Catalogs.hiveCatalog(conf)) {
-        LOG.info("Dropping with purge all the data for table {}.{}", hmsTable.getDbName(), hmsTable.getTableName());
-        Catalogs.dropTable(conf, catalogProperties);
-      } else {
-        CatalogUtil.dropTableData(deleteIo, deleteMetadata);
-      }
+      try {
+        if (!Catalogs.hiveCatalog(conf)) {
+          LOG.info("Dropping with purge all the data for table {}.{}", hmsTable.getDbName(), hmsTable.getTableName());
+          Catalogs.dropTable(conf, catalogProperties);
+        } else {
+          // do nothing if metadata folder has been deleted already (Hive 4 behaviour for purge=TRUE)
+          if (deleteIo.newInputFile(deleteMetadata.location()).exists()) {
+            CatalogUtil.dropTableData(deleteIo, deleteMetadata);
+          }
+        }
+      } catch (Exception e) {
+        // we want to successfully complete the Hive DROP TABLE command despite catalog-related exceptions here
+        // e.g. we wish to successfully delete a Hive table even if the underlying Hadoop table has already been deleted
+        LOG.warn("Exception during commitDropTable operation for table {}.{}.",
+            hmsTable.getDbName(), hmsTable.getTableName(), e);
+      }
     }
   }
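The upshot: commitDropTable now tolerates an underlying table that is already gone instead of failing the whole Hive DROP TABLE. A minimal sketch of the scenario this guards against, mirroring the new test added below (the identifier, location, and the conf variable are illustrative, not taken from the patch):

    // Hedged sketch: remove the underlying (non-Hive-catalog) Iceberg table first,
    // then the Hive DROP TABLE must still complete.
    Properties properties = new Properties();
    properties.put(Catalogs.NAME, "default.customers");        // assumed identifier
    properties.put(Catalogs.LOCATION, "file:/tmp/customers");  // assumed location
    Catalogs.dropTable(conf, properties);                      // underlying table removed
    // A later "DROP TABLE default.customers" in Hive now completes and merely
    // logs the warning above instead of surfacing the exception.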
@@ -26,6 +26,7 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Properties;
 import java.util.Set;
 import java.util.stream.Collectors;
 import org.apache.hadoop.fs.FileSystem;
@@ -48,6 +49,7 @@
 import org.apache.iceberg.hive.MetastoreUtil;
 import org.apache.iceberg.mr.Catalogs;
 import org.apache.iceberg.mr.InputFormatConfig;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
 import org.apache.iceberg.types.Type;
@@ -56,6 +58,7 @@
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
+import org.junit.Assume;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Rule;
@@ -568,6 +571,45 @@ public void testIcebergAndHmsTableProperties() throws TException, InterruptedException {
     }
   }
 
+  @Test
+  public void testDropTableWithAppendedData() throws IOException {
+    TableIdentifier identifier = TableIdentifier.of("default", "customers");
+
+    testTables.createTable(shell, identifier.name(), HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, SPEC,
+        FileFormat.PARQUET, ImmutableList.of());
+
+    org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier);
+    testTables.appendIcebergTable(shell.getHiveConf(), icebergTable, FileFormat.PARQUET, null,
+        HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS);
+
+    shell.executeStatement("DROP TABLE customers");
+  }
+
+  @Test
+  public void testDropHiveTableWithoutUnderlyingTable() throws IOException {
+    Assume.assumeFalse("Not relevant for HiveCatalog", Catalogs.hiveCatalog(shell.getHiveConf()));
+
+    TableIdentifier identifier = TableIdentifier.of("default", "customers");
+    // Create the Iceberg table in non-HiveCatalog
+    testTables.createIcebergTable(shell.getHiveConf(), identifier.name(),
+        HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, FileFormat.PARQUET,
+        HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS);
+
+    // Create Hive table on top
+    String tableLocation = testTables.locationForCreateTableSQL(identifier);
+    shell.executeStatement(testTables.createHiveTableSQL(identifier,
+        ImmutableMap.of(InputFormatConfig.EXTERNAL_TABLE_PURGE, "TRUE")));
+
+    // Drop the Iceberg table
+    Properties properties = new Properties();
+    properties.put(Catalogs.NAME, identifier.toString());
+    properties.put(Catalogs.LOCATION, tableLocation);
+    Catalogs.dropTable(shell.getHiveConf(), properties);
+
+    // Finally drop the Hive table as well
+    shell.executeStatement("DROP TABLE " + identifier);
+  }
+
   private String getCurrentSnapshotForHiveCatalogTable(org.apache.iceberg.Table icebergTable) {
     return ((BaseMetastoreTableOperations) ((BaseTable) icebergTable).operations()).currentMetadataLocation();
   }
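Taken together, the two tests cover both sides of the fix: testDropTableWithAppendedData drops a table that still has data files, while testDropHiveTableWithoutUnderlyingTable reproduces the original failure by removing the backing Iceberg table before issuing the Hive DROP TABLE. The Assume guard skips the latter for HiveCatalog, where dropping the Iceberg table drops the Hive table itself, so the mismatch cannot arise.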
mr/src/test/java/org/apache/iceberg/mr/hive/TestTables.java (26 additions, 16 deletions)
@@ -106,13 +106,21 @@ public Tables tables() {
    * string which we have to execute. Overridden for HiveCatalog where the Hive table is immediately created
    * during the Iceberg table creation so no extra sql execution is required.
    * @param identifier The table identifier (the namespace should be non-empty and single level)
+   * @param tableProps Optional map of table properties
    * @return The SQL string - which should be executed, null - if it is not needed.
    */
-  public String createHiveTableSQL(TableIdentifier identifier) {
+  public String createHiveTableSQL(TableIdentifier identifier, Map<String, String> tableProps) {
     Preconditions.checkArgument(!identifier.namespace().isEmpty(), "Namespace should not be empty");
     Preconditions.checkArgument(identifier.namespace().levels().length == 1, "Namespace should be single level");
-    return String.format("CREATE TABLE %s.%s STORED BY '%s' %s", identifier.namespace(), identifier.name(),
+    String sql = String.format("CREATE TABLE %s.%s STORED BY '%s' %s", identifier.namespace(), identifier.name(),
         HiveIcebergStorageHandler.class.getName(), locationForCreateTableSQL(identifier));
+    if (tableProps != null && !tableProps.isEmpty()) {
+      String props = tableProps.entrySet().stream()
+          .map(entry -> String.format("'%s'='%s'", entry.getKey(), entry.getValue()))
+          .collect(Collectors.joining(","));
+      sql += " TBLPROPERTIES (" + props + ")";
+    }
+    return sql;
   }
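A brief usage sketch of the extended signature (values are illustrative; the exact LOCATION clause comes from locationForCreateTableSQL and varies by TestTables subclass):

    // Assuming InputFormatConfig.EXTERNAL_TABLE_PURGE resolves to "external.table.purge"
    String sql = testTables.createHiveTableSQL(TableIdentifier.of("default", "customers"),
        ImmutableMap.of(InputFormatConfig.EXTERNAL_TABLE_PURGE, "TRUE"));
    // CREATE TABLE default.customers STORED BY
    //   'org.apache.iceberg.mr.hive.HiveIcebergStorageHandler' LOCATION '...'
    //   TBLPROPERTIES ('external.table.purge'='TRUE')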
@@ -140,7 +148,7 @@ public Table loadTable(TableIdentifier identifier) {
   public Table createTable(TestHiveShell shell, String tableName, Schema schema, FileFormat fileFormat,
       List<Record> records) throws IOException {
     Table table = createIcebergTable(shell.getHiveConf(), tableName, schema, fileFormat, records);
-    String createHiveSQL = createHiveTableSQL(TableIdentifier.of("default", tableName));
+    String createHiveSQL = createHiveTableSQL(TableIdentifier.of("default", tableName), ImmutableMap.of());
     if (createHiveSQL != null) {
       shell.executeStatement(createHiveSQL);
     }
@@ -172,18 +180,20 @@ public Table createTable(TestHiveShell shell, String tableName, Schema schema, P
         PartitionSpecParser.toJson(spec) + "', " +
         "'" + TableProperties.DEFAULT_FILE_FORMAT + "'='" + fileFormat + "')");
 
-    StringBuilder query = new StringBuilder().append("INSERT INTO " + identifier + " VALUES ");
-
-    records.forEach(record -> {
-      query.append("(");
-      query.append(record.struct().fields().stream()
-          .map(field -> getStringValueForInsert(record.getField(field.name()), field.type()))
-          .collect(Collectors.joining(",")));
-      query.append("),");
-    });
-    query.setLength(query.length() - 1);
-
-    shell.executeStatement(query.toString());
+    if (records != null && !records.isEmpty()) {
+      StringBuilder query = new StringBuilder().append("INSERT INTO " + identifier + " VALUES ");
+
+      records.forEach(record -> {
+        query.append("(");
+        query.append(record.struct().fields().stream()
+            .map(field -> getStringValueForInsert(record.getField(field.name()), field.type()))
+            .collect(Collectors.joining(",")));
+        query.append("),");
+      });
+      query.setLength(query.length() - 1);
+
+      shell.executeStatement(query.toString());
+    }
 
     return loadTable(identifier);
   }
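The new null/empty guard lets callers create an empty table (as testDropTableWithAppendedData does with ImmutableList.of()) without emitting an INSERT that has no VALUES. A standalone sketch of the same build-then-trim pattern, with hypothetical rows:

    // Each row is appended with a trailing comma; setLength() trims the last one.
    StringBuilder query = new StringBuilder("INSERT INTO default.customers VALUES ");
    for (String row : java.util.Arrays.asList("('Alice')", "('Bob')")) {
      query.append(row).append(",");
    }
    query.setLength(query.length() - 1);
    // query.toString() -> INSERT INTO default.customers VALUES ('Alice'),('Bob')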
@@ -386,7 +396,7 @@ public String locationForCreateTableSQL(TableIdentifier identifier) {
   }
 
   @Override
-  public String createHiveTableSQL(TableIdentifier identifier) {
+  public String createHiveTableSQL(TableIdentifier identifier, Map<String, String> tblProps) {
     return null;
   }
 }