TestIcebergSparkCompatibility.java
@@ -67,6 +67,7 @@
 import static io.trino.tests.product.iceberg.TestIcebergSparkCompatibility.CreateMode.CREATE_TABLE_AND_INSERT;
 import static io.trino.tests.product.iceberg.TestIcebergSparkCompatibility.CreateMode.CREATE_TABLE_AS_SELECT;
 import static io.trino.tests.product.iceberg.TestIcebergSparkCompatibility.CreateMode.CREATE_TABLE_WITH_NO_DATA_AND_INSERT;
+import static io.trino.tests.product.iceberg.util.IcebergTestUtils.getLatestMetadataFilename;
 import static io.trino.tests.product.iceberg.util.IcebergTestUtils.getTableLocation;
 import static io.trino.tests.product.iceberg.util.IcebergTestUtils.stripNamenodeURI;
 import static io.trino.tests.product.utils.QueryExecutors.onHive;
@@ -2524,9 +2525,8 @@ public void testRegisterTableWithTableLocation(StorageFormat storageFormat)

 onSpark().executeQuery(format("CREATE TABLE %s (a INT, b STRING, c BOOLEAN) USING ICEBERG TBLPROPERTIES ('write.format.default' = '%s')", sparkTableName, storageFormat));
 onSpark().executeQuery(format("INSERT INTO %s values(1, 'INDIA', true)", sparkTableName));
-onTrino().executeQuery(format("INSERT INTO %s values(2, 'USA', false)", trinoTableName));

-List<Row> expected = List.of(row(1, "INDIA", true), row(2, "USA", false));
+List<Row> expected = List.of(row(1, "INDIA", true));
 String tableLocation = getTableLocation(trinoTableName);
 // Drop table from hive metastore and use the same table name to register again with the metadata
 dropTableFromMetastore(baseTableName);
@@ -2555,10 +2555,11 @@ public void testRegisterTableWithComments(StorageFormat storageFormat)
 onTrino().executeQuery(format("COMMENT ON COLUMN %s.c is 'c-comment'", trinoTableName));

 String tableLocation = getTableLocation(trinoTableName);
+String metadataFileName = getLatestMetadataFilename(TRINO_CATALOG, TEST_SCHEMA_NAME, baseTableName);
 // Drop table from hive metastore and use the same table name to register again with the metadata
 dropTableFromMetastore(baseTableName);

-onTrino().executeQuery(format("CALL iceberg.system.register_table ('%s', '%s', '%s')", TEST_SCHEMA_NAME, baseTableName, tableLocation));
+onTrino().executeQuery(format("CALL iceberg.system.register_table ('%s', '%s', '%s', '%s')", TEST_SCHEMA_NAME, baseTableName, tableLocation, metadataFileName));

 assertThat(getTableComment(baseTableName)).isEqualTo("my-table-comment");
 assertThat(getColumnComment(baseTableName, "a")).isEqualTo("a-comment");
@@ -2586,10 +2587,11 @@ public void testRegisterTableWithShowCreateTable(StorageFormat storageFormat)
 List<Row> expectedShowCreateTableRows = expectedShowCreateTable.rows().stream().map(columns -> row(columns.toArray())).collect(toImmutableList());

 String tableLocation = getTableLocation(trinoTableName);
+String metadataFileName = getLatestMetadataFilename(TRINO_CATALOG, TEST_SCHEMA_NAME, baseTableName);
 // Drop table from hive metastore and use the same table name to register again with the metadata
 dropTableFromMetastore(baseTableName);

-onTrino().executeQuery(format("CALL iceberg.system.register_table ('%s', '%s', '%s')", TEST_SCHEMA_NAME, baseTableName, tableLocation));
+onTrino().executeQuery(format("CALL iceberg.system.register_table ('%s', '%s', '%s', '%s')", TEST_SCHEMA_NAME, baseTableName, tableLocation, metadataFileName));

 QueryResult actualDescribeTable = onSpark().executeQuery("DESCRIBE TABLE EXTENDED " + sparkTableName);
 QueryResult actualShowCreateTable = onTrino().executeQuery("SHOW CREATE TABLE " + trinoTableName);
@@ -2612,10 +2614,11 @@ public void testRegisterTableWithReInsert(StorageFormat storageFormat)
 onTrino().executeQuery(format("INSERT INTO %s values(2, 'USA', false)", trinoTableName));

 String tableLocation = getTableLocation(trinoTableName);
+String metadataFileName = getLatestMetadataFilename(TRINO_CATALOG, TEST_SCHEMA_NAME, baseTableName);
 // Drop table from hive metastore and use the same table name to register again with the metadata
 dropTableFromMetastore(baseTableName);

-onTrino().executeQuery(format("CALL iceberg.system.register_table ('%s', '%s', '%s')", TEST_SCHEMA_NAME, baseTableName, tableLocation));
+onTrino().executeQuery(format("CALL iceberg.system.register_table ('%s', '%s', '%s', '%s')", TEST_SCHEMA_NAME, baseTableName, tableLocation, metadataFileName));
 onSpark().executeQuery(format("INSERT INTO %s values(3, 'POLAND', true)", sparkTableName));

 List<Row> expected = List.of(row(1, "INDIA", true), row(2, "USA", false), row(3, "POLAND", true));
@@ -2659,13 +2662,14 @@ public void testRegisterTableWithDifferentTableName(StorageFormat storageFormat)
 onTrino().executeQuery(format("INSERT INTO %s values(2, 'USA', false)", trinoTableName));

 String tableLocation = getTableLocation(trinoTableName);
+String metadataFileName = getLatestMetadataFilename(TRINO_CATALOG, TEST_SCHEMA_NAME, baseTableName);
 String baseTableNameNew = baseTableName + "_new";
 String trinoTableNameNew = trinoTableName(baseTableNameNew);
 String sparkTableNameNew = sparkTableName(baseTableNameNew);
 // Drop table from hive metastore and use the same table name to register again with the metadata
 dropTableFromMetastore(baseTableName);

-onTrino().executeQuery(format("CALL iceberg.system.register_table ('%s', '%s', '%s')", TEST_SCHEMA_NAME, baseTableNameNew, tableLocation));
+onTrino().executeQuery(format("CALL iceberg.system.register_table ('%s', '%s', '%s', '%s')", TEST_SCHEMA_NAME, baseTableNameNew, tableLocation, metadataFileName));
 onSpark().executeQuery(format("INSERT INTO %s values(3, 'POLAND', true)", sparkTableNameNew));
 List<Row> expected = List.of(row(1, "INDIA", true), row(2, "USA", false), row(3, "POLAND", true));

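Note: the register-table tests above now capture the latest metadata file name before dropping the table from the metastore and pass it as a fourth argument to the register_table procedure. A minimal sketch of that shared flow, assuming it runs inside the test class so the helpers and constants visible in the diff (trinoTableName, getTableLocation, getLatestMetadataFilename, dropTableFromMetastore, TRINO_CATALOG, TEST_SCHEMA_NAME, onTrino) are in scope; the table name below is illustrative, not taken from the PR:

// Sketch (not part of the diff) of the capture-then-register flow the updated tests share.
String baseTableName = "test_register_table_example";
String trinoTableName = trinoTableName(baseTableName);

// Both values must be captured before the table is dropped from the Hive
// metastore, because the queries behind these helpers need a registered table.
String tableLocation = getTableLocation(trinoTableName);
String metadataFileName = getLatestMetadataFilename(TRINO_CATALOG, TEST_SCHEMA_NAME, baseTableName);

dropTableFromMetastore(baseTableName);

// Re-register the table, pinning it to the metadata file captured above.
onTrino().executeQuery(format(
        "CALL iceberg.system.register_table ('%s', '%s', '%s', '%s')",
        TEST_SCHEMA_NAME, baseTableName, tableLocation, metadataFileName));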
IcebergTestUtils.java
@@ -26,6 +26,11 @@ public static String getTableLocation(String tableName)
 return (String) onTrino().executeQuery("SELECT DISTINCT regexp_replace(\"$path\", '/[^/]*/[^/]*$', '') FROM " + tableName).getOnlyValue();
 }

+public static String getLatestMetadataFilename(String catalog, String schema, String tableName)
+{
+return (String) onTrino().executeQuery("SELECT substring(file, strpos(file, '/', -1) + 1) FROM %s.%s.\"%s$metadata_log_entries\" ORDER BY timestamp DESC LIMIT 1".formatted(catalog, schema, tableName)).getOnlyValue();
+}
+
 public static String stripNamenodeURI(String location)
 {
 return URI.create(location).getPath();
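The new getLatestMetadataFilename helper orders the hidden "$metadata_log_entries" table by timestamp to pick the newest entry, then keeps only the part of its file column after the last '/': strpos(file, '/', -1) locates the last slash and substring takes everything that follows. A self-contained sketch of the same extraction in plain Java, using a made-up example path:

public class MetadataFilenameExtractionSketch
{
    public static void main(String[] args)
    {
        // Example value of the "file" column from "$metadata_log_entries";
        // the path is made up for illustration.
        String file = "hdfs://hadoop-master:9000/user/hive/warehouse/test_table/metadata/00003-2f9a4b1e.metadata.json";

        // Java equivalent of substring(file, strpos(file, '/', -1) + 1):
        // keep everything after the last '/'.
        String metadataFileName = file.substring(file.lastIndexOf('/') + 1);

        System.out.println(metadataFileName); // prints 00003-2f9a4b1e.metadata.json
    }
}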