diff --git a/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/DeltaLakeMetadata.java b/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/DeltaLakeMetadata.java
index 0d54c0639cdf..e56a503381d2 100644
--- a/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/DeltaLakeMetadata.java
+++ b/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/DeltaLakeMetadata.java
@@ -177,7 +177,7 @@
 import static io.trino.plugin.deltalake.transactionlog.DeltaLakeSchemaSupport.serializeSchemaAsJson;
 import static io.trino.plugin.deltalake.transactionlog.DeltaLakeSchemaSupport.serializeStatsAsJson;
 import static io.trino.plugin.deltalake.transactionlog.DeltaLakeSchemaSupport.validateType;
-import static io.trino.plugin.deltalake.transactionlog.MetadataEntry.buildDeltaMetadataConfiguration;
+import static io.trino.plugin.deltalake.transactionlog.MetadataEntry.configurationForNewTable;
 import static io.trino.plugin.deltalake.transactionlog.TransactionLogParser.getMandatoryCurrentVersion;
 import static io.trino.plugin.deltalake.transactionlog.TransactionLogUtil.getTransactionLogDir;
 import static io.trino.plugin.hive.HiveMetadata.PRESTO_QUERY_ID_NAME;
@@ -682,7 +682,7 @@ public void createTable(ConnectorSession session, ConnectorTableMetadata tableMe
                 deltaLakeColumns,
                 partitionColumns,
                 columnComments,
-                buildDeltaMetadataConfiguration(checkpointInterval),
+                configurationForNewTable(checkpointInterval),
                 CREATE_TABLE_OPERATION,
                 session,
                 nodeVersion,
@@ -948,7 +948,7 @@ public Optional<ConnectorOutputMetadata> finishCreateTable(
                 handle.getInputColumns(),
                 handle.getPartitionedBy(),
                 ImmutableMap.of(),
-                buildDeltaMetadataConfiguration(handle.getCheckpointInterval()),
+                configurationForNewTable(handle.getCheckpointInterval()),
                 CREATE_TABLE_AS_OPERATION,
                 session,
                 nodeVersion,
@@ -1017,8 +1017,6 @@ public void setTableComment(ConnectorSession session, ConnectorTableHandle table
                 .map(column -> toColumnHandle(column, column.getName(), column.getType(), partitionColumns))
                 .collect(toImmutableList());
 
-        Optional<Long> checkpointInterval = DeltaLakeTableProperties.getCheckpointInterval(tableMetadata.getProperties());
-
         TransactionLogWriter transactionLogWriter = transactionLogWriterFactory.newWriter(session, handle.getLocation());
         appendTableEntries(
                 commitVersion,
@@ -1027,7 +1025,7 @@ public void setTableComment(ConnectorSession session, ConnectorTableHandle table
                 columns,
                 partitionColumns,
                 getColumnComments(handle.getMetadataEntry()),
-                buildDeltaMetadataConfiguration(checkpointInterval),
+                handle.getMetadataEntry().getConfiguration(),
                 SET_TBLPROPERTIES_OPERATION,
                 session,
                 nodeVersion,
@@ -1064,8 +1062,6 @@ public void setColumnComment(ConnectorSession session, ConnectorTableHandle tabl
                 .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)));
         comment.ifPresent(s -> columnComments.put(deltaLakeColumnHandle.getName(), s));
 
-        Optional<Long> checkpointInterval = DeltaLakeTableProperties.getCheckpointInterval(tableMetadata.getProperties());
-
         TransactionLogWriter transactionLogWriter = transactionLogWriterFactory.newWriter(session, deltaLakeTableHandle.getLocation());
         appendTableEntries(
                 commitVersion,
@@ -1074,7 +1070,7 @@ public void setColumnComment(ConnectorSession session, ConnectorTableHandle tabl
                 columns,
                 partitionColumns,
                 columnComments.buildOrThrow(),
-                buildDeltaMetadataConfiguration(checkpointInterval),
+                deltaLakeTableHandle.getMetadataEntry().getConfiguration(),
                 CHANGE_COLUMN_OPERATION,
                 session,
                 nodeVersion,
@@ -1111,8 +1107,6 @@ public void addColumn(ConnectorSession session, ConnectorTableHandle tableHandle
             columnComments.put(newColumnMetadata.getName(), newColumnMetadata.getComment());
         }
 
-        Optional<Long> checkpointInterval = DeltaLakeTableProperties.getCheckpointInterval(tableMetadata.getProperties());
-
         TransactionLogWriter transactionLogWriter = transactionLogWriterFactory.newWriter(session, handle.getLocation());
         appendTableEntries(
                 commitVersion,
@@ -1121,7 +1115,7 @@ public void addColumn(ConnectorSession session, ConnectorTableHandle tableHandle
                 columnsBuilder.build(),
                 partitionColumns,
                 columnComments.buildOrThrow(),
-                buildDeltaMetadataConfiguration(checkpointInterval),
+                handle.getMetadataEntry().getConfiguration(),
                 ADD_COLUMN_OPERATION,
                 session,
                 nodeVersion,
diff --git a/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/transactionlog/MetadataEntry.java b/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/transactionlog/MetadataEntry.java
index f62af938a4df..b0fd34c4222b 100644
--- a/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/transactionlog/MetadataEntry.java
+++ b/plugin/trino-delta-lake/src/main/java/io/trino/plugin/deltalake/transactionlog/MetadataEntry.java
@@ -153,7 +153,7 @@ public Optional<Long> getCheckpointInterval()
         }
     }
 
-    public static Map<String, String> buildDeltaMetadataConfiguration(Optional<Long> checkpointInterval)
+    public static Map<String, String> configurationForNewTable(Optional<Long> checkpointInterval)
     {
         return checkpointInterval
                 .map(value -> ImmutableMap.of(DELTA_CHECKPOINT_INTERVAL_PROPERTY, String.valueOf(value)))
diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeAlterTableCompatibility.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeAlterTableCompatibility.java
index a3568192d34c..7df963779d50 100644
--- a/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeAlterTableCompatibility.java
+++ b/testing/trino-product-tests/src/main/java/io/trino/tests/product/deltalake/TestDeltaLakeAlterTableCompatibility.java
@@ -15,6 +15,9 @@
 
 import org.testng.annotations.Test;
 
+import java.util.List;
+
+import static com.google.common.collect.Iterables.getOnlyElement;
 import static io.trino.tempto.assertions.QueryAssert.Row.row;
 import static io.trino.tempto.assertions.QueryAssert.assertQueryFailure;
 import static io.trino.tempto.assertions.QueryAssert.assertThat;
@@ -30,6 +33,7 @@
 import static io.trino.tests.product.utils.QueryExecutors.onTrino;
 import static java.lang.String.format;
 import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertTrue;
 
 public class TestDeltaLakeAlterTableCompatibility
         extends BaseTestDeltaLakeS3Storage
@@ -167,4 +171,30 @@ public void testCommentOnColumnUnsupportedWriterVersion()
             onTrino().executeQuery("DROP TABLE delta.default." + tableName);
         }
     }
+
+    @Test(groups = {DELTA_LAKE_DATABRICKS, DELTA_LAKE_OSS, PROFILE_SPECIFIC_TESTS})
+    public void testTrinoAlterTablePreservesTableMetadata()
+    {
+        String tableName = "test_trino_alter_table_preserves_table_metadata_" + randomTableSuffix();
+        String tableDirectory = "databricks-compatibility-test-" + tableName;
+
+        onDelta().executeQuery(format("" +
+                "CREATE TABLE default.%s (col int) " +
+                "USING DELTA LOCATION 's3://%s/%s'" +
+                "TBLPROPERTIES ('delta.appendOnly' = true)",
+                tableName,
+                bucketName,
+                tableDirectory));
+        try {
+            onTrino().executeQuery("COMMENT ON COLUMN delta.default." + tableName + ".col IS 'test column comment'");
+            onTrino().executeQuery("COMMENT ON TABLE delta.default." + tableName + " IS 'test table comment'");
+            onTrino().executeQuery("ALTER TABLE delta.default." + tableName + " ADD COLUMN new_column INT");
+
+            List<?> properties = getOnlyElement(onDelta().executeQuery("SHOW TBLPROPERTIES " + tableName + "(delta.appendOnly)").rows());
+            assertTrue(Boolean.parseBoolean((String) properties.get(1)));
+        }
+        finally {
+            onTrino().executeQuery("DROP TABLE delta.default." + tableName);
+        }
+    }
 }