diff --git a/presto-docs/src/main/sphinx/connector/iceberg.rst b/presto-docs/src/main/sphinx/connector/iceberg.rst
index e8864971fcfcb..fe020a8ca23d5 100644
--- a/presto-docs/src/main/sphinx/connector/iceberg.rst
+++ b/presto-docs/src/main/sphinx/connector/iceberg.rst
@@ -511,6 +511,39 @@ dropping the table from the metadata catalog using ``TRUNCATE TABLE``.
 -----------+------+-----------+---------
 (0 rows)
 
+DELETE
+^^^^^^^^
+
+The Iceberg connector can delete one or more entire partitions from a table using ``DELETE FROM``. For example, to delete rows from the table ``lineitem``::
+
+    DELETE FROM lineitem;
+
+    DELETE FROM lineitem WHERE linenumber = 1;
+
+    DELETE FROM lineitem WHERE linenumber NOT IN (1, 3, 5, 7) AND linestatus IN ('O', 'F');
+
+.. note::
+
+    Columns in the filter must all be identity-transformed partition columns of the target table.
+
+    Filtered columns support only comparison operators, such as EQUALS, LESS THAN, or LESS THAN EQUALS.
+
+    Deletes are only supported against the latest snapshot.
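+
+For example, given a hypothetical table ``partitioned_lineitem`` partitioned with a
+bucket transform on ``orderkey`` and identity transforms on ``linenumber`` and
+``linestatus``, only the identity partition columns may appear in the filter::
+
+    CREATE TABLE partitioned_lineitem
+    WITH (partitioning = ARRAY['bucket(orderkey, 2)', 'linenumber', 'linestatus'])
+    AS SELECT * FROM lineitem;
+
+    -- allowed: deletes the matching partitions in their entirety
+    DELETE FROM partitioned_lineitem WHERE linenumber = 1 AND linestatus = 'F';
+
+    -- fails: orderkey is bucketed rather than identity-transformed
+    DELETE FROM partitioned_lineitem WHERE orderkey = 1;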
+
 DROP TABLE
 ^^^^^^^^^^^
 
diff --git a/presto-iceberg/src/main/java/com/facebook/presto/iceberg/IcebergAbstractMetadata.java b/presto-iceberg/src/main/java/com/facebook/presto/iceberg/IcebergAbstractMetadata.java
index 54560a236a35a..212b9911d7181 100644
--- a/presto-iceberg/src/main/java/com/facebook/presto/iceberg/IcebergAbstractMetadata.java
+++ b/presto-iceberg/src/main/java/com/facebook/presto/iceberg/IcebergAbstractMetadata.java
@@ -49,11 +49,14 @@
 import org.apache.iceberg.DeleteFiles;
 import org.apache.iceberg.FileScanTask;
 import org.apache.iceberg.PartitionField;
+import org.apache.iceberg.PartitionSpec;
 import org.apache.iceberg.PartitionSpecParser;
 import org.apache.iceberg.Schema;
 import org.apache.iceberg.SchemaParser;
 import org.apache.iceberg.Table;
+import org.apache.iceberg.TableScan;
 import org.apache.iceberg.Transaction;
+import org.apache.iceberg.expressions.Expression;
 import org.apache.iceberg.io.CloseableIterable;
 import org.apache.iceberg.types.Type;
 import org.apache.iceberg.types.TypeUtil;
@@ -65,11 +68,14 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import java.util.OptionalLong;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Collectors;
 
 import static com.facebook.presto.common.type.BigintType.BIGINT;
+import static com.facebook.presto.iceberg.ExpressionConverter.toIcebergExpression;
 import static com.facebook.presto.iceberg.IcebergColumnHandle.primitiveIcebergColumnHandle;
 import static com.facebook.presto.iceberg.IcebergErrorCode.ICEBERG_INVALID_SNAPSHOT_ID;
 import static com.facebook.presto.iceberg.IcebergTableProperties.FILE_FORMAT_PROPERTY;
@@ -89,6 +95,7 @@
 import static com.google.common.base.Verify.verify;
 import static com.google.common.collect.ImmutableList.toImmutableList;
 import static com.google.common.collect.ImmutableMap.toImmutableMap;
+import static com.google.common.collect.ImmutableSet.toImmutableSet;
 import static java.lang.String.format;
 import static java.util.Collections.singletonList;
 import static java.util.Objects.requireNonNull;
@@ -414,6 +421,7 @@ public IcebergTableHandle getTableHandle(ConnectorSession session, SchemaTableNa
                 name.getTableName(),
                 name.getTableType(),
                 resolveSnapshotIdByName(table, name),
+                name.getSnapshotId().isPresent(),
                 TupleDomain.all());
     }
 
@@ -438,6 +446,101 @@ public Optional<SystemTable> getSystemTable(ConnectorSession session, SchemaTabl
         return getIcebergSystemTable(tableName, icebergTable);
     }
 
+    @Override
+    public void truncateTable(ConnectorSession session, ConnectorTableHandle tableHandle)
+    {
+        IcebergTableHandle handle = (IcebergTableHandle) tableHandle;
+        Table icebergTable = getIcebergTable(session, handle.getSchemaTableName());
+        try (CloseableIterable<FileScanTask> files = icebergTable.newScan().planFiles()) {
+            removeScanFiles(icebergTable, files);
+        }
+        catch (IOException e) {
+            throw new PrestoException(GENERIC_INTERNAL_ERROR, "failed to scan files for delete", e);
+        }
+    }
+
+    @Override
+    public ConnectorTableHandle beginDelete(ConnectorSession session, ConnectorTableHandle tableHandle)
+    {
+        IcebergTableHandle handle = (IcebergTableHandle) tableHandle;
+        if (handle.isSnapshotSpecified()) {
+            throw new PrestoException(NOT_SUPPORTED, "This connector does not allow deleting data at a specified snapshot");
+        }
+        throw new PrestoException(NOT_SUPPORTED, "This connector only supports delete where one or more partitions are deleted entirely");
+    }
+
+    @Override
+    public boolean supportsMetadataDelete(ConnectorSession session, ConnectorTableHandle tableHandle, Optional<ConnectorTableLayoutHandle> tableLayoutHandle)
+    {
+        IcebergTableHandle handle = (IcebergTableHandle) tableHandle;
+        if (handle.isSnapshotSpecified()) {
+            return false;
+        }
+
+        if (!tableLayoutHandle.isPresent()) {
+            return true;
+        }
+
+        // Allow metadata delete for range filters on partition columns.
+        IcebergTableLayoutHandle layoutHandle = (IcebergTableLayoutHandle) tableLayoutHandle.get();
+
+        TupleDomain<ColumnHandle> domainPredicate = layoutHandle.getTupleDomain();
+        if (domainPredicate.isAll()) {
+            return true;
+        }
+
+        Set<Integer> predicateColumnIds = domainPredicate.getDomains().get().keySet().stream()
+                .map(IcebergColumnHandle.class::cast)
+                .map(IcebergColumnHandle::getId)
+                .collect(toImmutableSet());
+        Table icebergTable = getIcebergTable(session, handle.getSchemaTableName());
+
+        boolean supportsMetadataDelete = true;
+        for (PartitionSpec spec : icebergTable.specs().values()) {
+            // Currently we do not support metadata delete when any partition column in the predicate is not transformed by identity()
+            Set<Integer> partitionColumnSourceIds = spec.fields().stream()
+                    .filter(field -> field.transform().isIdentity())
+                    .map(PartitionField::sourceId)
+                    .collect(Collectors.toSet());
+
+            if (!partitionColumnSourceIds.containsAll(predicateColumnIds)) {
+                supportsMetadataDelete = false;
+                break;
+            }
+        }
+
+        return supportsMetadataDelete;
+    }
+
+    @Override
+    public OptionalLong metadataDelete(ConnectorSession session, ConnectorTableHandle tableHandle, ConnectorTableLayoutHandle tableLayoutHandle)
+    {
+        IcebergTableHandle handle = (IcebergTableHandle) tableHandle;
+        IcebergTableLayoutHandle layoutHandle = (IcebergTableLayoutHandle) tableLayoutHandle;
+
+        Table icebergTable;
+        try {
+            icebergTable = getIcebergTable(session, handle.getSchemaTableName());
+        }
+        catch (Exception e) {
+            throw new TableNotFoundException(handle.getSchemaTableName());
+        }
+
+        TableScan scan = icebergTable.newScan();
+        TupleDomain<ColumnHandle> domainPredicate = layoutHandle.getTupleDomain();
+        if (!domainPredicate.isAll()) {
+            Expression filterExpression = toIcebergExpression(handle.getPredicate());
+            scan = scan.filter(filterExpression);
+        }
+
+        try (CloseableIterable<FileScanTask> files = scan.planFiles()) {
+            return OptionalLong.of(removeScanFiles(icebergTable, files));
+        }
+        catch (IOException e) {
+            throw new PrestoException(GENERIC_INTERNAL_ERROR, "failed to scan files for delete", e);
+        }
+    }
+
     /**
      * Deletes all the files within a particular scan
      *
@@ -456,17 +559,4 @@ private long removeScanFiles(Table icebergTable, Iterable<FileScanTask> scan)
         transaction.commitTransaction();
         return rowsDeleted.get();
     }
-
-    @Override
-    public void truncateTable(ConnectorSession session, ConnectorTableHandle tableHandle)
-    {
-        IcebergTableHandle handle = (IcebergTableHandle) tableHandle;
-        Table icebergTable = getIcebergTable(session, handle.getSchemaTableName());
-        try (CloseableIterable<FileScanTask> files = icebergTable.newScan().planFiles()) {
-            removeScanFiles(icebergTable, files);
-        }
-        catch (IOException e) {
-            throw new PrestoException(GENERIC_INTERNAL_ERROR, "failed to scan files for delete", e);
-        }
-    }
 }
diff --git a/presto-iceberg/src/main/java/com/facebook/presto/iceberg/IcebergTableHandle.java b/presto-iceberg/src/main/java/com/facebook/presto/iceberg/IcebergTableHandle.java
index a32d7400decc0..ba24b8310d531 100644
--- a/presto-iceberg/src/main/java/com/facebook/presto/iceberg/IcebergTableHandle.java
+++ b/presto-iceberg/src/main/java/com/facebook/presto/iceberg/IcebergTableHandle.java
@@ -33,6 +33,7 @@ public class IcebergTableHandle
     private final TableType tableType;
     private final Optional<Long> snapshotId;
     private final TupleDomain<IcebergColumnHandle> predicate;
+    private final boolean snapshotSpecified;
 
     @JsonCreator
     public IcebergTableHandle(
@@ -40,12 +41,14 @@ public IcebergTableHandle(
             @JsonProperty("tableName") String tableName,
             @JsonProperty("tableType") TableType tableType,
             @JsonProperty("snapshotId") Optional<Long> snapshotId,
+            @JsonProperty("snapshotSpecified") boolean snapshotSpecified,
             @JsonProperty("predicate") TupleDomain<IcebergColumnHandle> predicate)
     {
         this.schemaName = requireNonNull(schemaName, "schemaName is null");
         this.tableName = requireNonNull(tableName, "tableName is null");
         this.tableType = requireNonNull(tableType, "tableType is null");
         this.snapshotId = requireNonNull(snapshotId, "snapshotId is null");
+        this.snapshotSpecified = snapshotSpecified;
         this.predicate = requireNonNull(predicate, "predicate is null");
     }
 
@@ -73,6 +76,12 @@ public Optional<Long> getSnapshotId()
         return snapshotId;
     }
 
+    @JsonProperty
+    public boolean isSnapshotSpecified()
+    {
+        return snapshotSpecified;
+    }
+
     @JsonProperty
     public TupleDomain<IcebergColumnHandle> getPredicate()
     {
@@ -104,13 +113,14 @@ public boolean equals(Object o)
                 Objects.equals(tableName, that.tableName) &&
                 tableType == that.tableType &&
                 Objects.equals(snapshotId, that.snapshotId) &&
+                snapshotSpecified == that.snapshotSpecified &&
                 Objects.equals(predicate, that.predicate);
     }
 
     @Override
     public int hashCode()
     {
-        return Objects.hash(schemaName, tableName, tableType, snapshotId, predicate);
+        return Objects.hash(schemaName, tableName, tableType, snapshotId, snapshotSpecified, predicate);
     }
 
     @Override
diff --git a/presto-iceberg/src/main/java/com/facebook/presto/iceberg/IcebergUtil.java b/presto-iceberg/src/main/java/com/facebook/presto/iceberg/IcebergUtil.java
index b163019fe5409..bf20827a8b7ce 100644
--- a/presto-iceberg/src/main/java/com/facebook/presto/iceberg/IcebergUtil.java
+++ b/presto-iceberg/src/main/java/com/facebook/presto/iceberg/IcebergUtil.java
@@ -23,6 +23,7 @@
 import com.facebook.presto.spi.ConnectorSession;
 import com.facebook.presto.spi.PrestoException;
 import com.facebook.presto.spi.SchemaTableName;
+import com.facebook.presto.spi.connector.ConnectorMetadata;
 import com.google.common.collect.ImmutableMap;
 import org.apache.iceberg.BaseTable;
 import org.apache.iceberg.FileFormat;
@@ -57,6 +58,7 @@
 import static com.facebook.presto.iceberg.IcebergSessionProperties.isMergeOnReadModeEnabled;
 import static com.facebook.presto.iceberg.util.IcebergPrestoModelConverters.toIcebergTableIdentifier;
 import static com.facebook.presto.spi.StandardErrorCode.NOT_SUPPORTED;
+import static com.google.common.base.Preconditions.checkArgument;
 import static com.google.common.collect.ImmutableList.toImmutableList;
 import static com.google.common.collect.ImmutableMap.toImmutableMap;
 import static com.google.common.collect.Lists.reverse;
@@ -87,6 +89,13 @@ public static boolean isIcebergTable(com.facebook.presto.hive.metastore.Table ta
         return ICEBERG_TABLE_TYPE_VALUE.equalsIgnoreCase(table.getParameters().get(TABLE_TYPE_PROP));
     }
 
+    public static Table getIcebergTable(ConnectorMetadata metadata, ConnectorSession session, SchemaTableName table)
+    {
+        checkArgument(metadata instanceof IcebergAbstractMetadata, "metadata must be an instance of IcebergAbstractMetadata");
+        IcebergAbstractMetadata icebergMetadata = (IcebergAbstractMetadata) metadata;
+        return icebergMetadata.getIcebergTable(session, table);
+    }
+
     public static Table getHiveIcebergTable(ExtendedHiveMetastore metastore, HdfsEnvironment hdfsEnvironment, ConnectorSession session, SchemaTableName table)
     {
         HdfsContext hdfsContext = new HdfsContext(session, table.getSchemaName(), table.getTableName());
diff --git a/presto-iceberg/src/main/java/com/facebook/presto/iceberg/optimizer/IcebergPlanOptimizer.java b/presto-iceberg/src/main/java/com/facebook/presto/iceberg/optimizer/IcebergPlanOptimizer.java
index ebcfbcad1b94b..90cfcb0b81c30 100644
--- a/presto-iceberg/src/main/java/com/facebook/presto/iceberg/optimizer/IcebergPlanOptimizer.java
+++ b/presto-iceberg/src/main/java/com/facebook/presto/iceberg/optimizer/IcebergPlanOptimizer.java
@@ -17,8 +17,11 @@
 import com.facebook.presto.common.predicate.TupleDomain;
 import com.facebook.presto.common.type.TypeManager;
 import com.facebook.presto.hive.SubfieldExtractor;
+import com.facebook.presto.iceberg.IcebergAbstractMetadata;
 import com.facebook.presto.iceberg.IcebergColumnHandle;
 import com.facebook.presto.iceberg.IcebergTableHandle;
+import com.facebook.presto.iceberg.IcebergTransactionManager;
+import com.facebook.presto.spi.ColumnHandle;
 import com.facebook.presto.spi.ConnectorPlanOptimizer;
 import com.facebook.presto.spi.ConnectorPlanRewriter;
 import com.facebook.presto.spi.ConnectorSession;
@@ -32,14 +35,20 @@
 import com.facebook.presto.spi.relation.DomainTranslator;
 import com.facebook.presto.spi.relation.RowExpression;
 import com.facebook.presto.spi.relation.RowExpressionService;
+import org.apache.iceberg.PartitionField;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Table;
 
 import javax.inject.Inject;
 
 import java.util.Map;
+import java.util.Set;
 import java.util.stream.Collectors;
 
 import static com.facebook.presto.expressions.LogicalRowExpressions.TRUE_CONSTANT;
+import static com.facebook.presto.iceberg.IcebergUtil.getIcebergTable;
 import static com.facebook.presto.spi.ConnectorPlanRewriter.rewriteWith;
+import static com.google.common.collect.ImmutableSet.toImmutableSet;
 import static java.util.Objects.requireNonNull;
 
 public class IcebergPlanOptimizer
@@ -48,19 +57,25 @@ public class IcebergPlanOptimizer
     private final RowExpressionService rowExpressionService;
     private final StandardFunctionResolution functionResolution;
     private final TypeManager typeManager;
+    private final IcebergTransactionManager transactionManager;
 
     @Inject
-    IcebergPlanOptimizer(StandardFunctionResolution functionResolution, RowExpressionService rowExpressionService, TypeManager typeManager)
+    IcebergPlanOptimizer(StandardFunctionResolution functionResolution,
+            RowExpressionService rowExpressionService,
+            TypeManager typeManager,
+            IcebergTransactionManager transactionManager)
     {
         this.functionResolution = requireNonNull(functionResolution, "functionResolution is null");
         this.rowExpressionService = requireNonNull(rowExpressionService, "rowExpressionService is null");
         this.typeManager = requireNonNull(typeManager, "typeManager is null");
+        this.transactionManager = requireNonNull(transactionManager, "transactionManager is null");
     }
 
     @Override
     public PlanNode optimize(PlanNode maxSubplan, ConnectorSession session, VariableAllocator variableAllocator, PlanNodeIdAllocator idAllocator)
     {
-        return rewriteWith(new FilterPushdownRewriter(functionResolution, rowExpressionService, typeManager, idAllocator, session), maxSubplan);
+        return rewriteWith(new FilterPushdownRewriter(functionResolution, rowExpressionService,
+                typeManager, transactionManager, idAllocator, session), maxSubplan);
     }
 
     private static class FilterPushdownRewriter
@@ -71,17 +86,20 @@ private static class FilterPushdownRewriter
         private final StandardFunctionResolution functionResolution;
         private final TypeManager typeManager;
         private final PlanNodeIdAllocator idAllocator;
+        private final IcebergTransactionManager transactionManager;
 
         public FilterPushdownRewriter(
                 StandardFunctionResolution functionResolution,
                 RowExpressionService rowExpressionService,
                 TypeManager typeManager,
+                IcebergTransactionManager transactionManager,
                 PlanNodeIdAllocator idAllocator,
                 ConnectorSession session)
         {
             this.functionResolution = functionResolution;
             this.rowExpressionService = rowExpressionService;
             this.typeManager = typeManager;
+            this.transactionManager = transactionManager;
             this.idAllocator = idAllocator;
             this.session = session;
         }
@@ -119,6 +137,7 @@ public PlanNode visitFilter(FilterNode filter, RewriteContext<Void> context)
                     oldTableHandle.getTableName(),
                     oldTableHandle.getTableType(),
                     oldTableHandle.getSnapshotId(),
+                    oldTableHandle.isSnapshotSpecified(),
                     simplifiedColumnDomain);
             TableScanNode newTableScan = new TableScanNode(
                     tableScan.getSourceLocation(),
@@ -132,6 +151,41 @@ public PlanNode visitFilter(FilterNode filter, RewriteContext<Void> context)
             if (TRUE_CONSTANT.equals(filterPredicate)) {
                 return newTableScan;
             }
+
+            if (TRUE_CONSTANT.equals(decomposedFilter.getRemainingExpression()) && simplifiedColumnDomain.equals(entireColumnDomain)) {
+                Set<Integer> predicateColumnIds = simplifiedColumnDomain.getDomains().get().keySet().stream()
+                        .map(IcebergColumnHandle::getId)
+                        .collect(toImmutableSet());
+
+                IcebergTableHandle tableHandle = (IcebergTableHandle) handle.getConnectorHandle();
+                IcebergAbstractMetadata metadata = (IcebergAbstractMetadata) transactionManager.get(handle.getTransaction());
+                Table icebergTable = getIcebergTable(metadata, session, tableHandle.getSchemaTableName());
+
+                // Check every partition spec of the Iceberg table to make sure the filter predicate can be enforced
+                boolean canEnforced = true;
+                for (PartitionSpec spec : icebergTable.specs().values()) {
+                    // Currently we do not support metadata delete when any partition column in the predicate is not transformed by identity()
+                    Set<Integer> partitionColumnSourceIds = spec.fields().stream()
+                            .filter(field -> field.transform().isIdentity())
+                            .map(PartitionField::sourceId).collect(Collectors.toSet());
+
+                    if (!partitionColumnSourceIds.containsAll(predicateColumnIds)) {
+                        canEnforced = false;
+                        break;
+                    }
+                }
+
+                if (canEnforced) {
+                    return new TableScanNode(
+                            newTableScan.getSourceLocation(),
+                            newTableScan.getId(),
+                            newTableScan.getTable(),
+                            newTableScan.getOutputVariables(),
+                            newTableScan.getAssignments(),
+                            newTableScan.getCurrentConstraint(),
+                            simplifiedColumnDomain.transform(icebergColumnHandle -> (ColumnHandle) icebergColumnHandle));
+                }
+            }
+
             return new FilterNode(filter.getSourceLocation(), idAllocator.getNextId(), newTableScan, filterPredicate);
         }
     }
diff --git a/presto-iceberg/src/test/java/com/facebook/presto/iceberg/IcebergDistributedTestBase.java b/presto-iceberg/src/test/java/com/facebook/presto/iceberg/IcebergDistributedTestBase.java
index 4977f99030d58..36e91f4f9bda7 100644
--- a/presto-iceberg/src/test/java/com/facebook/presto/iceberg/IcebergDistributedTestBase.java
+++ b/presto-iceberg/src/test/java/com/facebook/presto/iceberg/IcebergDistributedTestBase.java
@@ -20,6 +20,9 @@
 import org.intellij.lang.annotations.Language;
 import org.testng.annotations.Test;
 
+import java.util.List;
+import java.util.stream.Collectors;
+
 import static com.facebook.presto.common.type.VarcharType.VARCHAR;
 import static com.facebook.presto.testing.MaterializedResult.resultBuilder;
 import static com.facebook.presto.testing.TestingAccessControlManager.TestingPrivilegeType.SELECT_COLUMN;
@@ -65,6 +68,60 @@ public void testRenameColumn()
     @Override
     public void testDelete()
     {
+        // Test deleting all rows
+        long totalCount = (long) getQueryRunner().execute("CREATE TABLE test_delete as select * from lineitem")
+                .getOnlyValue();
+        assertUpdate("DELETE FROM test_delete", totalCount);
+        assertEquals(getQueryRunner().execute("SELECT count(*) FROM test_delete").getOnlyValue(), 0L);
+        assertQuerySucceeds("DROP TABLE test_delete");
+
+        // Test deleting whole partitions identified by one partition column
+        totalCount = (long) getQueryRunner().execute("CREATE TABLE test_partitioned_drop WITH (partitioning = ARRAY['bucket(orderkey, 2)', 'linenumber', 'linestatus']) as select * from lineitem")
+                .getOnlyValue();
+        long countPart1 = (long) getQueryRunner().execute("SELECT count(*) FROM test_partitioned_drop where linenumber = 1").getOnlyValue();
+        assertUpdate("DELETE FROM test_partitioned_drop WHERE linenumber = 1", countPart1);
+
+        long countPart2 = (long) getQueryRunner().execute("SELECT count(*) FROM test_partitioned_drop where linenumber > 4 and linenumber < 7").getOnlyValue();
+        assertUpdate("DELETE FROM test_partitioned_drop WHERE linenumber > 4 and linenumber < 7", countPart2);
+
+        long newTotalCount = (long) getQueryRunner().execute("SELECT count(*) FROM test_partitioned_drop")
+                .getOnlyValue();
+        assertEquals(totalCount - countPart1 - countPart2, newTotalCount);
+        assertQuerySucceeds("DROP TABLE test_partitioned_drop");
+
+        // Test deleting whole partitions identified by two partition columns
+        totalCount = (long) getQueryRunner().execute("CREATE TABLE test_partitioned_drop WITH (partitioning = ARRAY['bucket(orderkey, 2)', 'linenumber', 'linestatus']) as select * from lineitem")
+                .getOnlyValue();
+        long countPart1F = (long) getQueryRunner().execute("SELECT count(*) FROM test_partitioned_drop where linenumber = 1 and linestatus = 'F'").getOnlyValue();
+        assertUpdate("DELETE FROM test_partitioned_drop WHERE linenumber = 1 and linestatus = 'F'", countPart1F);
+
+        long countPart2O = (long) getQueryRunner().execute("SELECT count(*) FROM test_partitioned_drop where linenumber = 2 and linestatus = 'O'").getOnlyValue();
+        assertUpdate("DELETE FROM test_partitioned_drop WHERE linenumber = 2 and linestatus = 'O'", countPart2O);
+
+        long countPartOther = (long) getQueryRunner().execute("SELECT count(*) FROM test_partitioned_drop where linenumber not in (1, 3, 5, 7) and linestatus in ('O', 'F')").getOnlyValue();
assertUpdate("DELETE FROM test_partitioned_drop WHERE linenumber not in (1, 3, 5, 7) and linestatus in ('O', 'F')", countPartOther); + + newTotalCount = (long) getQueryRunner().execute("SELECT count(*) FROM test_partitioned_drop") + .getOnlyValue(); + assertEquals(totalCount - countPart1F - countPart2O - countPartOther, newTotalCount); + assertQuerySucceeds("DROP TABLE test_partitioned_drop"); + + // Do not support delete with filters about non-identity partition column + String errorMessage1 = "This connector only supports delete where one or more partitions are deleted entirely"; + assertUpdate("CREATE TABLE test_partitioned_drop WITH (partitioning = ARRAY['bucket(orderkey, 2)', 'linenumber', 'linestatus']) as select * from lineitem", totalCount); + assertQueryFails("DELETE FROM test_partitioned_drop WHERE orderkey = 1", errorMessage1); + assertQueryFails("DELETE FROM test_partitioned_drop WHERE partkey > 100", errorMessage1); + assertQueryFails("DELETE FROM test_partitioned_drop WHERE linenumber = 1 and orderkey = 1", errorMessage1); + + // Do not allow delete data at specified snapshot + String errorMessage2 = "This connector do not allow delete data at specified snapshot"; + List snapshots = getQueryRunner().execute("SELECT snapshot_id FROM \"test_partitioned_drop$snapshots\"").getOnlyColumnAsSet() + .stream().map(Long.class::cast).collect(Collectors.toList()); + for (long snapshot : snapshots) { + assertQueryFails("DELETE FROM \"test_partitioned_drop@" + snapshot + "\" WHERE linenumber = 1", errorMessage2); + } + + assertQuerySucceeds("DROP TABLE test_partitioned_drop"); } @Test diff --git a/presto-main/src/main/java/com/facebook/presto/util/GraphvizPrinter.java b/presto-main/src/main/java/com/facebook/presto/util/GraphvizPrinter.java index 02db09f3d8c5a..e9df17dea6914 100644 --- a/presto-main/src/main/java/com/facebook/presto/util/GraphvizPrinter.java +++ b/presto-main/src/main/java/com/facebook/presto/util/GraphvizPrinter.java @@ -50,6 +50,7 @@ import com.facebook.presto.sql.planner.plan.JoinNode; import com.facebook.presto.sql.planner.plan.LateralJoinNode; import com.facebook.presto.sql.planner.plan.MergeJoinNode; +import com.facebook.presto.sql.planner.plan.MetadataDeleteNode; import com.facebook.presto.sql.planner.plan.PlanFragmentId; import com.facebook.presto.sql.planner.plan.RemoteSourceNode; import com.facebook.presto.sql.planner.plan.RowNumberNode; @@ -116,6 +117,7 @@ private enum NodeType TABLE_WRITER, TABLE_WRITER_MERGE, TABLE_FINISH, + METADATA_DELETE, INDEX_SOURCE, UNNEST, ANALYZE_FINISH, @@ -142,6 +144,7 @@ private enum NodeType .put(NodeType.TABLE_WRITER, "cyan") .put(NodeType.TABLE_WRITER_MERGE, "cyan4") .put(NodeType.TABLE_FINISH, "hotpink") + .put(NodeType.METADATA_DELETE, "garnet") .put(NodeType.INDEX_SOURCE, "dodgerblue3") .put(NodeType.UNNEST, "crimson") .put(NodeType.SAMPLE, "goldenrod4") @@ -300,6 +303,13 @@ public Void visitTableFinish(TableFinishNode node, Void context) return node.getSource().accept(this, context); } + @Override + public Void visitMetadataDelete(MetadataDeleteNode node, Void context) + { + printNode(node, format("MetadataDeleteNode[%s]", Joiner.on(", ").join(node.getOutputVariables())), NODE_COLORS.get(NodeType.METADATA_DELETE)); + return null; + } + @Override public Void visitExplainAnalyze(ExplainAnalyzeNode node, Void context) {