diff --git a/spark/v3.4/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestComputePartitionStatsProcedure.java b/spark/v3.4/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestComputePartitionStatsProcedure.java
new file mode 100644
index 000000000000..37423fc14736
--- /dev/null
+++ b/spark/v3.4/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestComputePartitionStatsProcedure.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark.extensions;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+import java.io.File;
+import java.util.List;
+import org.apache.iceberg.ParameterizedTestExtension;
+import org.apache.iceberg.PartitionStatisticsFile;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.catalog.Namespace;
+import org.apache.iceberg.catalog.TableIdentifier;
+import org.apache.iceberg.spark.Spark3Util;
+import org.apache.spark.sql.catalyst.analysis.NoSuchTableException;
+import org.apache.spark.sql.catalyst.parser.ParseException;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.TestTemplate;
+import org.junit.jupiter.api.extension.ExtendWith;
+
+@ExtendWith(ParameterizedTestExtension.class)
+public class TestComputePartitionStatsProcedure extends ExtensionsTestBase {
+
+  @AfterEach
+  public void removeTable() {
+    sql("DROP TABLE IF EXISTS %s", tableName);
+  }
+
+  @TestTemplate
+  public void procedureOnEmptyTable() {
+    sql(
+        "CREATE TABLE %s (id bigint NOT NULL, data string) USING iceberg PARTITIONED BY (data)",
+        tableName);
+    List<Object[]> result =
+        sql("CALL %s.system.compute_partition_stats('%s')", catalogName, tableIdent);
+    assertThat(result).isEmpty();
+  }
+
+  @TestTemplate
+  public void procedureWithPositionalArgs() throws NoSuchTableException, ParseException {
+    sql(
+        "CREATE TABLE %s (id bigint NOT NULL, data string) USING iceberg PARTITIONED BY (data)",
+        tableName);
+    sql("INSERT INTO TABLE %s VALUES (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')", tableName);
+    List<Object[]> output =
+        sql("CALL %s.system.compute_partition_stats('%s')", catalogName, tableIdent);
+    assertThat(output.get(0)).isNotEmpty();
+    Table table = Spark3Util.loadIcebergTable(spark, tableName);
+    assertThat(table.partitionStatisticsFiles()).hasSize(1);
+    PartitionStatisticsFile statisticsFile = table.partitionStatisticsFiles().get(0);
+    assertThat(statisticsFile.path()).isEqualTo(output.get(0)[0].toString());
+    assertThat(statisticsFile.snapshotId()).isEqualTo(table.currentSnapshot().snapshotId());
+    assertThat(new File(statisticsFile.path().replace("file:", ""))).exists();
+  }
+
+  @TestTemplate
+  public void procedureWithNamedArgs() throws NoSuchTableException, ParseException {
+    sql(
+        "CREATE TABLE %s (id bigint NOT NULL, data string) USING iceberg PARTITIONED BY (data)",
+        tableName);
+    sql("INSERT INTO TABLE %s VALUES (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')", tableName);
+    sql("ALTER TABLE %s CREATE BRANCH `b1`", tableName);
+    Table table = Spark3Util.loadIcebergTable(spark, tableName);
+    long branchSnapshotId = table.refs().get("b1").snapshotId();
+    sql("INSERT INTO TABLE %s VALUES (5, 'e'), (6, 'f'), (7, 'g'), (8, 'h')", tableName);
+
+    List<Object[]> output =
+        sql(
+            "CALL %s.system.compute_partition_stats(table => '%s', snapshot_id => %s)",
+            catalogName, tableIdent, branchSnapshotId);
+    table.refresh();
+    assertThat(table.partitionStatisticsFiles()).hasSize(1);
+    PartitionStatisticsFile statisticsFile = table.partitionStatisticsFiles().get(0);
+    assertThat(statisticsFile.path()).isEqualTo(output.get(0)[0].toString());
+    // should be from the branch's snapshot instead of the latest snapshot of the table
+    assertThat(statisticsFile.snapshotId()).isEqualTo(branchSnapshotId);
+    assertThat(new File(statisticsFile.path().replace("file:", ""))).exists();
+  }
+
+  @TestTemplate
+  public void procedureWithInvalidSnapshotId() {
+    sql(
+        "CREATE TABLE %s (id bigint NOT NULL, data string) USING iceberg PARTITIONED BY (data)",
+        tableName);
+    assertThatThrownBy(
+            () ->
+                sql(
+                    "CALL %s.system.compute_partition_stats(table => '%s', snapshot_id => 42)",
+                    catalogName, tableIdent))
+        .isInstanceOf(IllegalArgumentException.class)
+        .hasMessageContaining("Snapshot not found: 42");
+  }
+
+  @TestTemplate
+  public void procedureWithInvalidTable() {
+    assertThatThrownBy(
+            () ->
+                sql(
+                    "CALL %s.system.compute_partition_stats(table => '%s')",
+                    catalogName, TableIdentifier.of(Namespace.of("default"), "abcd")))
+        .isInstanceOf(RuntimeException.class)
+        .hasMessageContaining("Couldn't load table");
+  }
+}
diff --git a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/ComputePartitionStatsSparkAction.java b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/ComputePartitionStatsSparkAction.java
new file mode 100644
index 000000000000..d46fa86c125f
--- /dev/null
+++ b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/ComputePartitionStatsSparkAction.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark.actions;
+
+import java.io.IOException;
+import org.apache.iceberg.PartitionStatisticsFile;
+import org.apache.iceberg.PartitionStatsHandler;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.actions.ComputePartitionStats;
+import org.apache.iceberg.actions.ImmutableComputePartitionStats;
+import org.apache.iceberg.exceptions.RuntimeIOException;
+import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
+import org.apache.iceberg.spark.JobGroupInfo;
+import org.apache.spark.sql.SparkSession;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Computes partition stats incrementally, starting after the latest snapshot that already has a
+ * partition stats file and ending at the given snapshot (the current snapshot if none is
+ * specified), then merges the new stats with the existing ones and writes the combined result
+ * into a {@link PartitionStatisticsFile}. Falls back to a full computation if no previous
+ * statistics file exists. The resulting {@link PartitionStatisticsFile} is also registered in the
+ * table metadata.
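+ *
+ * <p>Illustrative usage sketch (the table name below is a placeholder; the calls mirror the ones
+ * exercised in the tests added by this change):
+ *
+ * <pre>{@code
+ * Table table = Spark3Util.loadIcebergTable(spark, "db.sample");
+ *
+ * // compute and register partition stats for the current snapshot
+ * PartitionStatisticsFile statsFile =
+ *     SparkActions.get().computePartitionStats(table).execute().statisticsFile();
+ *
+ * // or compute them for a specific snapshot, e.g. the head of a branch
+ * long snapshotId = table.refs().get("b1").snapshotId();
+ * SparkActions.get().computePartitionStats(table).snapshot(snapshotId).execute();
+ * }</pre>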
+ */
+public class ComputePartitionStatsSparkAction
+    extends BaseSparkAction<ComputePartitionStatsSparkAction> implements ComputePartitionStats {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ComputePartitionStatsSparkAction.class);
+  private static final Result EMPTY_RESULT =
+      ImmutableComputePartitionStats.Result.builder().build();
+
+  private final Table table;
+  private Snapshot snapshot;
+
+  ComputePartitionStatsSparkAction(SparkSession spark, Table table) {
+    super(spark);
+    this.table = table;
+    this.snapshot = table.currentSnapshot();
+  }
+
+  @Override
+  protected ComputePartitionStatsSparkAction self() {
+    return this;
+  }
+
+  @Override
+  public ComputePartitionStats snapshot(long newSnapshotId) {
+    Snapshot newSnapshot = table.snapshot(newSnapshotId);
+    Preconditions.checkArgument(newSnapshot != null, "Snapshot not found: %s", newSnapshotId);
+    this.snapshot = newSnapshot;
+    return this;
+  }
+
+  @Override
+  public Result execute() {
+    if (snapshot == null) {
+      LOG.info("No snapshot to compute partition stats for table {}", table.name());
+      return EMPTY_RESULT;
+    }
+
+    JobGroupInfo info = newJobGroupInfo("COMPUTE-PARTITION-STATS", jobDesc());
+    return withJobGroupInfo(info, this::doExecute);
+  }
+
+  private Result doExecute() {
+    LOG.info(
+        "Computing partition stats for {} (snapshot {})", table.name(), snapshot.snapshotId());
+    PartitionStatisticsFile statisticsFile;
+    try {
+      statisticsFile =
+          PartitionStatsHandler.computeAndWriteStatsFile(table, snapshot.snapshotId());
+    } catch (IOException e) {
+      throw new RuntimeIOException(e);
+    }
+
+    if (statisticsFile == null) {
+      return EMPTY_RESULT;
+    }
+
+    table.updatePartitionStatistics().setPartitionStatistics(statisticsFile).commit();
+    return ImmutableComputePartitionStats.Result.builder().statisticsFile(statisticsFile).build();
+  }
+
+  private String jobDesc() {
+    return String.format(
+        "Computing partition stats for %s (snapshot=%s)", table.name(), snapshot.snapshotId());
+  }
+}
diff --git a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/SparkActions.java b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/SparkActions.java
index aa4ef987e788..b7361c336a69 100644
--- a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/SparkActions.java
+++ b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/SparkActions.java
@@ -20,6 +20,7 @@
 import org.apache.iceberg.Table;
 import org.apache.iceberg.actions.ActionsProvider;
+import org.apache.iceberg.actions.ComputePartitionStats;
 import org.apache.iceberg.actions.ComputeTableStats;
 import org.apache.iceberg.actions.RemoveDanglingDeleteFiles;
 import org.apache.iceberg.spark.Spark3Util;
@@ -104,6 +105,11 @@ public ComputeTableStats computeTableStats(Table table) {
     return new ComputeTableStatsSparkAction(spark, table);
   }
 
+  @Override
+  public ComputePartitionStats computePartitionStats(Table table) {
+    return new ComputePartitionStatsSparkAction(spark, table);
+  }
+
   @Override
   public RemoveDanglingDeleteFiles removeDanglingDeleteFiles(Table table) {
     return new RemoveDanglingDeletesSparkAction(spark, table);
diff --git a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/procedures/ComputePartitionStatsProcedure.java b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/procedures/ComputePartitionStatsProcedure.java
new file mode 100644
index 000000000000..c82cbbc216f6
--- /dev/null
+++ b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/procedures/ComputePartitionStatsProcedure.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark.procedures;
+
+import org.apache.iceberg.PartitionStatisticsFile;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.actions.ComputePartitionStats;
+import org.apache.iceberg.actions.ComputePartitionStats.Result;
+import org.apache.iceberg.spark.actions.SparkActions;
+import org.apache.iceberg.spark.procedures.SparkProcedures.ProcedureBuilder;
+import org.apache.spark.sql.catalyst.InternalRow;
+import org.apache.spark.sql.connector.catalog.Identifier;
+import org.apache.spark.sql.connector.catalog.TableCatalog;
+import org.apache.spark.sql.connector.iceberg.catalog.ProcedureParameter;
+import org.apache.spark.sql.types.DataTypes;
+import org.apache.spark.sql.types.Metadata;
+import org.apache.spark.sql.types.StructField;
+import org.apache.spark.sql.types.StructType;
+import org.apache.spark.unsafe.types.UTF8String;
+
+/**
+ * A procedure that computes partition stats incrementally, starting from the last snapshot that
+ * has a partition stats file and ending at the given snapshot (the current snapshot if none is
+ * specified), merges the partition stats, and writes the combined result into a {@link
+ * PartitionStatisticsFile}. Falls back to a full computation if no previous statistics file
+ * exists. The resulting {@link PartitionStatisticsFile} is also registered in the table metadata.
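+ *
+ * <p>Illustrative SQL usage (catalog and table names are placeholders; the procedure name and
+ * parameter names are the ones registered by this change):
+ *
+ * <pre>
+ *   CALL spark_catalog.system.compute_partition_stats(table => 'db.sample');
+ *   CALL spark_catalog.system.compute_partition_stats(table => 'db.sample', snapshot_id => 123);
+ * </pre>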
+ *
+ * @see SparkActions#computePartitionStats(Table)
+ */
+public class ComputePartitionStatsProcedure extends BaseProcedure {
+
+  private static final ProcedureParameter TABLE_PARAM =
+      ProcedureParameter.required("table", DataTypes.StringType);
+  private static final ProcedureParameter SNAPSHOT_ID_PARAM =
+      ProcedureParameter.optional("snapshot_id", DataTypes.LongType);
+
+  private static final ProcedureParameter[] PARAMETERS =
+      new ProcedureParameter[] {TABLE_PARAM, SNAPSHOT_ID_PARAM};
+
+  private static final StructType OUTPUT_TYPE =
+      new StructType(
+          new StructField[] {
+            new StructField(
+                "partition_statistics_file", DataTypes.StringType, true, Metadata.empty())
+          });
+
+  public static ProcedureBuilder builder() {
+    return new Builder<ComputePartitionStatsProcedure>() {
+      @Override
+      protected ComputePartitionStatsProcedure doBuild() {
+        return new ComputePartitionStatsProcedure(tableCatalog());
+      }
+    };
+  }
+
+  private ComputePartitionStatsProcedure(TableCatalog tableCatalog) {
+    super(tableCatalog);
+  }
+
+  @Override
+  public ProcedureParameter[] parameters() {
+    return PARAMETERS;
+  }
+
+  @Override
+  public StructType outputType() {
+    return OUTPUT_TYPE;
+  }
+
+  @Override
+  public InternalRow[] call(InternalRow args) {
+    ProcedureInput input = new ProcedureInput(spark(), tableCatalog(), PARAMETERS, args);
+    Identifier tableIdent = input.ident(TABLE_PARAM);
+    Long snapshotId = input.asLong(SNAPSHOT_ID_PARAM, null);
+
+    return modifyIcebergTable(
+        tableIdent,
+        table -> {
+          ComputePartitionStats action = actions().computePartitionStats(table);
+          if (snapshotId != null) {
+            action.snapshot(snapshotId);
+          }
+
+          return toOutputRows(action.execute());
+        });
+  }
+
+  private InternalRow[] toOutputRows(Result result) {
+    PartitionStatisticsFile statisticsFile = result.statisticsFile();
+    if (statisticsFile != null) {
+      InternalRow row = newInternalRow(UTF8String.fromString(statisticsFile.path()));
+      return new InternalRow[] {row};
+    } else {
+      return new InternalRow[0];
+    }
+  }
+
+  @Override
+  public String description() {
+    return "ComputePartitionStatsProcedure";
+  }
+}
diff --git a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/procedures/SparkProcedures.java b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/procedures/SparkProcedures.java
index 353970443025..82f44996c8e1 100644
--- a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/procedures/SparkProcedures.java
+++ b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/procedures/SparkProcedures.java
@@ -62,6 +62,7 @@ private static Map<String, Supplier<ProcedureBuilder>> initProcedureBuilders() {
     mapBuilder.put("rewrite_position_delete_files", RewritePositionDeleteFilesProcedure::builder);
     mapBuilder.put("fast_forward", FastForwardBranchProcedure::builder);
     mapBuilder.put("compute_table_stats", ComputeTableStatsProcedure::builder);
+    mapBuilder.put("compute_partition_stats", ComputePartitionStatsProcedure::builder);
     mapBuilder.put("rewrite_table_path", RewriteTablePathProcedure::builder);
     return mapBuilder.build();
   }
diff --git a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/actions/TestComputePartitionStatsAction.java b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/actions/TestComputePartitionStatsAction.java
new file mode 100644
index 000000000000..3a1b71b38091
--- /dev/null
+++ b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/actions/TestComputePartitionStatsAction.java
@@ -0,0 +1,309 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.spark.actions;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+import java.io.IOException;
+import java.util.List;
+import org.apache.iceberg.Files;
+import org.apache.iceberg.PartitionStatisticsFile;
+import org.apache.iceberg.PartitionStats;
+import org.apache.iceberg.PartitionStatsHandler;
+import org.apache.iceberg.Partitioning;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.StructLike;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.data.GenericRecord;
+import org.apache.iceberg.io.CloseableIterable;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.spark.CatalogTestBase;
+import org.apache.iceberg.types.Types;
+import org.assertj.core.groups.Tuple;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.TestTemplate;
+
+public class TestComputePartitionStatsAction extends CatalogTestBase {
+
+  private static final int DEFAULT_SPEC_ID = 0;
+  private static final long DEFAULT_POS_DEL_RECORD_COUNT = 0L;
+  private static final int DEFAULT_POS_DEL_FILE_COUNT = 0;
+  private static final long DEFAULT_EQ_DEL_RECORD_COUNT = 0L;
+  private static final int DEFAULT_EQ_DEL_FILE_COUNT = 0;
+  private static final Long DEFAULT_TOTAL_RECORD_COUNT = null;
+
+  @AfterEach
+  public void removeTable() {
+    sql("DROP TABLE IF EXISTS %s", tableName);
+  }
+
+  @TestTemplate
+  public void emptyTable() {
+    createPartitionedTable();
+    Table table = validationCatalog.loadTable(tableIdent);
+    ComputePartitionStatsSparkAction.Result result =
+        SparkActions.get().computePartitionStats(table).execute();
+    assertThat(result.statisticsFile()).isNull();
+  }
+
+  @TestTemplate
+  public void emptyBranch() {
+    createPartitionedTable();
+    Table table = validationCatalog.loadTable(tableIdent);
+    table.manageSnapshots().createBranch("b1").commit();
+    ComputePartitionStatsSparkAction.Result result =
+        SparkActions.get()
+            .computePartitionStats(table)
+            .snapshot(table.refs().get("b1").snapshotId())
+            .execute();
+    assertThat(result.statisticsFile()).isNull();
+  }
+
+  @TestTemplate
+  public void invalidSnapshot() {
+    createPartitionedTable();
+    Table table = validationCatalog.loadTable(tableIdent);
+    assertThatThrownBy(
+            () -> SparkActions.get().computePartitionStats(table).snapshot(42L).execute())
+        .isInstanceOf(IllegalArgumentException.class)
+        .hasMessageContaining("Snapshot not found: 42");
+  }
+
+  @TestTemplate
+  public void partitionStatsComputeOnLatestSnapshot() throws IOException {
+    createPartitionedTable();
+    // foo, A -> 4 records,
+    // foo, B -> 2 records,
+    // bar, A -> 2 records,
+    // bar, B -> 1 record
+    sql(
+        "INSERT into %s values (0, 'foo', 'A'), (1, 'foo', 'A'), (2, 'foo', 'B'), (3, 'foo', 'B')",
+        tableName);
+    Table table = validationCatalog.loadTable(tableIdent);
+    Snapshot snapshot1 = table.currentSnapshot();
+    sql("INSERT into %s values(4, 'bar', 'A'), (5, 'bar', 'A'), (6, 'bar', 'B')", tableName);
+    table.refresh();
+    Snapshot snapshot2 = table.currentSnapshot();
+    sql("INSERT into %s values(7, 'foo', 'A'), (8, 'foo', 'A')", tableName);
+    // snapshot3 is unused for partition stats as the same partition is modified by snapshot4
+
+    // delete one record of foo, A
+    sql("DELETE FROM %s WHERE c1=1", tableName);
+    table.refresh();
+    Snapshot snapshot4 = table.currentSnapshot();
+
+    assertThat(table.partitionStatisticsFiles()).isEmpty();
+
+    PartitionStatisticsFile statisticsFile =
+        SparkActions.get().computePartitionStats(table).execute().statisticsFile();
+    assertThat(statisticsFile.fileSizeInBytes()).isGreaterThan(0);
+    assertThat(statisticsFile.snapshotId()).isEqualTo(snapshot4.snapshotId());
+    // check table metadata registration
+    assertThat(table.partitionStatisticsFiles()).containsExactly(statisticsFile);
+
+    Types.StructType partitionType = Partitioning.partitionType(table);
+    Schema dataSchema = PartitionStatsHandler.schema(partitionType);
+    validatePartitionStats(
+        statisticsFile,
+        dataSchema,
+        Tuple.tuple(
+            partitionRecord(partitionType, "foo", "A"),
+            DEFAULT_SPEC_ID,
+            4L, // dataRecordCount (total 4 records for this partition)
+            2, // dataFileCount
+            totalDataFileSizeInBytes("foo", "A"),
+            1L, // positionDeleteRecordCount (from delete operation)
+            1, // positionDeleteFileCount (from delete operation)
+            DEFAULT_EQ_DEL_RECORD_COUNT,
+            DEFAULT_EQ_DEL_FILE_COUNT,
+            DEFAULT_TOTAL_RECORD_COUNT,
+            snapshot4.timestampMillis(), // lastUpdatedAt (last modified by snapshot4)
+            snapshot4.snapshotId() // lastUpdatedSnapshotId
+            ),
+        Tuple.tuple(
+            partitionRecord(partitionType, "foo", "B"),
+            DEFAULT_SPEC_ID,
+            2L, // dataRecordCount
+            1, // dataFileCount
+            totalDataFileSizeInBytes("foo", "B"),
+            DEFAULT_POS_DEL_RECORD_COUNT,
+            DEFAULT_POS_DEL_FILE_COUNT,
+            DEFAULT_EQ_DEL_RECORD_COUNT,
+            DEFAULT_EQ_DEL_FILE_COUNT,
+            DEFAULT_TOTAL_RECORD_COUNT,
+            snapshot1.timestampMillis(), // lastUpdatedAt (added by snapshot1)
+            snapshot1.snapshotId() // lastUpdatedSnapshotId
+            ),
+        Tuple.tuple(
+            partitionRecord(partitionType, "bar", "A"),
+            DEFAULT_SPEC_ID,
+            2L, // dataRecordCount
+            1, // dataFileCount
+            totalDataFileSizeInBytes("bar", "A"),
+            DEFAULT_POS_DEL_RECORD_COUNT,
+            DEFAULT_POS_DEL_FILE_COUNT,
+            DEFAULT_EQ_DEL_RECORD_COUNT,
+            DEFAULT_EQ_DEL_FILE_COUNT,
+            DEFAULT_TOTAL_RECORD_COUNT,
+            snapshot2.timestampMillis(), // lastUpdatedAt (added by snapshot2)
+            snapshot2.snapshotId() // lastUpdatedSnapshotId
+            ),
+        Tuple.tuple(
+            partitionRecord(partitionType, "bar", "B"),
+            DEFAULT_SPEC_ID,
+            1L, // dataRecordCount
+            1, // dataFileCount
+            totalDataFileSizeInBytes("bar", "B"),
+            DEFAULT_POS_DEL_RECORD_COUNT,
+            DEFAULT_POS_DEL_FILE_COUNT,
+            DEFAULT_EQ_DEL_RECORD_COUNT,
+            DEFAULT_EQ_DEL_FILE_COUNT,
+            DEFAULT_TOTAL_RECORD_COUNT,
+            snapshot2.timestampMillis(), // lastUpdatedAt
+            snapshot2.snapshotId() // lastUpdatedSnapshotId
+            ));
+  }
+
+  @TestTemplate
+  public void partitionStatsComputeOnSnapshot() throws IOException {
+    createPartitionedTableV1();
+    // foo, A -> 2 records,
+    // foo, B -> 1 record,
+    // bar, A -> 2 records
+    sql("INSERT into %s values (0, 'foo', 'A'), (1, 'foo', 'A'), (2, 'foo', 'B')", tableName);
+    Table table = validationCatalog.loadTable(tableIdent);
+    Snapshot snapshot1 = table.currentSnapshot();
+    sql("INSERT into %s values(3, 'bar', 'A'), (4, 'bar', 'A')", tableName);
'A')", tableName); + table.refresh(); + + assertThat(table.partitionStatisticsFiles()).isEmpty(); + + PartitionStatisticsFile statisticsFile = + SparkActions.get() + .computePartitionStats(table) + .snapshot(snapshot1.snapshotId()) + .execute() + .statisticsFile(); + assertThat(statisticsFile.fileSizeInBytes()).isGreaterThan(0); + // should be mapped to snapshot1 instead of latest snapshot + assertThat(statisticsFile.snapshotId()).isEqualTo(snapshot1.snapshotId()); + // check table metadata registration + assertThat(table.partitionStatisticsFiles()).containsExactly(statisticsFile); + + Types.StructType partitionType = Partitioning.partitionType(table); + Schema dataSchema = PartitionStatsHandler.schema(partitionType); + // should contain stats for only partitions of snapshot1 (no entry for partition bar, A) + validatePartitionStats( + statisticsFile, + dataSchema, + Tuple.tuple( + partitionRecord(partitionType, "foo", "A"), + DEFAULT_SPEC_ID, + 2L, // dataRecordCount + 1, // dataFileCount + totalDataFileSizeInBytes("foo", "A"), + DEFAULT_POS_DEL_RECORD_COUNT, + DEFAULT_POS_DEL_FILE_COUNT, + DEFAULT_EQ_DEL_RECORD_COUNT, + DEFAULT_EQ_DEL_FILE_COUNT, + DEFAULT_TOTAL_RECORD_COUNT, + snapshot1.timestampMillis(), // lastUpdatedAt + snapshot1.snapshotId()), // lastUpdatedSnapshotId + Tuple.tuple( + partitionRecord(partitionType, "foo", "B"), + DEFAULT_SPEC_ID, + 1L, // dataRecordCount + 1, // dataFileCount + totalDataFileSizeInBytes("foo", "B"), + DEFAULT_POS_DEL_RECORD_COUNT, + DEFAULT_POS_DEL_FILE_COUNT, + DEFAULT_EQ_DEL_RECORD_COUNT, + DEFAULT_EQ_DEL_FILE_COUNT, + DEFAULT_TOTAL_RECORD_COUNT, + snapshot1.timestampMillis(), // lastUpdatedAt + snapshot1.snapshotId() // lastUpdatedSnapshotId + )); + + // try again on same snapshot + PartitionStatisticsFile newStatsFile = + SparkActions.get() + .computePartitionStats(table) + .snapshot(snapshot1.snapshotId()) + .execute() + .statisticsFile(); + assertThat(newStatsFile).isEqualTo(statisticsFile); + } + + private long totalDataFileSizeInBytes(String col1, String col2) { + return (long) + sql( + "SELECT sum(file_size_in_bytes) FROM %s.data_files WHERE partition.c2 = '%s' AND partition.c3 = '%s'", + tableName, col1, col2) + .get(0)[0]; + } + + private void createPartitionedTable() { + sql( + "CREATE TABLE %s (c1 int, c2 string, c3 string) USING iceberg PARTITIONED BY (c2, c3) TBLPROPERTIES('write.delete.mode'='merge-on-read')", + tableName); + } + + private void createPartitionedTableV1() { + sql( + "CREATE TABLE %s (c1 int, c2 string, c3 string) USING iceberg PARTITIONED BY (c2, c3) TBLPROPERTIES('format-version'='1')", + tableName); + } + + private void validatePartitionStats( + PartitionStatisticsFile result, Schema recordSchema, Tuple... 
+    // read the partition entries from the stats file
+    List<PartitionStats> partitionStats;
+    try (CloseableIterable<PartitionStats> recordIterator =
+        PartitionStatsHandler.readPartitionStatsFile(
+            recordSchema, Files.localInput(result.path()))) {
+      partitionStats = Lists.newArrayList(recordIterator);
+    }
+
+    assertThat(partitionStats)
+        .extracting(
+            PartitionStats::partition,
+            PartitionStats::specId,
+            PartitionStats::dataRecordCount,
+            PartitionStats::dataFileCount,
+            PartitionStats::totalDataFileSizeInBytes,
+            PartitionStats::positionDeleteRecordCount,
+            PartitionStats::positionDeleteFileCount,
+            PartitionStats::equalityDeleteRecordCount,
+            PartitionStats::equalityDeleteFileCount,
+            PartitionStats::totalRecords,
+            PartitionStats::lastUpdatedAt,
+            PartitionStats::lastUpdatedSnapshotId)
+        .containsExactlyInAnyOrder(expectedValues);
+  }
+
+  private StructLike partitionRecord(Types.StructType partitionType, String val1, String val2) {
+    GenericRecord record = GenericRecord.create(partitionType);
+    record.set(0, val1);
+    record.set(1, val2);
+    return record;
+  }
+}