Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
105 changes: 105 additions & 0 deletions core/src/jmh/java/org/apache/iceberg/PartitionStatsUtilBenchmark.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,105 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;

import static org.apache.iceberg.types.Types.NestedField.optional;
import static org.apache.iceberg.types.Types.NestedField.required;
import static org.assertj.core.api.Assertions.assertThat;

import java.util.Collection;
import java.util.concurrent.TimeUnit;
import org.apache.iceberg.hadoop.HadoopTables;
import org.apache.iceberg.types.Types;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Threads;
import org.openjdk.jmh.annotations.Timeout;
import org.openjdk.jmh.annotations.Warmup;

@Fork(1)
@State(Scope.Benchmark)
@Warmup(iterations = 2)
@Measurement(iterations = 5)
@Timeout(time = 1000, timeUnit = TimeUnit.HOURS)
@BenchmarkMode(Mode.SingleShotTime)
public class PartitionStatsUtilBenchmark {

  private static final Schema SCHEMA =
      new Schema(
          required(1, "c1", Types.IntegerType.get()),
          optional(2, "c2", Types.StringType.get()),
          optional(3, "c3", Types.StringType.get()));

  private static final PartitionSpec SPEC = PartitionSpec.builderFor(SCHEMA).identity("c1").build();

  // Number of manifests written during setup (10k).
  private static final int NUM_MANIFESTS = 10000;

  // Number of distinct partition values appended per manifest.
  private static final int PARTITIONS_PER_MANIFEST = 100;

  // Data files appended per partition, i.e. 2k data files per manifest.
  private static final int FILES_PER_PARTITION = 20;

  private static final HadoopTables TABLES = new HadoopTables();

  private static final String TABLE_IDENT = "tbl";

  private Table table;

  @Setup
  public void setupBenchmark() {
    this.table = TABLES.create(SCHEMA, SPEC, TABLE_IDENT);

    // One fast-append commit per manifest, each covering every partition value.
    for (int manifestOrdinal = 0; manifestOrdinal < NUM_MANIFESTS; manifestOrdinal++) {
      AppendFiles append = table.newFastAppend();

      for (int partitionOrdinal = 0; partitionOrdinal < PARTITIONS_PER_MANIFEST; partitionOrdinal++) {
        StructLike partitionValue = TestHelpers.Row.of(partitionOrdinal);
        for (int fileNum = 0; fileNum < FILES_PER_PARTITION; fileNum++) {
          append.appendFile(FileGenerationUtil.generateDataFile(table, partitionValue));
        }
      }

      append.commit();
    }
  }

  @TearDown
  public void tearDownBenchmark() {
    TABLES.dropTable(TABLE_IDENT);
  }

  @Benchmark
  @Threads(1)
  public void benchmarkPartitionStats() {
    Collection<PartitionStats> partitionStats =
        PartitionStatsUtil.computeStats(table, table.currentSnapshot());
    // Sanity check: one stats entry per distinct partition value.
    assertThat(partitionStats).hasSize(PARTITIONS_PER_MANIFEST);

    PartitionStatsUtil.sortStats(partitionStats, Partitioning.partitionType(table));
  }
}
17 changes: 17 additions & 0 deletions core/src/main/java/org/apache/iceberg/BaseScan.java
Original file line number Diff line number Diff line change
Expand Up @@ -289,4 +289,21 @@ private static Schema lazyColumnProjection(TableScanContext context, Schema sche
/**
 * Returns a refined scan that reports metrics through the given reporter.
 *
 * @param reporter the {@link MetricsReporter} to attach to the scan context
 * @return a new scan instance configured with the reporter
 */
public ThisT metricsReporter(MetricsReporter reporter) {
  TableScanContext reportingContext = context.reportWith(reporter);
  return newRefinedScan(table, schema, reportingContext);
}

/**
 * Returns the manifest-reader projection columns for the given manifest content type.
 *
 * @param content the manifest content type to scan.
 * @return the column names to project when reading manifests of that content type.
 */
static List<String> scanColumns(ManifestContent content) {
  if (content == ManifestContent.DATA) {
    return BaseScan.SCAN_COLUMNS;
  } else if (content == ManifestContent.DELETES) {
    return BaseScan.DELETE_SCAN_COLUMNS;
  }

  throw new UnsupportedOperationException("Cannot read unknown manifest type: " + content);
}
}
252 changes: 252 additions & 0 deletions core/src/main/java/org/apache/iceberg/PartitionStats.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,252 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;

import org.apache.iceberg.relocated.com.google.common.base.Preconditions;

/**
 * Mutable, per-partition statistics accumulated while scanning manifests.
 *
 * <p>Implements {@link StructLike} with a fixed 12-field layout so instances can be written
 * directly into the partition stats file. Positions: 0=partition, 1=specId, 2=dataRecordCount,
 * 3=dataFileCount, 4=totalDataFileSizeInBytes, 5=positionDeleteRecordCount,
 * 6=positionDeleteFileCount, 7=equalityDeleteRecordCount, 8=equalityDeleteFileCount,
 * 9=totalRecordCount, 10=lastUpdatedAt, 11=lastUpdatedSnapshotId.
 *
 * <p>Not thread-safe; callers must accumulate stats from a single thread or merge per-thread
 * results via {@link #appendStats(PartitionStats)}.
 */
public class PartitionStats implements StructLike {

  private static final int STATS_COUNT = 12;

  private StructLike partition;
  private int specId;
  private long dataRecordCount;
  private int dataFileCount;
  private long totalDataFileSizeInBytes;
  private long positionDeleteRecordCount;
  private int positionDeleteFileCount;
  private long equalityDeleteRecordCount;
  private int equalityDeleteFileCount;
  private long totalRecordCount;
  private Long lastUpdatedAt; // null by default
  private Long lastUpdatedSnapshotId; // null by default

  public PartitionStats(StructLike partition, int specId) {
    this.partition = partition;
    this.specId = specId;
  }

  /** Returns the partition value this entry aggregates stats for. */
  public StructLike partition() {
    return partition;
  }

  /** Returns the partition spec ID that all files counted here must belong to. */
  public int specId() {
    return specId;
  }

  public long dataRecordCount() {
    return dataRecordCount;
  }

  public int dataFileCount() {
    return dataFileCount;
  }

  public long totalDataFileSizeInBytes() {
    return totalDataFileSizeInBytes;
  }

  public long positionDeleteRecordCount() {
    return positionDeleteRecordCount;
  }

  public int positionDeleteFileCount() {
    return positionDeleteFileCount;
  }

  public long equalityDeleteRecordCount() {
    return equalityDeleteRecordCount;
  }

  public int equalityDeleteFileCount() {
    return equalityDeleteFileCount;
  }

  public long totalRecordCount() {
    return totalRecordCount;
  }

  /** Returns the timestamp (millis) of the latest snapshot that touched this partition, or null. */
  public Long lastUpdatedAt() {
    return lastUpdatedAt;
  }

  /** Returns the ID of the latest snapshot that touched this partition, or null. */
  public Long lastUpdatedSnapshotId() {
    return lastUpdatedSnapshotId;
  }

  /**
   * Updates the partition stats from the data/delete file.
   *
   * @param file the {@link ContentFile} from the manifest entry.
   * @param snapshot the snapshot corresponding to the live entry.
   */
  public void liveEntry(ContentFile<?> file, Snapshot snapshot) {
    Preconditions.checkArgument(
        specId == file.specId(), "Spec IDs must match: expected %s but file has %s", specId, file.specId());

    switch (file.content()) {
      case DATA:
        this.dataRecordCount += file.recordCount();
        this.dataFileCount += 1;
        this.totalDataFileSizeInBytes += file.fileSizeInBytes();
        break;
      case POSITION_DELETES:
        this.positionDeleteRecordCount += file.recordCount();
        this.positionDeleteFileCount += 1;
        break;
      case EQUALITY_DELETES:
        this.equalityDeleteRecordCount += file.recordCount();
        this.equalityDeleteFileCount += 1;
        break;
      default:
        throw new UnsupportedOperationException("Unsupported file content type: " + file.content());
    }

    if (snapshot != null) {
      updateSnapshotInfo(snapshot.snapshotId(), snapshot.timestampMillis());
    }

    // Note: Not computing the `TOTAL_RECORD_COUNT` for now as it needs scanning the data.
  }

  /**
   * Updates the modified time and snapshot ID for the deleted manifest entry.
   *
   * @param snapshot the snapshot corresponding to the deleted manifest entry.
   */
  public void deletedEntry(Snapshot snapshot) {
    if (snapshot != null) {
      updateSnapshotInfo(snapshot.snapshotId(), snapshot.timestampMillis());
    }
  }

  /**
   * Appends statistics from given entry to current entry.
   *
   * @param entry the entry from which statistics will be sourced.
   */
  public void appendStats(PartitionStats entry) {
    Preconditions.checkArgument(
        specId == entry.specId, "Spec IDs must match: expected %s but entry has %s", specId, entry.specId);

    this.dataRecordCount += entry.dataRecordCount;
    this.dataFileCount += entry.dataFileCount;
    this.totalDataFileSizeInBytes += entry.totalDataFileSizeInBytes;
    this.positionDeleteRecordCount += entry.positionDeleteRecordCount;
    this.positionDeleteFileCount += entry.positionDeleteFileCount;
    this.equalityDeleteRecordCount += entry.equalityDeleteRecordCount;
    this.equalityDeleteFileCount += entry.equalityDeleteFileCount;
    this.totalRecordCount += entry.totalRecordCount;

    // lastUpdatedAt and lastUpdatedSnapshotId are always set together in updateSnapshotInfo(),
    // so a non-null lastUpdatedAt implies a non-null lastUpdatedSnapshotId.
    if (entry.lastUpdatedAt != null) {
      updateSnapshotInfo(entry.lastUpdatedSnapshotId, entry.lastUpdatedAt);
    }
  }

  // Keeps only the most recent snapshot id/timestamp pair across all observed entries.
  private void updateSnapshotInfo(long snapshotId, long updatedAt) {
    if (lastUpdatedAt == null || lastUpdatedAt < updatedAt) {
      this.lastUpdatedAt = updatedAt;
      this.lastUpdatedSnapshotId = snapshotId;
    }
  }

  @Override
  public int size() {
    return STATS_COUNT;
  }

  @Override
  public <T> T get(int pos, Class<T> javaClass) {
    switch (pos) {
      case 0:
        return javaClass.cast(partition);
      case 1:
        return javaClass.cast(specId);
      case 2:
        return javaClass.cast(dataRecordCount);
      case 3:
        return javaClass.cast(dataFileCount);
      case 4:
        return javaClass.cast(totalDataFileSizeInBytes);
      case 5:
        return javaClass.cast(positionDeleteRecordCount);
      case 6:
        return javaClass.cast(positionDeleteFileCount);
      case 7:
        return javaClass.cast(equalityDeleteRecordCount);
      case 8:
        return javaClass.cast(equalityDeleteFileCount);
      case 9:
        return javaClass.cast(totalRecordCount);
      case 10:
        return javaClass.cast(lastUpdatedAt);
      case 11:
        return javaClass.cast(lastUpdatedSnapshotId);
      default:
        throw new UnsupportedOperationException("Unknown position: " + pos);
    }
  }

  @Override
  public <T> void set(int pos, T value) {
    switch (pos) {
      case 0:
        this.partition = (StructLike) value;
        break;
      case 1:
        this.specId = (int) value;
        break;
      case 2:
        this.dataRecordCount = (long) value;
        break;
      case 3:
        this.dataFileCount = (int) value;
        break;
      case 4:
        this.totalDataFileSizeInBytes = (long) value;
        break;
      case 5:
        // optional field as per spec, implementation initialize to 0 for counters
        this.positionDeleteRecordCount = value == null ? 0L : (long) value;
        break;
      case 6:
        // optional field as per spec, implementation initialize to 0 for counters
        this.positionDeleteFileCount = value == null ? 0 : (int) value;
        break;
      case 7:
        // optional field as per spec, implementation initialize to 0 for counters
        this.equalityDeleteRecordCount = value == null ? 0L : (long) value;
        break;
      case 8:
        // optional field as per spec, implementation initialize to 0 for counters
        this.equalityDeleteFileCount = value == null ? 0 : (int) value;
        break;
      case 9:
        // optional field as per spec, implementation initialize to 0 for counters
        this.totalRecordCount = value == null ? 0L : (long) value;
        break;
      case 10:
        this.lastUpdatedAt = (Long) value;
        break;
      case 11:
        this.lastUpdatedSnapshotId = (Long) value;
        break;
      default:
        throw new UnsupportedOperationException("Unknown position: " + pos);
    }
  }
}
Loading