Flink: add append capability to dynamic iceberg sink (#14526) #14559

Changes from 1 commit
@@ -31,6 +31,7 @@
 import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.connector.sink2.Committer;
 import org.apache.flink.core.io.SimpleVersionedSerialization;
+import org.apache.iceberg.AppendFiles;
 import org.apache.iceberg.ManifestFile;
 import org.apache.iceberg.ReplacePartitions;
 import org.apache.iceberg.RowDelta;
@@ -302,30 +303,58 @@ private void commitDeltaTxn(
       CommitSummary summary,
       String newFlinkJobId,
       String operatorId) {
-    for (Map.Entry<Long, List<WriteResult>> e : pendingResults.entrySet()) {
-      long checkpointId = e.getKey();
-      List<WriteResult> writeResults = e.getValue();
-
-      RowDelta rowDelta = table.newRowDelta().scanManifestsWith(workerPool);
-      for (WriteResult result : writeResults) {
-        // Row delta validations are not needed for streaming changes that write equality deletes.
-        // Equality deletes are applied to data in all previous sequence numbers, so retries may
-        // push deletes further in the future, but do not affect correctness. Position deletes
-        // committed to the table in this path are used only to delete rows from data files that are
-        // being added in this commit. There is no way for data files added along with the delete
-        // files to be concurrently removed, so there is no need to validate the files referenced by
-        // the position delete files that are being committed.
-        Arrays.stream(result.dataFiles()).forEach(rowDelta::addRows);
-        Arrays.stream(result.deleteFiles()).forEach(rowDelta::addDeletes);
-      }
-
-      // Every Flink checkpoint contains a set of independent changes which can be committed
-      // together. While it is technically feasible to combine append-only data across checkpoints,
-      // for the sake of simplicity, we do not implement this (premature) optimization. Multiple
-      // pending checkpoints here are very rare to occur, i.e. only with very short checkpoint
-      // intervals or when concurrent checkpointing is enabled.
-      commitOperation(
-          table, branch, rowDelta, summary, "rowDelta", newFlinkJobId, operatorId, checkpointId);
-    }
+    if (summary.deleteFilesCount() == 0) {
+      // To be compatible with Iceberg format V1.
+      AppendFiles appendFiles = table.newAppend().scanManifestsWith(workerPool);
+      for (List<WriteResult> resultList : pendingResults.values()) {
+        for (WriteResult result : resultList) {
+          Preconditions.checkState(
+              result.referencedDataFiles().length == 0,
+              "Should have no referenced data files for append.");
+          Arrays.stream(result.dataFiles()).forEach(appendFiles::appendFile);
+        }
+      }
+      String description = "append";
+
+      // Every Flink checkpoint contains a set of independent changes which can be committed
+      // together. While it is technically feasible to combine append-only data across
+      // checkpoints, for the sake of simplicity, we do not implement this (premature)
+      // optimization. Multiple pending checkpoints here are very rare to occur, i.e. only with
+      // very short checkpoint intervals or when concurrent checkpointing is enabled.
+      // If this commit fails, all pending checkpoints fail together, as there is really only
+      // one commit.
+      commitOperation(
+          table,
+          branch,
+          appendFiles,
+          summary,
+          description,
+          newFlinkJobId,
+          operatorId,
+          pendingResults.lastKey());
+    } else {
+      for (Map.Entry<Long, List<WriteResult>> e : pendingResults.entrySet()) {
+        long checkpointId = e.getKey();
+        List<WriteResult> writeResults = e.getValue();
+
+        RowDelta rowDelta = table.newRowDelta().scanManifestsWith(workerPool);
+        for (WriteResult result : writeResults) {
+          // Row delta validations are not needed for streaming changes that write equality
+          // deletes. Equality deletes are applied to data in all previous sequence numbers, so
+          // retries may push deletes further in the future, but do not affect correctness.
+          // Position deletes committed to the table in this path are used only to delete rows
+          // from data files that are being added in this commit. There is no way for data files
+          // added along with the delete files to be concurrently removed, so there is no need to
+          // validate the files referenced by the position delete files that are being committed.
+          Arrays.stream(result.dataFiles()).forEach(rowDelta::addRows);
+          Arrays.stream(result.deleteFiles()).forEach(rowDelta::addDeletes);
+        }
+
+        // Every Flink checkpoint contains a set of independent changes which can be committed
+        // together. While it is technically feasible to combine append-only data across
+        // checkpoints, for the sake of simplicity, we do not implement this (premature)
+        // optimization. Multiple pending checkpoints here are very rare to occur, i.e. only with
+        // very short checkpoint intervals or when concurrent checkpointing is enabled.
+        commitOperation(
+            table, branch, rowDelta, summary, "rowDelta", newFlinkJobId, operatorId, checkpointId);
+      }
+    }
   }
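For context, a minimal sketch of the two Iceberg commit paths the new branch chooses between. This is illustrative only; the Table handle and file variables are placeholders, not code from the PR:

import org.apache.iceberg.AppendFiles;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.DeleteFile;
import org.apache.iceberg.RowDelta;
import org.apache.iceberg.Table;

class CommitPaths {
  // Fast path: data files only. Also valid for format V1 tables, which is why
  // the diff gates on summary.deleteFilesCount() == 0.
  static void commitAppend(Table table, DataFile dataFile) {
    AppendFiles append = table.newAppend();
    append.appendFile(dataFile);
    append.commit();
  }

  // Delta path: data files plus delete files; row-level deletes require
  // format V2 tables.
  static void commitRowDelta(Table table, DataFile dataFile, DeleteFile deleteFile) {
    RowDelta delta = table.newRowDelta();
    delta.addRows(dataFile);
    delta.addDeletes(deleteFile);
    delta.commit();
  }
}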
Contributor

Can we keep the loop structure? We will need it for both types of snapshots. This should work:

for (Map.Entry<Long, List<WriteResult>> e : pendingResults.entrySet()) {
  long checkpointId = e.getKey();
  List<WriteResult> writeResults = e.getValue();

  boolean appendOnly = true;
  for (WriteResult writeResult : writeResults) {
    if (writeResult.deleteFiles().length > 0) {
      appendOnly = false;
      break;
    }
  }

  final SnapshotUpdate<?> snapshotUpdate;
  if (appendOnly) {
    AppendFiles appendFiles = table.newAppend().scanManifestsWith(workerPool);
    for (WriteResult result : writeResults) {
      Arrays.stream(result.dataFiles()).forEach(appendFiles::appendFile);
    }
    snapshotUpdate = appendFiles;
  } else {
    RowDelta rowDelta = table.newRowDelta().scanManifestsWith(workerPool);
    for (WriteResult result : writeResults) {
      Arrays.stream(result.dataFiles()).forEach(rowDelta::addRows);
      Arrays.stream(result.deleteFiles()).forEach(rowDelta::addDeletes);
    }
    snapshotUpdate = rowDelta;
  }

  commitOperation(
      table,
      branch,
      snapshotUpdate,
      summary,
      appendOnly ? "append" : "rowDelta",
      newFlinkJobId,
      operatorId,
      checkpointId);
}
Contributor
Author

This looks good. The only thing left is the checkState for result.referencedDataFiles().length == 0 (which exists in IcebergSink), which I will add to the loop that checks appendOnly.
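For illustration, a sketch of how the append-only branch of the suggested loop might look with that checkState folded in; this is an assumption about the eventual code, not the final commit:

if (appendOnly) {
  AppendFiles appendFiles = table.newAppend().scanManifestsWith(workerPool);
  for (WriteResult result : writeResults) {
    // Mirrors the guard in IcebergSink: append-only write results must not
    // reference data files (referencedDataFiles accompany position deletes).
    Preconditions.checkState(
        result.referencedDataFiles().length == 0,
        "Should have no referenced data files for append.");
    Arrays.stream(result.dataFiles()).forEach(appendFiles::appendFile);
  }
  snapshotUpdate = appendFiles;
}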
if (summary.deleteFilesCount() == 0) {

summary.addAll(pendingResults);
And internally:
iceberg/flink/v2.1/flink/src/main/java/org/apache/iceberg/flink/sink/CommitSummary.java
Lines 46 to 66 in 059310e

public void addAll(NavigableMap<Long, List<WriteResult>> pendingResults) {
  pendingResults.values().forEach(writeResults -> writeResults.forEach(this::addWriteResult));
}

private void addWriteResult(WriteResult writeResult) {
  dataFilesCount.addAndGet(writeResult.dataFiles().length);
  Arrays.stream(writeResult.dataFiles())
      .forEach(
          dataFile -> {
            dataFilesRecordCount.addAndGet(dataFile.recordCount());
            dataFilesByteCount.addAndGet(dataFile.fileSizeInBytes());
          });
  deleteFilesCount.addAndGet(writeResult.deleteFiles().length);
  Arrays.stream(writeResult.deleteFiles())
      .forEach(
          deleteFile -> {
            deleteFilesRecordCount.addAndGet(deleteFile.recordCount());
            long deleteBytes = ScanTaskUtil.contentSizeInBytes(deleteFile);
            deleteFilesByteCount.addAndGet(deleteBytes);
          });
}
The value is correct, but we commit at the checkpoint level while the delete file count is aggregated across checkpoints. Strictly speaking, pendingResults could contain both append-only checkpoints and overwrite checkpoints.
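To make that concrete, a minimal sketch (not from the PR) of deciding the commit type per checkpoint instead of from the aggregated summary:

// pendingResults may mix append-only and delete-carrying checkpoints, so the
// aggregated summary.deleteFilesCount() cannot choose the commit type globally.
for (Map.Entry<Long, List<WriteResult>> e : pendingResults.entrySet()) {
  boolean appendOnly = e.getValue().stream().allMatch(r -> r.deleteFiles().length == 0);
  // ... build an AppendFiles or a RowDelta for this checkpoint only.
}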
Interesting decision.
In IcebergSink we commit multiple checkpoints together in a single commit, if we happen to accumulate multiple of them.
When we fixed #14182, we decided not to do that. In fact, you proposed not to do that: #14182 (comment) 🤓
IMHO this kind of optimization is a bit premature. In practice it is rare to even have multiple pending checkpoints.
Also, as I have mentioned in the comment, this could cause issues if "replacePartitions" is used.
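A hypothetical illustration of that hazard (file variables invented): with replace-partitions semantics each checkpoint replaces a partition's contents, so folding two checkpoints into one commit yields a different table state:

// Checkpoints 1 and 2 both write partition dt=2024-01-01.
// Committed one by one, the second replaces the first:
ReplacePartitions first = table.newReplacePartitions();
first.addFile(checkpoint1DataFile); // invented placeholder
first.commit();

ReplacePartitions second = table.newReplacePartitions();
second.addFile(checkpoint2DataFile); // invented placeholder
second.commit(); // the partition now holds only checkpoint 2's files

// Folded into a single ReplacePartitions commit, both files would land in
// the partition together, which changes the result.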
So we should commit them one by one.
Can we move this loop up one level, as in https://github.com/apache/iceberg/pull/14559/files#r2513719871? This avoids repeating it.
Could we change this comment to describe correctly why we do this?