@@ -45,6 +45,7 @@
import org.apache.iceberg.Snapshot;
import org.apache.iceberg.SnapshotUpdate;
import org.apache.iceberg.Table;
import org.apache.iceberg.events.CreateSnapshotEvent;
import org.apache.iceberg.flink.TableLoader;
import org.apache.iceberg.io.WriteResult;
import org.apache.iceberg.relocated.com.google.common.base.MoreObjects;
@@ -64,6 +65,7 @@ class IcebergFilesCommitter extends AbstractStreamOperator<Void>

private static final long serialVersionUID = 1L;
private static final long INITIAL_CHECKPOINT_ID = -1L;
private static final long INITIAL_SNAPSHOT_ID = -1L;
private static final byte[] EMPTY_MANIFEST_DATA = new byte[0];

private static final Logger LOG = LoggerFactory.getLogger(IcebergFilesCommitter.class);
@@ -97,6 +99,7 @@ class IcebergFilesCommitter extends AbstractStreamOperator<Void>
private transient Table table;
private transient ManifestOutputFileFactory manifestOutputFileFactory;
private transient long maxCommittedCheckpointId;
private transient long lastCommittedSnapshotId;
private transient int continuousEmptyCheckpoints;
private transient int maxContinuousEmptyCommits;
// There're two cases that we restore from flink checkpoints: the first case is restoring from snapshot created by the
@@ -132,6 +135,7 @@ public void initializeState(StateInitializationContext context) throws Exception
int attemptId = getRuntimeContext().getAttemptNumber();
this.manifestOutputFileFactory = FlinkManifestUtil.createOutputFileFactory(table, flinkJobId, subTaskId, attemptId);
this.maxCommittedCheckpointId = INITIAL_CHECKPOINT_ID;
this.lastCommittedSnapshotId = INITIAL_SNAPSHOT_ID; // validate all snapshot history for first commit

this.checkpointsState = context.getOperatorStateStore().getListState(STATE_DESCRIPTOR);
this.jobIdState = context.getOperatorStateStore().getListState(JOB_ID_DESCRIPTOR);
@@ -283,6 +287,7 @@ private void commitDeltaTxn(NavigableMap<Long, WriteResult> pendingResults, Stri
// merged one will lead to the incorrect delete semantic.
WriteResult result = e.getValue();
RowDelta rowDelta = table.newRowDelta()
.validateFromSnapshot(lastCommittedSnapshotId)

@ayush-san
What will be the value of lastCommittedSnapshotId if I restore the job from a savepoint?

Shouldn't we do something similar for it as we do for maxCommittedCheckpointId? There is more to consider here though, for example if the expire-snapshots maintenance procedure runs before restoring the job from the savepoint (#2482 (comment)).

this.lastCommittedSnapshotId = getLastCommittedSnapshotId(table);
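
A hypothetical version of such a helper might walk the snapshot history backwards to the newest snapshot written by this job, mirroring how the committer already recovers maxCommittedCheckpointId from the flink.job-id summary property; getLastCommittedSnapshotId and its signature below are illustrative, not part of this PR:

// Sketch only: find the newest snapshot committed by this Flink job by
// following parent pointers from the current snapshot. Returns
// INITIAL_SNAPSHOT_ID (-1), i.e. full-history validation, if no snapshot
// from this job survives (for example after expire-snapshots ran).
private static long getLastCommittedSnapshotId(Table table, String flinkJobId) {
  Snapshot snapshot = table.currentSnapshot();
  while (snapshot != null) {
    if (flinkJobId.equals(snapshot.summary().get(FLINK_JOB_ID))) {
      return snapshot.snapshotId();
    }
    Long parentId = snapshot.parentId();
    snapshot = parentId != null ? table.snapshot(parentId) : null;
  }
  return INITIAL_SNAPSHOT_ID;
}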

@Reo-LEI (PR author), Sep 27, 2021

Thanks for your review @ayush-san. As I listed in #2867, on restart or restore from a checkpoint/savepoint, lastCommittedSnapshotId is simply reset to its initial value, and the rowDelta will traverse the entire snapshot history to ensure the referenced data files still exist and that all of the snapshot history is valid. If we delete expired snapshots via the maintenance procedure before restoring the job, the validation will only traverse back to the snapshot committed by that expire-snapshots procedure. I think that would be OK.

This optimization is just trying to speed up the commit at runtime, and I want to keep it simple, so I think we don't need to save lastCommittedSnapshotId and restore it from the checkpoint/savepoint. But I would like to hear the opinions of @openinx, @jackye1995, @stevenzwu and @kbendick.
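
As a minimal sketch of that behavior (using this PR's INITIAL_SNAPSHOT_ID sentinel; the explicit guard is illustrative, the PR itself passes the field straight through), the validation starting point would only be narrowed once the operator has committed at least once:

RowDelta rowDelta = table.newRowDelta();
if (lastCommittedSnapshotId != INITIAL_SNAPSHOT_ID) {
  // Fast path: re-validate only snapshots newer than our last commit.
  rowDelta.validateFromSnapshot(lastCommittedSnapshotId);
}
// Right after a restart/restore the sentinel is still set, so validation
// traverses the full snapshot history, as described above.
rowDelta.validateDataFilesExist(ImmutableList.copyOf(result.referencedDataFiles()))
    .validateDeletedFiles();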

@ayush-san

@Reo-LEI Can you please help me understand how #2867 helps in solving this validation error?

I agree with you that we can handle lastCommittedSnapshotId in a separate PR and get this one reviewed, because this will really speed up the commit time, which otherwise keeps growing. I have seen my Flink job's checkpoint time increase from 100-200ms to 10-15 minutes.

@Reo-LEI (PR author), Sep 29, 2021

@ayush-san I'm very sorry, this is my mistake: I meant to link to #3102 (comment), not #2867.

And I don't think this PR will solve #2482; if the validation error is not fixed, this PR will encounter the same problem. I think we should follow #2603 (comment) and check which of the referenced files still exist to fix the validation error.
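
For reference, my reading of that suggestion (illustrative only, not code from #2603; Table.io(), FileIO.newInputFile() and InputFile.exists() are existing Iceberg APIs) is to check up front whether the referenced data files are still present and only validate the ones that exist:

// Illustrative sketch: filter out referenced data files that were already
// removed (e.g. by expire-snapshots), so validateDataFilesExist does not
// fail on files that legitimately no longer exist.
List<CharSequence> existingFiles = Lists.newArrayList();
for (CharSequence path : result.referencedDataFiles()) {
  if (table.io().newInputFile(path.toString()).exists()) {
    existingFiles.add(path);
  }
}
rowDelta.validateDataFilesExist(existingFiles);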

@ayush-san, Sep 29, 2021

Yes, but with your PR we can run the snapshot-expire task while the Flink job is running, since you are updating lastCommittedSnapshotId. The only case left is that when we start the Flink job from a checkpoint, we will encounter the same problem.

But if we are doing that for one case, we can mimic it for the case where we restore the job from a checkpoint. Anyway, we can tackle this in a separate PR and discuss it with @rdblue and @openinx.

@Reo-LEI (PR author)

> but with your PR we can run the snapshot expire task with the flink job running since you are updating the lastCommittedSnapshotId.

I think the reason the snapshot-expire task doesn't cause the validation error is that the validation will only traverse back to the lastCommittedSnapshot, and stops validating removed snapshots older than the lastCommittedSnapshot. But once the lastCommittedSnapshot itself is removed (e.g. the snapshot-expire task runs multiple times between two checkpoints), you will encounter the validation error again.

I think storing the lastCommittedSnapshotId is not the correct way to resolve the validation error (#2482), because we can't guarantee that the snapshot whose id we store will still exist when we restore the Flink job. The lastCommittedSnapshot may well have been removed by the time we restore, and the validation error will be raised again. @ayush-san
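
Concretely, a restore path that did persist the id would need a guard like the following before trusting it (a sketch under this PR's naming; restoreLastCommittedSnapshotId is hypothetical):

// Sketch: a stored snapshot id is only safe to use if that snapshot still
// exists; if expire-snapshots removed it while the job was down, fall back
// to the sentinel and re-validate the full history.
private static long restoreLastCommittedSnapshotId(Table table, long storedSnapshotId) {
  if (storedSnapshotId != INITIAL_SNAPSHOT_ID && table.snapshot(storedSnapshotId) != null) {
    return storedSnapshotId;
  }
  return INITIAL_SNAPSHOT_ID;
}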

.validateDataFilesExist(ImmutableList.copyOf(result.referencedDataFiles()))
.validateDeletedFiles();

@@ -293,6 +298,8 @@ private void commitDeltaTxn(NavigableMap<Long, WriteResult> pendingResults, Stri
Arrays.stream(result.deleteFiles()).forEach(rowDelta::addDeletes);

commitOperation(rowDelta, numDataFiles, numDeleteFiles, "rowDelta", newFlinkJobId, e.getKey());

lastCommittedSnapshotId = ((CreateSnapshotEvent) rowDelta.updateEvent()).snapshotId();
}
}
}
@@ -739,6 +739,99 @@ public void testValidateDataFileExist() throws Exception {
}
}

@Test
public void testValidateDataFileExistStartFromLastCommittedSnapshot() throws Exception {
Assume.assumeFalse("Only support equality-delete in format v2.", formatVersion < 2);
long timestamp = 0;
long checkpoint = 10;
JobID jobId = new JobID();
FileAppenderFactory<RowData> appenderFactory = createDeletableAppenderFactory();

// Txn#1: insert the row <1, 'aaa'> to data-file-1
RowData insert1 = SimpleDataUtil.createInsert(1, "aaa");
DataFile dataFile1 = writeDataFile("data-file-1", ImmutableList.of(insert1));
new TestTableLoader(tablePath)
.loadTable()
.newAppend()
.appendFile(dataFile1)
.commit();

// Txn#2: overwrite the committed data-file-1 and insert row <2, 'bbb'> into data-file-2
RowData insert2 = SimpleDataUtil.createInsert(2, "bbb");
DataFile dataFile2 = writeDataFile("data-file-2", ImmutableList.of(insert2));
new TestTableLoader(tablePath)
.loadTable()
.newOverwrite()
.addFile(dataFile2)
.deleteFile(dataFile1)
.commit();

try (OneInputStreamOperatorTestHarness<WriteResult, Void> harness = createStreamSink(jobId)) {
harness.setup();
harness.open();

// Txn#3: construct a pos-delete file that references the no-longer-existing row <1, 'aaa'> (only for test).
DeleteFile shouldNotExistDeleteFile = writePosDeleteFile(appenderFactory,
"pos-delete-file-x",
ImmutableList.of(Pair.of(dataFile1.path(), 0L)));
harness.processElement(WriteResult.builder()
.addDeleteFiles(shouldNotExistDeleteFile)
.addReferencedDataFiles(dataFile1.path())
.build(),
++timestamp);
harness.snapshot(++checkpoint, ++timestamp);

// The first commit will traverse the entire snapshot history to check that the referenced data files exist,
// so validation will fail when committing.
final long currentCheckpointId = checkpoint;
AssertHelpers.assertThrows("Validation should fail because of non-existent data files.",
ValidationException.class, "Cannot commit, missing data files",
() -> {
harness.notifyOfCompletedCheckpoint(currentCheckpointId);
return null;
});
}

try (OneInputStreamOperatorTestHarness<WriteResult, Void> harness = createStreamSink(jobId)) {
harness.setup();
harness.open();

// Txn#3: delete row <2, 'bbb'> and insert the row <3, 'ccc'> to data-file-3
RowData insert3 = SimpleDataUtil.createInsert(3, "ccc");
DataFile dataFile3 = writeDataFile("data-file-3", ImmutableList.of(insert3));
DeleteFile deleteFile1 = writePosDeleteFile(appenderFactory,
"pos-delete-file-1",
ImmutableList.of(Pair.of(dataFile2.path(), 0L)));
harness.processElement(WriteResult.builder()
.addDataFiles(dataFile3)
.addDeleteFiles(deleteFile1)
.addReferencedDataFiles(dataFile2.path())
.build(),
++timestamp);
harness.snapshot(checkpoint, ++timestamp);
harness.notifyOfCompletedCheckpoint(checkpoint);

// Txn#4: construct a pos-delete file that references the no-longer-existing row <1, 'aaa'> (only for test).
DeleteFile shouldNotExistDeleteFile = writePosDeleteFile(appenderFactory,
"pos-delete-file-x",
ImmutableList.of(Pair.of(dataFile1.path(), 0L)));
harness.processElement(WriteResult.builder()
.addDeleteFiles(shouldNotExistDeleteFile)
.addReferencedDataFiles(dataFile1.path())
.build(),
++timestamp);
harness.snapshot(++checkpoint, ++timestamp);
// The commit traverses the snapshot history only from the last successfully committed snapshot id (here snapshot#3),
// so this commit will succeed even though dataFile1 no longer exists.
harness.notifyOfCompletedCheckpoint(checkpoint);

SimpleDataUtil.assertTableRows(table, ImmutableList.of(insert3));
assertMaxCommittedCheckpointId(jobId, checkpoint);
assertFlinkManifests(0);
Assert.assertEquals("Should have committed 4 txn.", 4, ImmutableList.copyOf(table.snapshots()).size());
}
}

@Test
public void testCommitTwoCheckpointsInSingleTxn() throws Exception {
Assume.assumeFalse("Only support equality-delete in format v2.", formatVersion < 2);