[HUDI-2671] Fix kafka offset handling in Kafka Connect protocol #4021
Merged: yihua merged 2 commits into apache:master from rmahindra123:rm_fix_kafka_connect_issues on Nov 24, 2021.
```diff
@@ -149,7 +149,7 @@ private void handleStartCommit(ControlMessage message) {
     LOG.info("Started a new transaction after receiving START_COMMIT for commit " + currentCommitTime);
     try {
       ongoingTransactionInfo = new TransactionInfo<>(currentCommitTime, writerProvider.getWriter(currentCommitTime));
-      ongoingTransactionInfo.setLastWrittenKafkaOffset(committedKafkaOffset);
+      ongoingTransactionInfo.setExpectedKafkaOffset(committedKafkaOffset);
     } catch (Exception exception) {
       LOG.warn("Error received while starting a new transaction", exception);
     }
```
```diff
@@ -188,7 +188,7 @@ private void handleEndCommit(ControlMessage message) {
         .setParticipantInfo(
             ControlMessage.ParticipantInfo.newBuilder()
                 .setWriteStatus(KafkaConnectUtils.buildWriteStatuses(writeStatuses))
-                .setKafkaOffset(ongoingTransactionInfo.getLastWrittenKafkaOffset())
+                .setKafkaOffset(ongoingTransactionInfo.getExpectedKafkaOffset())
                 .build()
         ).build();
 
```
```diff
@@ -201,9 +201,9 @@ private void handleEndCommit(ControlMessage message) {
   }
 
   private void handleAckCommit(ControlMessage message) {
-    // Update lastKafkCommitedOffset locally.
-    if (ongoingTransactionInfo != null && committedKafkaOffset < ongoingTransactionInfo.getLastWrittenKafkaOffset()) {
-      committedKafkaOffset = ongoingTransactionInfo.getLastWrittenKafkaOffset();
+    // Update committedKafkaOffset that tracks the last committed kafka offset locally.
+    if (ongoingTransactionInfo != null && committedKafkaOffset < ongoingTransactionInfo.getExpectedKafkaOffset()) {
+      committedKafkaOffset = ongoingTransactionInfo.getExpectedKafkaOffset();
     }
     syncKafkaOffsetWithLeader(message);
     cleanupOngoingTransaction();
```
```diff
@@ -215,12 +215,22 @@ private void writeRecords() {
     try {
       SinkRecord record = buffer.peek();
       if (record != null
-          && record.kafkaOffset() >= ongoingTransactionInfo.getLastWrittenKafkaOffset()) {
+          && record.kafkaOffset() == ongoingTransactionInfo.getExpectedKafkaOffset()) {
         ongoingTransactionInfo.getWriter().writeRecord(record);
-        ongoingTransactionInfo.setLastWrittenKafkaOffset(record.kafkaOffset() + 1);
-      } else if (record != null && record.kafkaOffset() < committedKafkaOffset) {
-        LOG.warn(String.format("Received a kafka record with offset %s prior to last committed offset %s for partition %s",
-            record.kafkaOffset(), ongoingTransactionInfo.getLastWrittenKafkaOffset(),
+        ongoingTransactionInfo.setExpectedKafkaOffset(record.kafkaOffset() + 1);
+      } else if (record != null && record.kafkaOffset() > ongoingTransactionInfo.getExpectedKafkaOffset()) {
+        LOG.warn(String.format("Received a kafka record with offset %s above the next expected kafka offset %s for partition %s, "
+            + "hence resetting the kafka offset to %s",
+            record.kafkaOffset(),
+            ongoingTransactionInfo.getExpectedKafkaOffset(),
+            partition,
+            ongoingTransactionInfo.getExpectedKafkaOffset()));
+        context.offset(partition, ongoingTransactionInfo.getExpectedKafkaOffset());
+      } else if (record != null && record.kafkaOffset() < ongoingTransactionInfo.getExpectedKafkaOffset()) {
+        LOG.warn(String.format("Received a kafka record with offset %s below the next expected kafka offset %s for partition %s, "
+            + "no action will be taken but this record will be ignored since its already written",
+            record.kafkaOffset(),
+            ongoingTransactionInfo.getExpectedKafkaOffset(),
             partition));
       }
       buffer.poll();
```
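The core of the fix is this three-way comparison of each buffered record's offset against a single expected offset. Below is a minimal, self-contained sketch of that rule; `ExpectedOffsetTracker` and its methods are illustrative stand-ins, not Hudi or Kafka Connect APIs:

```java
// Hypothetical model of the expected-offset rule in writeRecords():
// write when the offset matches, seek back on a gap, skip known duplicates.
public class ExpectedOffsetTracker {
  private long expectedKafkaOffset;

  public ExpectedOffsetTracker(long committedKafkaOffset) {
    // As in handleStartCommit: a new transaction expects the first
    // offset after the last committed one.
    this.expectedKafkaOffset = committedKafkaOffset;
  }

  // Decide what to do with a record at the given Kafka offset.
  public String onRecord(long kafkaOffset) {
    if (kafkaOffset == expectedKafkaOffset) {
      // Exactly the offset we expect: write it and advance the watermark.
      expectedKafkaOffset = kafkaOffset + 1;
      return "WRITE";
    } else if (kafkaOffset > expectedKafkaOffset) {
      // A gap means records were skipped; the connector would call
      // context.offset(partition, expected) so Kafka redelivers the missing range.
      return "SEEK_BACK_TO_" + expectedKafkaOffset;
    } else {
      // Below the watermark: already written, so ignore to avoid duplicates.
      return "SKIP_DUPLICATE";
    }
  }

  public static void main(String[] args) {
    ExpectedOffsetTracker tracker = new ExpectedOffsetTracker(5);
    for (long offset : new long[] {5, 6, 8, 6, 7, 8}) {
      System.out.println("offset " + offset + " -> " + tracker.onRecord(offset));
    }
    // 5 -> WRITE, 6 -> WRITE, 8 -> SEEK_BACK_TO_7, 6 -> SKIP_DUPLICATE, 7 -> WRITE, 8 -> WRITE
  }
}
```

Note how the equality check replaces the old `>=`, which would silently accept a record past a gap and lose the skipped offsets.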
```diff
@@ -250,13 +260,24 @@ private void syncKafkaOffsetWithLeader(ControlMessage message) {
       // Recover kafka committed offsets, treating the commit offset from the coordinator
       // as the source of truth
       if (coordinatorCommittedKafkaOffset != null && coordinatorCommittedKafkaOffset >= 0) {
-        // Debug only messages
         if (coordinatorCommittedKafkaOffset != committedKafkaOffset) {
-          LOG.warn(String.format("Recovering the kafka offset for partition %s to offset %s instead of local offset %s",
-              partition.partition(), coordinatorCommittedKafkaOffset, committedKafkaOffset));
+          LOG.warn(String.format("The coordinator offset for kafka partition %s is %d while the locally committed offset is %d, "
+              + "hence resetting the local committed offset to the coordinator provided one to ensure consistency",
+              partition,
+              coordinatorCommittedKafkaOffset,
+              committedKafkaOffset));
           context.offset(partition, coordinatorCommittedKafkaOffset);
         }
         committedKafkaOffset = coordinatorCommittedKafkaOffset;
+        return;
       }
-    }
+    } else {
+      LOG.warn(String.format("The coordinator offset for kafka partition %s is not present while the locally committed offset is %d, "
+          + "hence resetting the local committed offset to 0 to avoid data loss",
+          partition,
+          committedKafkaOffset));
+    }
+    // If the coordinator does not have a committed offset for this partition, reset to zero offset.
+    committedKafkaOffset = 0;
   }
 }
```
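The recovery path reduces to one decision: trust the coordinator's committed offset when it is present, otherwise rewind to 0 and let the expected-offset rule above drop the resulting duplicates. A small sketch of just that decision, under assumed simplified inputs (the real method reads the offset out of a `ControlMessage` and also calls `context.offset(...)`):

```java
// Illustrative reduction of syncKafkaOffsetWithLeader's recovery rule;
// names and signature are hypothetical, not the actual Hudi method.
public class OffsetRecovery {
  static long recoverCommittedOffset(Long coordinatorCommittedKafkaOffset) {
    if (coordinatorCommittedKafkaOffset != null && coordinatorCommittedKafkaOffset >= 0) {
      // The coordinator is the source of truth, even if it rewinds local progress.
      return coordinatorCommittedKafkaOffset;
    }
    // No coordinator offset for this partition: restart from 0 to avoid data
    // loss; redelivered records are later skipped by the expected-offset check.
    return 0L;
  }

  public static void main(String[] args) {
    System.out.println(recoverCommittedOffset(42L));  // 42: coordinator wins
    System.out.println(recoverCommittedOffset(null)); // 0: reset, no data loss
  }
}
```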
This assumes that the records come out of the buffer in order of their Kafka offsets. Is it possible that the records may come out of order?
They will always come in order. But it is possible that Kafka may deliver an out-of-order sequence if we do not commit an offset for some time. For instance, it may send 12, 13, 14, 15, ... then realize that the consumer has not committed an offset (and the last committed offset was 5), and start sending 5, 6, 7, ...; from then on the records are in order again. That is why we track an expected offset: if we do not receive the expected offset, we force an offset reset, and Kafka starts resending messages from the earlier position. This ensures no data loss or duplication. There may be scope for some optimization, but we are being conservative for now.
Got it. Basically, the offsets of the records look something like 12, 13, 14, 15, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 after the reset, so this logic skips the first group of records.
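To make the exchange concrete, here is a runnable toy (illustrative names, not Hudi code) that replays exactly that delivery order and shows the expected-offset rule writing each offset exactly once:

```java
import java.util.ArrayList;
import java.util.List;

// Kafka delivers 12..15, notices the consumer's last committed offset is 5,
// rewinds, and redelivers from 5 onward in order. The expected-offset rule
// ignores the first, out-of-sequence group and writes each offset once.
public class RedeliverySimulation {
  public static void main(String[] args) {
    long expected = 5; // last committed offset; nothing beyond it is written yet
    long[] delivered = {12, 13, 14, 15, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
    List<Long> written = new ArrayList<>();
    for (long offset : delivered) {
      if (offset == expected) {
        written.add(offset); // in sequence: write and advance the watermark
        expected = offset + 1;
      }
      // offset > expected: the real connector would force a seek via
      // context.offset(...); here the rewind is already in the delivered stream.
      // offset < expected: duplicate of an already-written record; ignored.
    }
    System.out.println(written); // [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
  }
}
```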