[CELEBORN-1858] Support DfsPartitionReader read partition by chunkOffsets when enable optimize skew partition read #3115
@@ -66,12 +66,16 @@ public class DfsPartitionReader implements PartitionReader {
   private int numChunks = 0;
   private int returnedChunks = 0;
   private int currentChunkIndex = 0;
+  private int startChunkIndex;
+  private int endChunkIndex;
   private final List<Long> chunkOffsets = new ArrayList<>();
   private TransportClient client;
   private PbStreamHandler streamHandler;
   private MetricsCallback metricsCallback;
   private FileSystem hadoopFs;

+  private Path dataFilePath;
+
   public DfsPartitionReader(
       CelebornConf conf,
       String shuffleKey,
@@ -80,7 +84,9 @@ public DfsPartitionReader(
       TransportClientFactory clientFactory,
       int startMapIndex,
       int endMapIndex,
-      MetricsCallback metricsCallback)
+      MetricsCallback metricsCallback,
+      int startChunkIndex,
+      int endChunkIndex)
       throws IOException {
     this.conf = conf;
     shuffleChunkSize = conf.dfsReadChunkSize();
@@ -121,23 +127,33 @@ public DfsPartitionReader(
           "read shuffle file from DFS failed, filePath: " + location.getStorageInfo().getFilePath(),
           e);
     }

-    if (endMapIndex != Integer.MAX_VALUE) {
-      dfsInputStream =
-          hadoopFs.open(new Path(Utils.getSortedFilePath(location.getStorageInfo().getFilePath())));
+    if (endMapIndex != Integer.MAX_VALUE && endMapIndex != -1) {
+      dataFilePath = new Path(Utils.getSortedFilePath(location.getStorageInfo().getFilePath()));
+      dfsInputStream = hadoopFs.open(dataFilePath);
       chunkOffsets.addAll(
           getChunkOffsetsFromSortedIndex(conf, location, startMapIndex, endMapIndex));
     } else {
-      dfsInputStream = hadoopFs.open(new Path(location.getStorageInfo().getFilePath()));
+      dataFilePath = new Path(location.getStorageInfo().getFilePath());
+      dfsInputStream = hadoopFs.open(dataFilePath);
       chunkOffsets.addAll(getChunkOffsetsFromUnsortedIndex(conf, location));
     }
+    this.startChunkIndex = startChunkIndex == -1 ? 0 : startChunkIndex;
+    this.endChunkIndex =
+        endChunkIndex == -1
+            ? chunkOffsets.size() - 2
+            : Math.min(chunkOffsets.size() - 2, endChunkIndex);
+    this.currentChunkIndex = this.startChunkIndex;
+    this.numChunks = this.endChunkIndex - this.startChunkIndex + 1;
     logger.debug(
-        "DFS {} index count:{} offsets:{}",
+        "DFS {} total offset count:{} chunk count: {} "
+            + "start chunk index:{} end chunk index:{} offsets:{}",
         location.getStorageInfo().getFilePath(),
         chunkOffsets.size(),
+        this.numChunks,
+        this.startChunkIndex,
+        this.endChunkIndex,
         chunkOffsets);
-    if (chunkOffsets.size() > 1) {
-      numChunks = chunkOffsets.size() - 1;
+    if (this.numChunks > 0) {
       fetchThread =
           ThreadUtils.newDaemonSingleThreadExecutor(
               "celeborn-client-dfs-partition-fetcher" + location.getStorageInfo().getFilePath());
|
|
@@ -197,7 +213,7 @@ public ByteBuf next() throws IOException, InterruptedException {
           fetchThread.submit(
               () -> {
                 try {
-                  while (!closed && currentChunkIndex < numChunks) {
+                  while (!closed && currentChunkIndex <= endChunkIndex) {
                     while (results.size() >= fetchMaxReqsInFlight) {
                       Thread.sleep(50);
                     }

Contributor:
In this catch case, we could avoid falling back to reading the sorted path when the skew-partition read optimization is enabled (code).

Contributor (Author):
Fixed, PTAL @wangshengjie123

Contributor:
Hi @Z1Wu, sorry for the late reply. LGTM, thanks.
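A simplified sketch of the bounded fetch loop, assuming only that each chunk i spans [chunkOffsets.get(i), chunkOffsets.get(i + 1)); readChunk is a hypothetical stand-in for the dfsInputStream.readFully call in the real reader.

import java.util.List;

// Illustration of iterating chunks from startChunkIndex to endChunkIndex (inclusive).
class FetchLoopSketch {
  static void fetchRange(List<Long> chunkOffsets, int startChunkIndex, int endChunkIndex) {
    int currentChunkIndex = startChunkIndex;
    while (currentChunkIndex <= endChunkIndex) {
      long offset = chunkOffsets.get(currentChunkIndex);
      int length = (int) (chunkOffsets.get(currentChunkIndex + 1) - offset);
      readChunk(offset, length);
      currentChunkIndex++;
    }
  }

  // Hypothetical stand-in for reading one chunk from the DFS input stream.
  static void readChunk(long offset, int length) {
    System.out.printf("read %d bytes at offset %d%n", length, offset);
  }
}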
|
|
@@ -208,16 +224,10 @@ public ByteBuf next() throws IOException, InterruptedException {
                     try {
                       dfsInputStream.readFully(offset, buffer);
                     } catch (IOException e) {
-                      logger.warn(
-                          "read DFS {} failed will retry, error detail {}",
-                          location.getStorageInfo().getFilePath(),
-                          e);
+                      logger.warn("read DFS {} failed will retry, error detail {}", dataFilePath, e);
                       try {
                         dfsInputStream.close();
-                        dfsInputStream =
-                            hadoopFs.open(
-                                new Path(
-                                    Utils.getSortedFilePath(location.getStorageInfo().getFilePath())));
+                        dfsInputStream = hadoopFs.open(dataFilePath);
                         dfsInputStream.readFully(offset, buffer);
                       } catch (IOException ex) {
                         logger.warn(
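A hedged sketch of the retry path above: on a read failure the stream is closed and reopened against the same dataFilePath the constructor originally opened (sorted or unsorted), rather than unconditionally reopening the sorted file. FSDataInputStream, FileSystem, and Path are the Hadoop classes the reader already uses; the helper method itself is illustrative.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative retry-once helper: reopen the same data file and re-read on failure.
class DfsRetrySketch {
  static FSDataInputStream readWithRetry(
      FileSystem hadoopFs, Path dataFilePath, FSDataInputStream in, long offset, byte[] buffer)
      throws IOException {
    try {
      in.readFully(offset, buffer);
      return in;
    } catch (IOException e) {
      // Reopen the same path that was opened in the constructor, whether sorted or unsorted.
      in.close();
      FSDataInputStream reopened = hadoopFs.open(dataFilePath);
      reopened.readFully(offset, buffer);
      return reopened;
    }
  }
}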
|
|
If the skew-partition read optimization is enabled, there is no more sorting, so this branch should not read the sorted DFS file. The condition should instead be that startMapIndex > endMapIndex, and in that case the chunk range should be read from the DFS shuffle file.

In my understanding, Celeborn currently supports both sort-based skew handling and the sort-free skew handling introduced in #2373. If we change the condition to startMapIndex > endMapIndex, sort-based skew handling will no longer be supported at all. There is other sort-based skew-handling code in the current codebase, such as PartitionFilesSorter. If we want to completely remove the code related to sort-based skew handling, would it be more appropriate to do that in a new PR after #3118 is merged into the main branch? Please correct me if there's anything I haven't understood correctly.
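For reference, a small self-contained sketch (hypothetical names, mirroring the constructor diff above rather than any Celeborn API) of how the endMapIndex sentinel selects which file to read: a concrete map-index range goes to the sorted file, while Integer.MAX_VALUE (whole partition) and -1 (the value the diff above adds for the chunk-range read) go to the original unsorted shuffle file.

// Hypothetical illustration of the file-selection condition discussed above.
class FileSelectionSketch {
  enum Source { SORTED_FILE, ORIGINAL_FILE }

  static Source selectSource(int endMapIndex) {
    // A concrete map-index range (sort-based skew handling) reads the sorted file;
    // Integer.MAX_VALUE (whole partition) and -1 (chunk-range read) both read the
    // original unsorted shuffle file.
    if (endMapIndex != Integer.MAX_VALUE && endMapIndex != -1) {
      return Source.SORTED_FILE;
    }
    return Source.ORIGINAL_FILE;
  }
}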