This repository was archived by the owner on May 9, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 14
Proper heterogeneous execution modes #652
Merged
Merged
Changes from 1 commit
Commits
Show all changes
3 commits
Select commit
Hold shift + click to select a range
File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -2790,66 +2790,109 @@ std::vector<std::unique_ptr<ExecutionKernel>> Executor::createHeterogeneousKerne | |
|
||
CHECK(!ra_exe_unit.input_descs.empty()); | ||
|
||
const bool use_multifrag_kernel = eo.allow_multifrag && is_agg; | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This reads as if group-bys would not use a multi-fragment policy. This needs cleanup, I think. |
||
|
||
fragment_descriptor.buildFragmentKernelMap(ra_exe_unit, | ||
shared_context.getFragOffsets(), | ||
policy, | ||
available_cpus + available_gpus.size(), | ||
false, /*multifrag policy unsupported yet*/ | ||
use_multifrag_kernel, | ||
this, | ||
co.codegen_traits_desc); | ||
|
||
if (allow_single_frag_table_opt && query_mem_descs.count(ExecutorDeviceType::GPU) && | ||
(query_mem_descs.at(ExecutorDeviceType::GPU)->getQueryDescriptionType() == | ||
QueryDescriptionType::Projection) && | ||
table_infos.size() == 1) { | ||
const auto max_frag_size = table_infos.front().info.getFragmentNumTuplesUpperBound(); | ||
if (max_frag_size < query_mem_descs.at(ExecutorDeviceType::GPU)->getEntryCount()) { | ||
LOG(INFO) << "Lowering scan limit from " | ||
<< query_mem_descs.at(ExecutorDeviceType::GPU)->getEntryCount() | ||
<< " to match max fragment size " << max_frag_size | ||
<< " for kernel per fragment execution path."; | ||
throw CompilationRetryNewScanLimit(max_frag_size); | ||
if (use_multifrag_kernel) { | ||
LOG(INFO) << "use_multifrag_kernel=" << use_multifrag_kernel; | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This is debug information. The same applies to the others. |
||
size_t frag_list_idx{0}; | ||
auto multifrag_heterogeneous_kernel_dispatch = | ||
[&ra_exe_unit, | ||
&execution_kernels, | ||
&column_fetcher, | ||
&co, | ||
&eo, | ||
&frag_list_idx, | ||
&query_comp_descs, | ||
&query_mem_descs](const int device_id, | ||
const FragmentsList& frag_list, | ||
const int64_t rowid_lookup_key, | ||
const ExecutorDeviceType device_type) { | ||
if (!frag_list.size()) { | ||
return; | ||
} | ||
CHECK_GE(device_id, 0); | ||
|
||
execution_kernels.emplace_back(std::make_unique<ExecutionKernel>( | ||
ra_exe_unit, | ||
device_type, | ||
device_id, | ||
co, | ||
eo, | ||
column_fetcher, | ||
*query_comp_descs.at(device_type).get(), | ||
*query_mem_descs.at(device_type).get(), | ||
frag_list, | ||
device_type == ExecutorDeviceType::CPU | ||
? ExecutorDispatchMode::KernelPerFragment | ||
: ExecutorDispatchMode::MultifragmentKernel, | ||
rowid_lookup_key)); | ||
|
||
++frag_list_idx; | ||
}; | ||
fragment_descriptor.assignFragsToMultiHeterogeneousDispatch( | ||
multifrag_heterogeneous_kernel_dispatch, ra_exe_unit); | ||
} else { | ||
if (allow_single_frag_table_opt && query_mem_descs.count(ExecutorDeviceType::GPU) && | ||
(query_mem_descs.at(ExecutorDeviceType::GPU)->getQueryDescriptionType() == | ||
QueryDescriptionType::Projection) && | ||
table_infos.size() == 1) { | ||
const auto max_frag_size = | ||
table_infos.front().info.getFragmentNumTuplesUpperBound(); | ||
if (max_frag_size < query_mem_descs.at(ExecutorDeviceType::GPU)->getEntryCount()) { | ||
LOG(INFO) << "Lowering scan limit from " | ||
<< query_mem_descs.at(ExecutorDeviceType::GPU)->getEntryCount() | ||
<< " to match max fragment size " << max_frag_size | ||
<< " for kernel per fragment execution path."; | ||
throw CompilationRetryNewScanLimit(max_frag_size); | ||
} | ||
} | ||
} | ||
|
||
size_t frag_list_idx{0}; | ||
auto fragment_per_kernel_dispatch = [&ra_exe_unit, | ||
&execution_kernels, | ||
&column_fetcher, | ||
&co, | ||
&eo, | ||
&frag_list_idx, | ||
&query_comp_descs, | ||
&query_mem_descs]( | ||
const int device_id, | ||
const FragmentsList& frag_list, | ||
const int64_t rowid_lookup_key, | ||
const ExecutorDeviceType device_type) { | ||
if (!frag_list.size()) { | ||
return; | ||
} | ||
CHECK_GE(device_id, 0); | ||
CHECK(query_comp_descs.count(device_type)); | ||
CHECK(query_mem_descs.count(device_type)); | ||
|
||
execution_kernels.emplace_back( | ||
std::make_unique<ExecutionKernel>(ra_exe_unit, | ||
device_type, | ||
device_id, | ||
co, | ||
eo, | ||
column_fetcher, | ||
*query_comp_descs.at(device_type).get(), | ||
*query_mem_descs.at(device_type).get(), | ||
frag_list, | ||
ExecutorDispatchMode::KernelPerFragment, | ||
rowid_lookup_key)); | ||
++frag_list_idx; | ||
}; | ||
size_t frag_list_idx{0}; | ||
auto fragment_per_kernel_dispatch = [&ra_exe_unit, | ||
&execution_kernels, | ||
&column_fetcher, | ||
&co, | ||
&eo, | ||
&frag_list_idx, | ||
&query_comp_descs, | ||
&query_mem_descs]( | ||
const int device_id, | ||
const FragmentsList& frag_list, | ||
const int64_t rowid_lookup_key, | ||
const ExecutorDeviceType device_type) { | ||
if (!frag_list.size()) { | ||
return; | ||
} | ||
CHECK_GE(device_id, 0); | ||
CHECK(query_comp_descs.count(device_type)); | ||
CHECK(query_mem_descs.count(device_type)); | ||
|
||
fragment_descriptor.assignFragsToKernelDispatch(fragment_per_kernel_dispatch, | ||
ra_exe_unit); | ||
execution_kernels.emplace_back( | ||
std::make_unique<ExecutionKernel>(ra_exe_unit, | ||
device_type, | ||
device_id, | ||
co, | ||
eo, | ||
column_fetcher, | ||
*query_comp_descs.at(device_type).get(), | ||
*query_mem_descs.at(device_type).get(), | ||
frag_list, | ||
ExecutorDispatchMode::KernelPerFragment, | ||
rowid_lookup_key)); | ||
++frag_list_idx; | ||
}; | ||
|
||
fragment_descriptor.assignFragsToKernelDispatch(fragment_per_kernel_dispatch, | ||
ra_exe_unit); | ||
} | ||
return execution_kernels; | ||
} | ||
|
||
|
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Having a separate method is a bit ugly (that was the reason we didn't merge it a long time ago), but I think we can live with that for now.