Fix a bug in the asynchronous framework #1308

Merged · 2 commits · Jul 14, 2021
core/predictor/framework/bsf-inl.h: 4 changes (2 additions, 2 deletions; mode 100644 → 100755)

@@ -71,7 +71,7 @@ int TaskExecutor<TaskT>::start(uint32_t thread_num, uint32_t init_timeout_sec) {
     _thread_contexts.push_back(&contexts[i]);
   }
 
-  int init_timeout = init_timeout_sec * 1000 * 1000;
+  size_t init_timeout = init_timeout_sec * 1000 * 1000;
   bool has_error = false;
 
   bool has_timeout = true;
@@ -103,7 +103,7 @@ int TaskExecutor<TaskT>::start(uint32_t thread_num, uint32_t init_timeout_sec) {
     }
 
     // 100ms
-    const int sleep_interval = 100 * 1000;
+    const size_t sleep_interval = 100 * 1000;
     usleep(sleep_interval);
     init_timeout -= sleep_interval;
   }
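Note on the bsf-inl.h change: init_timeout_sec is converted to microseconds before the warm-up wait loop, and that product exceeds INT_MAX for timeouts of a few thousand seconds, so storing it in an int risks a negative value. Below is a minimal standalone sketch of the arithmetic; the variable names and the 3000-second timeout are illustrative, not the executor's actual code.

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t init_timeout_sec = 3000;  // hypothetical 50-minute init timeout

  // Old declaration: 3000 * 1000 * 1000 = 3,000,000,000, which does not fit
  // in a 32-bit signed int; the converted value is implementation-defined and
  // typically ends up as a large negative number.
  int old_timeout = init_timeout_sec * 1000 * 1000;

  // New declaration: the unsigned product is converted to size_t (64-bit on
  // common platforms), so the timeout keeps its intended value.
  size_t new_timeout = init_timeout_sec * 1000 * 1000;

  std::printf("old=%d new=%zu\n", old_timeout, new_timeout);
  return 0;
}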
core/predictor/framework/bsf.h: 23 changes (11 additions, 12 deletions; mode 100644 → 100755)

@@ -130,7 +130,7 @@ struct Task {
     if (!check_feedvar_valid(feedvar_index)) {
       return 0;
     }
-    int element_num = 1;
+    size_t element_num = 1;
     if ((*inVectorT_ptr)[feedvar_index].shape.size() == 1) {
       // cause shape[0] is batch_size.
       // [10,1] = [10], so if shape[1] doesn`t exist.
@@ -315,7 +315,7 @@ class BatchTasks {
     for (int index = 0; index < feedvar_num; ++index) {
       const paddle::PaddleTensor& feedVarTensor =
           (*tm.task->inVectorT_ptr)[index];
-      int feedvar_bytesize = tm.task->feedvar_bytesize(index);
+      size_t feedvar_bytesize = tm.task->feedvar_bytesize(index);
 
       if (ti == 0) {
         if (feedVarTensor.lod.size() > 0 && feedVarTensor.lod[0].size() > 0) {
@@ -336,11 +336,10 @@ class BatchTasks {
         _batch_in.push_back(paddleTensor);
       }
 
-      void* dst_ptr = _batch_in[index].data.data() +
-                      feedvar_bytesize * _batch_in_offset[index];
+      void* dst_ptr = _batch_in[index].data.data() + _batch_in_offset[index];
       void* source_ptr =
           feedVarTensor.data.data() + feedvar_bytesize * tm.begin;
-      int length = feedvar_bytesize * (tm.end - tm.begin);
+      size_t length = feedvar_bytesize * (tm.end - tm.begin);
       memcpy(dst_ptr, source_ptr, length);
       _batch_in_offset[index] += length;
     }
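The hunk above carries the main functional fix: _batch_in_offset[index] is advanced by length, which is already a byte count, so the old dst_ptr computation multiplied a byte offset by feedvar_bytesize a second time and wrote each subsequent task's slice too far into _batch_in. A small before/after sketch of the offset arithmetic follows; the buffer sizes and names are made up for illustration.

#include <cstddef>
#include <cstring>
#include <vector>

int main() {
  const size_t bytesize = 4;        // e.g. a float32 feed var
  const size_t elems_per_task = 3;  // elements contributed by each task meta

  std::vector<char> batch(64, 0);                       // stands in for _batch_in[index].data
  std::vector<char> src(elems_per_task * bytesize, 1);  // one task's slice

  size_t offset_bytes = 0;  // stands in for _batch_in_offset[index]

  for (int ti = 0; ti < 2; ++ti) {
    // Old code: batch.data() + bytesize * offset_bytes; after the first copy
    // offset_bytes == 12, so the second copy would start at byte 48, not 12.
    // New code: offset_bytes is already in bytes and is used directly.
    char* dst = batch.data() + offset_bytes;
    const size_t length = bytesize * elems_per_task;
    std::memcpy(dst, src.data(), length);
    offset_bytes += length;  // accumulate bytes, matching the new dst_ptr
  }
  return 0;
}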
@@ -395,7 +394,7 @@ class BatchTasks {
     if (!check_fetchvar_valid(fetchvar_index)) {
       return 0;
     }
-    int element_num = 1;
+    size_t element_num = 1;
     if (_batch_out[fetchvar_index].shape.size() == 1) {
       // cause shape[0] is batch_size.
       return 1;
@@ -457,7 +456,7 @@ class BatchTasks {
       // the task->outVectorT_ptr is null before core->run().
       // first time we should copy from _batch_out
       // so we need init.
-      int fetchvar_bytesize_index = fetchvar_bytesize(index);
+      size_t fetchvar_bytesize_index = fetchvar_bytesize(index);
       if (task->outVectorT_ptr->size() <= index) {
         paddle::PaddleTensor tensor_out;
         tensor_out.name = _batch_out[index].name;
@@ -475,7 +474,7 @@
 
       void* dst_ptr =
           fetchVarTensor.data.data() + fetchvar_bytesize_index * begin;
-      int length = fetchvar_bytesize_index * add;
+      size_t length = fetchvar_bytesize_index * add;
       if (_batch_out_offset[index] + length >
           fetchvar_batch_size() * fetchvar_bytesize(index)) {
         LOG(ERROR) << "_batch_out is less than taskmeta, error.";
@@ -507,11 +506,11 @@ class BatchTasks {
  private:
  std::vector<TaskMetaT> _taskmeta_vector;
  typename TaskT::InVectorT _batch_in;
- std::vector<int> _batch_in_offset;
- std::vector<int> _realNumber_batch_in;
+ std::vector<size_t> _batch_in_offset;
+ std::vector<size_t> _realNumber_batch_in;
  typename TaskT::OutVectorT _batch_out;
- std::vector<int> _batch_out_offset;
- std::vector<int> _realNumber_batch_out;
+ std::vector<size_t> _batch_out_offset;
+ std::vector<size_t> _realNumber_batch_out;
  size_t _rem_size;
  size_t _batch_size;
  bool _batch_align;
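The remaining bsf.h changes are type tightening: element counts, per-element byte sizes, copy lengths, and the offset bookkeeping vectors all move from int to size_t, which avoids 32-bit truncation once a batched feed or fetch buffer grows past roughly 2 GB. A short sketch of the failure mode the wider type avoids; the element count below is hypothetical.

#include <cstddef>
#include <cstdio>

int main() {
  const size_t bytesize = 4;               // float32
  const size_t element_num = 700'000'000;  // hypothetical very large batch

  // Old style: the ~2.8 GB byte length is truncated when squeezed into an
  // int, typically producing a negative length.
  int length_old = static_cast<int>(bytesize * element_num);

  // New style: size_t carries the full byte length.
  size_t length_new = bytesize * element_num;

  std::printf("old=%d new=%zu\n", length_old, length_new);
  return 0;
}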