diff --git a/tests/cpp/engine/threaded_engine_test.cc b/tests/cpp/engine/threaded_engine_test.cc
index 405f3b30a176..4532da806d87 100644
--- a/tests/cpp/engine/threaded_engine_test.cc
+++ b/tests/cpp/engine/threaded_engine_test.cc
@@ -42,7 +42,7 @@
  * present the following workload
  * n = reads.size()
  * data[write] = (data[reads[0]] + ... data[reads[n]]) / n
- * std::this_thread::sleep_for(std::chrono::microsecons(time));
+ * std::this_thread::sleep_for(std::chrono::microseconds(time));
  */
 struct Workload {
   std::vector<int> reads;
@@ -76,7 +76,7 @@ void GenerateWorkload(int num_workloads, int num_var,
 /**
  * evaluate a single workload
  */
-void EvaluateWorload(const Workload& wl, std::vector<double>* data) {
+void EvaluateWorkload(const Workload &wl, std::vector<double> *data) {
   double tmp = 0;
   for (int i : wl.reads) tmp += data->at(i);
   data->at(wl.write) = tmp / (wl.reads.size() + 1);
@@ -88,9 +88,9 @@ void EvaluateWorload(const Workload& wl, std::vector<double>* data) {
 /**
  * evaluate a list of workload, return the time used
  */
-double EvaluateWorloads(const std::vector<Workload>& workloads,
-                        mxnet::Engine* engine,
-                        std::vector<double>* data) {
+double EvaluateWorkloads(const std::vector<Workload> &workloads,
+                         mxnet::Engine *engine,
+                         std::vector<double> *data) {
   using namespace mxnet;
   double t = dmlc::GetTime();
   std::vector<Engine::VarHandle> vars;
@@ -103,10 +103,10 @@ double EvaluateWorloads(const std::vector<Workload>& workloads,
   for (const auto& wl : workloads) {
     if (wl.reads.size() == 0) continue;
     if (engine == NULL) {
-      EvaluateWorload(wl, data);
+      EvaluateWorkload(wl, data);
     } else {
       auto func = [wl, data](RunContext ctx, Engine::CallbackOnComplete cb) {
-        EvaluateWorload(wl, data); cb();
+        EvaluateWorkload(wl, data); cb();
       };
       std::vector<Engine::VarHandle> reads;
       for (auto i : wl.reads) {
@@ -159,7 +159,7 @@ TEST(Engine, RandSumExpr) {
   std::vector<std::vector<double>> data(num_engine);
   for (int i = 0; i < num_engine; ++i) {
     data[i].resize(num_var, 1.0);
-    t[i] += EvaluateWorloads(workloads, engine[i], &data[i]);
+    t[i] += EvaluateWorkloads(workloads, engine[i], &data[i]);
   }
 
   for (int i = 1; i < num_engine; ++i) {
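
For reference, a minimal, self-contained sketch of the workload semantics the test exercises. `Workload` and `EvaluateWorkload` mirror the renamed definitions in the patch; the small `main()` driver and its values are illustrative assumptions, not part of the test file.

// Standalone sketch of the workload evaluated by threaded_engine_test.cc.
// Workload and EvaluateWorkload follow the diff above; main() is only an
// example driver and is not part of the actual test.
#include <cstdio>
#include <vector>

struct Workload {
  std::vector<int> reads;  // indices of the variables read by this op
  int write;               // index of the variable written by this op
  int time;                // simulated run time in microseconds
};

// Sums the read variables and stores the scaled result in the write slot.
// Note the divisor is reads.size() + 1, exactly as in the test code.
void EvaluateWorkload(const Workload &wl, std::vector<double> *data) {
  double tmp = 0;
  for (int i : wl.reads) tmp += data->at(i);
  data->at(wl.write) = tmp / (wl.reads.size() + 1);
}

int main() {
  std::vector<double> data(4, 1.0);  // four variables, initialized to 1.0 as in the test
  Workload wl{{0, 1, 2}, 3, 0};      // read vars 0..2, write the result into var 3
  EvaluateWorkload(wl, &data);
  std::printf("data[3] = %f\n", data[3]);  // (1 + 1 + 1) / (3 + 1) = 0.75
  return 0;
}

When run through the MXNet engine instead of serially, EvaluateWorkloads wraps this same call in a lambda and pushes it with the read and write variable handles, so the engine can schedule independent workloads concurrently while serializing conflicting ones.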