Skip to content

Commit 3ed1b70

Browse files
committed
Apply style fixes
1 parent 4f40f71 commit 3ed1b70

File tree

20 files changed

+83
-95
lines changed

20 files changed

+83
-95
lines changed

include/RAJA/pattern/params/forall.hpp

+3-3
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ struct ForallParamPack
4949
Args&&... args)
5050
{
5151
CAMP_EXPAND(param_init(pol, camp::get<Seq>(f_params.param_tup),
52-
std::forward<Args>(args)...));
52+
std::forward<Args>(args)...));
5353
}
5454

5555
// Combine
@@ -61,7 +61,7 @@ struct ForallParamPack
6161
const ForallParamPack& in)
6262
{
6363
CAMP_EXPAND(param_combine(pol, camp::get<Seq>(out.param_tup),
64-
camp::get<Seq>(in.param_tup)));
64+
camp::get<Seq>(in.param_tup)));
6565
}
6666

6767
template<typename EXEC_POL, camp::idx_t... Seq>
@@ -81,7 +81,7 @@ struct ForallParamPack
8181
Args&&... args)
8282
{
8383
CAMP_EXPAND(param_resolve(pol, camp::get<Seq>(f_params.param_tup),
84-
std::forward<Args>(args)...));
84+
std::forward<Args>(args)...));
8585
}
8686

8787
// Used to construct the argument TYPES that will be invoked with the lambda.

include/RAJA/policy/cuda/forall.hpp

+4-4
Original file line numberDiff line numberDiff line change
@@ -445,7 +445,7 @@ __launch_bounds__(BlockSize, BlocksPerSM) __global__
445445
{
446446
RAJA::expt::invoke_body(f_params, body, idx[ii]);
447447
}
448-
RAJA::expt::ParamMultiplexer::params_combine(EXEC_POL{}, f_params);
448+
RAJA::expt::ParamMultiplexer::params_combine(EXEC_POL {}, f_params);
449449
}
450450

451451
///
@@ -474,7 +474,7 @@ __global__ void forallp_cuda_kernel(LOOP_BODY loop_body,
474474
{
475475
RAJA::expt::invoke_body(f_params, body, idx[ii]);
476476
}
477-
RAJA::expt::ParamMultiplexer::params_combine(EXEC_POL{}, f_params);
477+
RAJA::expt::ParamMultiplexer::params_combine(EXEC_POL {}, f_params);
478478
}
479479

480480
template<
@@ -565,7 +565,7 @@ __launch_bounds__(BlockSize, BlocksPerSM) __global__
565565
{
566566
RAJA::expt::invoke_body(f_params, body, idx[ii]);
567567
}
568-
RAJA::expt::ParamMultiplexer::params_combine(EXEC_POL{}, f_params);
568+
RAJA::expt::ParamMultiplexer::params_combine(EXEC_POL {}, f_params);
569569
}
570570

571571
///
@@ -597,7 +597,7 @@ __global__ void forallp_cuda_kernel(LOOP_BODY loop_body,
597597
{
598598
RAJA::expt::invoke_body(f_params, body, idx[ii]);
599599
}
600-
RAJA::expt::ParamMultiplexer::params_combine(EXEC_POL{}, f_params);
600+
RAJA::expt::ParamMultiplexer::params_combine(EXEC_POL {}, f_params);
601601
}
602602

603603
} // namespace impl

include/RAJA/policy/cuda/launch.hpp

+16-16
Original file line numberDiff line numberDiff line change
@@ -61,8 +61,8 @@ __global__ void launch_new_reduce_global_fcn(BODY body_in,
6161
RAJA::expt::invoke_body(reduce_params, body, ctx);
6262

6363
// Using a flatten global policy as we may use all dimensions
64-
RAJA::expt::ParamMultiplexer::params_combine(RAJA::cuda_flatten_global_xyz_direct{},
65-
reduce_params);
64+
RAJA::expt::ParamMultiplexer::params_combine(
65+
RAJA::cuda_flatten_global_xyz_direct {}, reduce_params);
6666
}
6767

6868
template<bool async>
@@ -186,8 +186,8 @@ struct LaunchExecute<
186186
{
187187
using EXEC_POL = RAJA::policy::cuda::cuda_launch_explicit_t<
188188
async, named_usage::unspecified, named_usage::unspecified>;
189-
RAJA::expt::ParamMultiplexer::params_init(EXEC_POL{}, launch_reducers,
190-
launch_info);
189+
RAJA::expt::ParamMultiplexer::params_init(EXEC_POL {}, launch_reducers,
190+
launch_info);
191191

192192
//
193193
// Privatize the loop_body, using make_launch_body to setup reductions
@@ -203,8 +203,8 @@ struct LaunchExecute<
203203
RAJA::cuda::launch(func, gridSize, blockSize, args, shared_mem_size,
204204
cuda_res, async, kernel_name);
205205

206-
RAJA::expt::ParamMultiplexer::params_resolve(EXEC_POL{}, launch_reducers,
207-
launch_info);
206+
RAJA::expt::ParamMultiplexer::params_resolve(
207+
EXEC_POL {}, launch_reducers, launch_info);
208208
}
209209

210210
RAJA_FT_END;
@@ -252,8 +252,8 @@ __launch_bounds__(num_threads, BLOCKS_PER_SM) __global__
252252
RAJA::expt::invoke_body(reduce_params, body, ctx);
253253

254254
// Using a flatten global policy as we may use all dimensions
255-
RAJA::expt::ParamMultiplexer::params_combine(RAJA::cuda_flatten_global_xyz_direct{},
256-
reduce_params);
255+
RAJA::expt::ParamMultiplexer::params_combine(
256+
RAJA::cuda_flatten_global_xyz_direct {}, reduce_params);
257257
}
258258

259259
template<bool async, int nthreads, size_t BLOCKS_PER_SM>
@@ -375,12 +375,12 @@ struct LaunchExecute<
375375
launch_info.res = cuda_res;
376376

377377
{
378-
// Use a generic block size policy here to match that used in params_combine
379-
using EXEC_POL =
380-
RAJA::policy::cuda::cuda_launch_explicit_t<
381-
async, named_usage::unspecified, named_usage::unspecified>;
382-
RAJA::expt::ParamMultiplexer::params_init(EXEC_POL{}, launch_reducers,
383-
launch_info);
378+
// Use a generic block size policy here to match that used in
379+
// params_combine
380+
using EXEC_POL = RAJA::policy::cuda::cuda_launch_explicit_t<
381+
async, named_usage::unspecified, named_usage::unspecified>;
382+
RAJA::expt::ParamMultiplexer::params_init(EXEC_POL {}, launch_reducers,
383+
launch_info);
384384

385385
//
386386
// Privatize the loop_body, using make_launch_body to setup reductions
@@ -396,8 +396,8 @@ struct LaunchExecute<
396396
RAJA::cuda::launch(func, gridSize, blockSize, args, shared_mem_size,
397397
cuda_res, async, kernel_name);
398398

399-
RAJA::expt::ParamMultiplexer::params_resolve(EXEC_POL{}, launch_reducers,
400-
launch_info);
399+
RAJA::expt::ParamMultiplexer::params_resolve(
400+
EXEC_POL {}, launch_reducers, launch_info);
401401
}
402402

403403
RAJA_FT_END;

include/RAJA/policy/cuda/params/reduce.hpp

+1-2
Original file line numberDiff line numberDiff line change
@@ -35,8 +35,7 @@ camp::concepts::enable_if<type_traits::is_cuda_policy<EXEC_POL>> param_init(
3535
template<typename EXEC_POL, typename OP, typename T, typename VOp>
3636
RAJA_HOST_DEVICE camp::concepts::enable_if<
3737
type_traits::is_cuda_policy<EXEC_POL>>
38-
param_combine(EXEC_POL const&,
39-
Reducer<OP, T, VOp>& red)
38+
param_combine(EXEC_POL const&, Reducer<OP, T, VOp>& red)
4039
{
4140
RAJA::cuda::impl::expt::grid_reduce<typename EXEC_POL::IterationGetter, OP>(
4241
red.devicetarget, red.getVal(), red.device_mem, red.device_count);

include/RAJA/policy/hip/forall.hpp

+12-11
Original file line numberDiff line numberDiff line change
@@ -443,7 +443,7 @@ __launch_bounds__(BlockSize, 1) __global__
443443
{
444444
RAJA::expt::invoke_body(f_params, body, idx[ii]);
445445
}
446-
RAJA::expt::ParamMultiplexer::params_combine(EXEC_POL{}, f_params);
446+
RAJA::expt::ParamMultiplexer::params_combine(EXEC_POL {}, f_params);
447447
}
448448

449449
///
@@ -471,7 +471,7 @@ __global__ void forallp_hip_kernel(LOOP_BODY loop_body,
471471
{
472472
RAJA::expt::invoke_body(f_params, body, idx[ii]);
473473
}
474-
RAJA::expt::ParamMultiplexer::params_combine(EXEC_POL{}, f_params);
474+
RAJA::expt::ParamMultiplexer::params_combine(EXEC_POL {}, f_params);
475475
}
476476

477477
template<
@@ -559,7 +559,7 @@ __launch_bounds__(BlockSize, 1) __global__
559559
{
560560
RAJA::expt::invoke_body(f_params, body, idx[ii]);
561561
}
562-
RAJA::expt::ParamMultiplexer::params_combine(EXEC_POL{}, f_params);
562+
RAJA::expt::ParamMultiplexer::params_combine(EXEC_POL {}, f_params);
563563
}
564564

565565
///
@@ -590,7 +590,7 @@ __global__ void forallp_hip_kernel(LOOP_BODY loop_body,
590590
{
591591
RAJA::expt::invoke_body(f_params, body, idx[ii]);
592592
}
593-
RAJA::expt::ParamMultiplexer::params_combine(EXEC_POL{}, f_params);
593+
RAJA::expt::ParamMultiplexer::params_combine(EXEC_POL {}, f_params);
594594
}
595595

596596
} // namespace impl
@@ -696,13 +696,14 @@ RAJA_INLINE concepts::enable_if_t<
696696
RAJA::expt::type_traits::is_ForallParamPack<ForallParam>,
697697
concepts::negate<
698698
RAJA::expt::type_traits::is_ForallParamPack_empty<ForallParam>>>
699-
forall_impl(
700-
resources::Hip hip_res,
701-
::RAJA::policy::hip::
702-
hip_exec<IterationMapping, IterationGetter, Concretizer, Async> const& pol,
703-
Iterable&& iter,
704-
LoopBody&& loop_body,
705-
ForallParam f_params)
699+
forall_impl(resources::Hip hip_res,
700+
::RAJA::policy::hip::hip_exec<IterationMapping,
701+
IterationGetter,
702+
Concretizer,
703+
Async> const& pol,
704+
Iterable&& iter,
705+
LoopBody&& loop_body,
706+
ForallParam f_params)
706707
{
707708
using Iterator = camp::decay<decltype(std::begin(iter))>;
708709
using LOOP_BODY = camp::decay<LoopBody>;

include/RAJA/policy/hip/launch.hpp

+12-12
Original file line numberDiff line numberDiff line change
@@ -61,8 +61,8 @@ __global__ void launch_new_reduce_global_fcn(BODY body_in,
6161
RAJA::expt::invoke_body(reduce_params, body, ctx);
6262

6363
// Using a flatten global policy as we may use all dimensions
64-
RAJA::expt::ParamMultiplexer::params_combine(RAJA::hip_flatten_global_xyz_direct{},
65-
reduce_params);
64+
RAJA::expt::ParamMultiplexer::params_combine(
65+
RAJA::hip_flatten_global_xyz_direct {}, reduce_params);
6666
}
6767

6868
template<bool async>
@@ -184,8 +184,8 @@ struct LaunchExecute<
184184
{
185185
using EXEC_POL =
186186
RAJA::policy::hip::hip_launch_t<async, named_usage::unspecified>;
187-
RAJA::expt::ParamMultiplexer::params_init(EXEC_POL{}, launch_reducers,
188-
launch_info);
187+
RAJA::expt::ParamMultiplexer::params_init(EXEC_POL {}, launch_reducers,
188+
launch_info);
189189

190190
//
191191
// Privatize the loop_body, using make_launch_body to setup reductions
@@ -201,8 +201,8 @@ struct LaunchExecute<
201201
RAJA::hip::launch(func, gridSize, blockSize, args, shared_mem_size,
202202
hip_res, async, kernel_name);
203203

204-
RAJA::expt::ParamMultiplexer::params_resolve(EXEC_POL{}, launch_reducers,
205-
launch_info);
204+
RAJA::expt::ParamMultiplexer::params_resolve(
205+
EXEC_POL {}, launch_reducers, launch_info);
206206
}
207207

208208
RAJA_FT_END;
@@ -247,8 +247,8 @@ __launch_bounds__(num_threads, 1) __global__
247247
RAJA::expt::invoke_body(reduce_params, body, ctx);
248248

249249
// Using a flatten global policy as we may use all dimensions
250-
RAJA::expt::ParamMultiplexer::params_combine(RAJA::hip_flatten_global_xyz_direct{},
251-
reduce_params);
250+
RAJA::expt::ParamMultiplexer::params_combine(
251+
RAJA::hip_flatten_global_xyz_direct {}, reduce_params);
252252
}
253253

254254
template<bool async, int nthreads>
@@ -371,8 +371,8 @@ struct LaunchExecute<RAJA::policy::hip::hip_launch_t<async, nthreads>>
371371
{
372372
using EXEC_POL =
373373
RAJA::policy::hip::hip_launch_t<async, named_usage::unspecified>;
374-
RAJA::expt::ParamMultiplexer::params_init(EXEC_POL{}, launch_reducers,
375-
launch_info);
374+
RAJA::expt::ParamMultiplexer::params_init(EXEC_POL {}, launch_reducers,
375+
launch_info);
376376

377377
//
378378
// Privatize the loop_body, using make_launch_body to setup reductions
@@ -388,8 +388,8 @@ struct LaunchExecute<RAJA::policy::hip::hip_launch_t<async, nthreads>>
388388
RAJA::hip::launch(func, gridSize, blockSize, args, shared_mem_size,
389389
hip_res, async, kernel_name);
390390

391-
RAJA::expt::ParamMultiplexer::params_resolve(EXEC_POL{}, launch_reducers,
392-
launch_info);
391+
RAJA::expt::ParamMultiplexer::params_resolve(
392+
EXEC_POL {}, launch_reducers, launch_info);
393393
}
394394

395395
RAJA_FT_END;

include/RAJA/policy/hip/params/kernel_name.hpp

+1-2
Original file line numberDiff line numberDiff line change
@@ -35,8 +35,7 @@ camp::concepts::enable_if<type_traits::is_hip_policy<EXEC_POL>> param_init(
3535
// Combine
3636
template<typename EXEC_POL>
3737
RAJA_HOST_DEVICE camp::concepts::enable_if<type_traits::is_hip_policy<EXEC_POL>>
38-
param_combine(EXEC_POL const&,
39-
KernelName&)
38+
param_combine(EXEC_POL const&, KernelName&)
4039
{}
4140

4241
// Resolve

include/RAJA/policy/hip/params/reduce.hpp

+1-2
Original file line numberDiff line numberDiff line change
@@ -32,8 +32,7 @@ camp::concepts::enable_if<type_traits::is_hip_policy<EXEC_POL>> param_init(
3232
// Combine
3333
template<typename EXEC_POL, typename OP, typename T, typename VOp>
3434
RAJA_HOST_DEVICE camp::concepts::enable_if<type_traits::is_hip_policy<EXEC_POL>>
35-
param_combine(EXEC_POL const&,
36-
Reducer<OP, T, VOp>& red)
35+
param_combine(EXEC_POL const&, Reducer<OP, T, VOp>& red)
3736
{
3837
RAJA::hip::impl::expt::grid_reduce<typename EXEC_POL::IterationGetter, OP>(
3938
red.devicetarget, red.getVal(), red.device_mem, red.device_count);

include/RAJA/policy/openmp/launch.hpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ struct LaunchExecute<RAJA::omp_launch_t>
7070
{
7171

7272
using EXEC_POL = RAJA::omp_launch_t;
73-
EXEC_POL pol{};
73+
EXEC_POL pol {};
7474

7575
expt::ParamMultiplexer::params_init(pol, f_params);
7676

include/RAJA/policy/openmp/params/kernel_name.hpp

+4-7
Original file line numberDiff line numberDiff line change
@@ -23,17 +23,14 @@ camp::concepts::enable_if<type_traits::is_openmp_policy<EXEC_POL>> param_init(
2323

2424
// Combine
2525
template<typename EXEC_POL, typename T>
26-
camp::concepts::enable_if<type_traits::is_openmp_policy<EXEC_POL>> param_combine(
27-
EXEC_POL const&,
28-
KernelName&,
29-
T& /*place holder argument*/)
26+
camp::concepts::enable_if<type_traits::is_openmp_policy<EXEC_POL>>
27+
param_combine(EXEC_POL const&, KernelName&, T& /*place holder argument*/)
3028
{}
3129

3230
// Resolve
3331
template<typename EXEC_POL>
34-
camp::concepts::enable_if<type_traits::is_openmp_policy<EXEC_POL>> param_resolve(
35-
EXEC_POL const&,
36-
KernelName&)
32+
camp::concepts::enable_if<type_traits::is_openmp_policy<EXEC_POL>>
33+
param_resolve(EXEC_POL const&, KernelName&)
3734
{
3835
// TODO: Define kernel naming
3936
}

include/RAJA/policy/openmp/params/reduce.hpp

+6-7
Original file line numberDiff line numberDiff line change
@@ -23,19 +23,18 @@ camp::concepts::enable_if<type_traits::is_openmp_policy<EXEC_POL>> param_init(
2323

2424
// Combine
2525
template<typename EXEC_POL, typename OP, typename T, typename VOp>
26-
camp::concepts::enable_if<type_traits::is_openmp_policy<EXEC_POL>> param_combine(
27-
EXEC_POL const&,
28-
Reducer<OP, T, VOp>& out,
29-
const Reducer<OP, T, VOp>& in)
26+
camp::concepts::enable_if<type_traits::is_openmp_policy<EXEC_POL>>
27+
param_combine(EXEC_POL const&,
28+
Reducer<OP, T, VOp>& out,
29+
const Reducer<OP, T, VOp>& in)
3030
{
3131
out.m_valop.val = OP {}(out.m_valop.val, in.m_valop.val);
3232
}
3333

3434
// Resolve
3535
template<typename EXEC_POL, typename OP, typename T, typename VOp>
36-
camp::concepts::enable_if<type_traits::is_openmp_policy<EXEC_POL>> param_resolve(
37-
EXEC_POL const&,
38-
Reducer<OP, T, VOp>& red)
36+
camp::concepts::enable_if<type_traits::is_openmp_policy<EXEC_POL>>
37+
param_resolve(EXEC_POL const&, Reducer<OP, T, VOp>& red)
3938
{
4039
red.combineTarget(red.m_valop.val);
4140
}

include/RAJA/policy/openmp_target/params/kernel_name.hpp

+4-7
Original file line numberDiff line numberDiff line change
@@ -14,25 +14,22 @@ namespace detail
1414

1515
// Init
1616
template<typename EXEC_POL>
17-
camp::concepts::enable_if<type_traits::is_target_openmp_policy<EXEC_POL>> param_init(
18-
EXEC_POL const&,
19-
KernelName&)
17+
camp::concepts::enable_if<type_traits::is_target_openmp_policy<EXEC_POL>>
18+
param_init(EXEC_POL const&, KernelName&)
2019
{
2120
// TODO: Define kernel naming
2221
}
2322

2423
// Combine
2524
template<typename EXEC_POL, typename T>
2625
camp::concepts::enable_if<type_traits::is_target_openmp_policy<EXEC_POL>>
27-
param_combine(EXEC_POL const&,
28-
KernelName&, T& /*place holder argument*/)
26+
param_combine(EXEC_POL const&, KernelName&, T& /*place holder argument*/)
2927
{}
3028

3129
// Resolve
3230
template<typename EXEC_POL>
3331
camp::concepts::enable_if<type_traits::is_target_openmp_policy<EXEC_POL>>
34-
param_resolve(EXEC_POL const&,
35-
KernelName&)
32+
param_resolve(EXEC_POL const&, KernelName&)
3633
{
3734
// TODO: Define kernel naming
3835
}

0 commit comments

Comments (0)