@@ -61,8 +61,8 @@ __global__ void launch_new_reduce_global_fcn(BODY body_in,
   RAJA::expt::invoke_body (reduce_params, body, ctx);
 
   // Using a flatten global policy as we may use all dimensions
-  RAJA::expt::ParamMultiplexer::params_combine (RAJA::cuda_flatten_global_xyz_direct{},
-                                                reduce_params);
+  RAJA::expt::ParamMultiplexer::params_combine (
+      RAJA::cuda_flatten_global_xyz_direct {}, reduce_params);
 }
 
 template <bool async>
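
For context, the `cuda_flatten_global_xyz_direct` policy named in the combine step linearizes all three thread and block dimensions into a single global index, so the reduction can span every thread in the launch regardless of how the grid was shaped. A minimal sketch of that flattening, written here for illustration and not taken from the RAJA sources:

// Illustration only (not RAJA source): flatten a 3D launch into one
// global thread id so a reduction can be combined across all threads.
__device__ int flatten_global_xyz()
{
  int block_id = blockIdx.x + blockIdx.y * gridDim.x +
                 blockIdx.z * gridDim.x * gridDim.y;
  int thread_id = threadIdx.x + threadIdx.y * blockDim.x +
                  threadIdx.z * blockDim.x * blockDim.y;
  int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
  return block_id * threads_per_block + thread_id;
}
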
@@ -186,8 +186,8 @@ struct LaunchExecute<
   {
     using EXEC_POL = RAJA::policy::cuda::cuda_launch_explicit_t <
         async, named_usage::unspecified, named_usage::unspecified>;
-    RAJA::expt::ParamMultiplexer::params_init (EXEC_POL{}, launch_reducers,
-                                               launch_info);
+    RAJA::expt::ParamMultiplexer::params_init (EXEC_POL {}, launch_reducers,
+                                               launch_info);
 
     //
     // Privatize the loop_body, using make_launch_body to setup reductions
@@ -203,8 +203,8 @@ struct LaunchExecute<
     RAJA::cuda::launch (func, gridSize, blockSize, args, shared_mem_size,
                         cuda_res, async, kernel_name);
 
-    RAJA::expt::ParamMultiplexer::params_resolve (EXEC_POL{}, launch_reducers,
-                                                  launch_info);
+    RAJA::expt::ParamMultiplexer::params_resolve (
+        EXEC_POL {}, launch_reducers, launch_info);
   }
 
   RAJA_FT_END;
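
Taken together, these hunks reformat the three phases of the reducer-parameter lifecycle that `ParamMultiplexer` drives: `params_init` on the host before the kernel launch, `params_combine` inside the kernel, and `params_resolve` on the host afterwards. A schematic of the host-side ordering, with the launch details elided; the wrapper name and simplified signature below are hypothetical, only the three `ParamMultiplexer` calls come from the diff itself:

// Hypothetical sketch of the ordering shown above; the real code sizes
// the grid, privatizes the body, and launches between init and resolve.
template <typename ExecPol, typename Reducers, typename LaunchInfo>
void reduce_lifecycle(Reducers& launch_reducers, LaunchInfo& launch_info)
{
  // Set up reduction parameter storage for this launch.
  RAJA::expt::ParamMultiplexer::params_init(ExecPol{}, launch_reducers,
                                            launch_info);

  // ... launch the kernel; device code calls params_combine ...

  // Fold the combined device results back into the user's reducers.
  RAJA::expt::ParamMultiplexer::params_resolve(ExecPol{}, launch_reducers,
                                               launch_info);
}
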
@@ -252,8 +252,8 @@ __launch_bounds__(num_threads, BLOCKS_PER_SM) __global__
   RAJA::expt::invoke_body (reduce_params, body, ctx);
 
   // Using a flatten global policy as we may use all dimensions
-  RAJA::expt::ParamMultiplexer::params_combine (RAJA::cuda_flatten_global_xyz_direct{},
-                                                reduce_params);
+  RAJA::expt::ParamMultiplexer::params_combine (
+      RAJA::cuda_flatten_global_xyz_direct {}, reduce_params);
 }
 
 template <bool async, int nthreads, size_t BLOCKS_PER_SM>
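
This second kernel variant carries an explicit `__launch_bounds__(num_threads, BLOCKS_PER_SM)` qualifier. As a reminder of what that does, a standalone example follows; the `kNumThreads`/`kBlocksPerSM` names and the kernel are illustrative, not RAJA code:

// Illustration only: __launch_bounds__ promises the compiler the kernel
// is never launched with more than kNumThreads threads per block, and
// asks it to keep register use low enough for kBlocksPerSM resident
// blocks per multiprocessor.
constexpr int kNumThreads  = 256;
constexpr int kBlocksPerSM = 2;

__launch_bounds__(kNumThreads, kBlocksPerSM) __global__
    void bounded_kernel(float* out)
{
  int i  = blockIdx.x * blockDim.x + threadIdx.x;
  out[i] = static_cast<float>(i);
}
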
@@ -375,12 +375,12 @@ struct LaunchExecute<
   launch_info.res = cuda_res;
 
   {
-    // Use a generic block size policy here to match that used in params_combine
-    using EXEC_POL =
-        RAJA::policy::cuda::cuda_launch_explicit_t <
-        async, named_usage::unspecified, named_usage::unspecified>;
-    RAJA::expt::ParamMultiplexer::params_init (EXEC_POL{}, launch_reducers,
-                                               launch_info);
+    // Use a generic block size policy here to match that used in
+    // params_combine
+    using EXEC_POL = RAJA::policy::cuda::cuda_launch_explicit_t <
+        async, named_usage::unspecified, named_usage::unspecified>;
+    RAJA::expt::ParamMultiplexer::params_init (EXEC_POL {}, launch_reducers,
+                                               launch_info);
 
     //
     // Privatize the loop_body, using make_launch_body to setup reductions
@@ -396,8 +396,8 @@ struct LaunchExecute<
     RAJA::cuda::launch (func, gridSize, blockSize, args, shared_mem_size,
                         cuda_res, async, kernel_name);
 
-    RAJA::expt::ParamMultiplexer::params_resolve (EXEC_POL{}, launch_reducers,
-                                                  launch_info);
+    RAJA::expt::ParamMultiplexer::params_resolve (
+        EXEC_POL {}, launch_reducers, launch_info);
   }
 
   RAJA_FT_END;