Skip to content

Commit 41fd2c2

Browse files
committed
* review
1 parent dfe2ddd commit 41fd2c2

File tree

3 files changed

+44
-44
lines changed

3 files changed

+44
-44
lines changed

apps/cpp_clml/clml_runner.cc

Lines changed: 29 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -59,12 +59,12 @@ CLMLRunner::CLMLRunner(std::string name, ToolArgs& args, cl_platform_id arg_plat
5959
cl_int majorVersions[MAX_VERSIONS];
6060
cl_int minorVersions[MAX_VERSIONS];
6161
cl_uint numVersions = 0;
62-
result = clQueryMLInterfaceVersionsQCOM(NULL, NULL, 0, &numVersions);
62+
result = clQueryMLInterfaceVersionsQCOM(nullptr, nullptr, 0, &numVersions);
6363
CLML_SDK_TEST_AND_EXIT(result == CL_SUCCESS);
6464
CLML_SDK_TEST_AND_EXIT(numVersions > 0u);
6565
CLML_SDK_TEST_AND_EXIT(numVersions <= MAX_VERSIONS);
6666

67-
result = clQueryMLInterfaceVersionsQCOM(majorVersions, minorVersions, numVersions, NULL);
67+
result = clQueryMLInterfaceVersionsQCOM(majorVersions, minorVersions, numVersions, nullptr);
6868
CLML_SDK_TEST_AND_EXIT(result == CL_SUCCESS);
6969

7070
for (cl_uint i = 0; i < numVersions; ++i) {
@@ -74,7 +74,7 @@ CLMLRunner::CLMLRunner(std::string name, ToolArgs& args, cl_platform_id arg_plat
7474
break;
7575
}
7676
}
77-
CLML_SDK_TEST_AND_EXIT(this->h_ClmlIntf != NULL);
77+
CLML_SDK_TEST_AND_EXIT(this->h_ClmlIntf != nullptr);
7878

7979
result = h_ClmlIntf->clCreateMLTuningCacheQCOM(&tuning_cache);
8080
CLML_SDK_TEST_AND_EXIT(result == CL_SUCCESS);
@@ -104,7 +104,7 @@ int CLMLRunner::Run(void) {
104104

105105
for (size_t i = 0; i < this->function.size(); ++i) {
106106
result =
107-
h_ClmlIntf->clEnqueueMLOpQCOM(queue, this->function[i], this->descriptorSet, 0, NULL, NULL);
107+
h_ClmlIntf->clEnqueueMLOpQCOM(queue, this->function[i], this->descriptorSet, 0, nullptr, nullptr);
108108
CLML_SDK_TEST_AND_EXIT(result == CL_SUCCESS);
109109
}
110110
if (!r_args.output.empty()) {
@@ -155,13 +155,13 @@ void CLMLRunner::PrintMetaInfo(void) { LOG(INFO) << "\n" << this->meta_info; }
155155
void CLMLRunner::CopyDataToCLMLTensor(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> tensor,
156156
void* data, cl_ml_tensor_layout_qcom layout) {
157157
cl_int result = 0;
158-
cl_event evt = NULL;
158+
cl_event evt = nullptr;
159159
result = h_ClmlIntf->clEnqueueWriteMLTensorDataQCOM(this->queue, data, layout, tensor->tensor,
160160
tensor->memory,
161161
0, // n waitlist
162-
NULL, // waitlist
162+
nullptr, // waitlist
163163
&evt); // event
164-
CLML_SDK_TEST_AND_EXIT((evt != NULL) && result == CL_SUCCESS);
164+
CLML_SDK_TEST_AND_EXIT((evt != nullptr) && result == CL_SUCCESS);
165165
}
166166

167167
/*!
@@ -173,12 +173,12 @@ void CLMLRunner::CopyDataToCLMLTensor(std::shared_ptr<cl_ml_tensor_memory_desc_q
173173
void CLMLRunner::CopyDataFromCLMLTensor(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> tensor,
174174
void* data, cl_ml_tensor_layout_qcom layout) {
175175
cl_int result = 0;
176-
cl_event readEvent = NULL;
176+
cl_event readEvent = nullptr;
177177
// Read the output tensor
178178
result = h_ClmlIntf->clEnqueueReadMLTensorDataQCOM(this->queue, tensor->tensor, tensor->memory,
179179
data, layout,
180180
0, // n waitlist
181-
NULL, // waitlist
181+
nullptr, // waitlist
182182
&readEvent); // event
183183
CLML_SDK_TEST_AND_EXIT(result == CL_SUCCESS);
184184
result = clWaitForEvents(1, &readEvent);
@@ -194,12 +194,12 @@ cl_int CLMLRunner::AllocateTensorMemory(
194194
std::shared_ptr<cl_ml_tensor_memory_desc_qcom> pTensorMemDesc) {
195195
uint32_t size = 0;
196196
cl_int result = CL_OUT_OF_HOST_MEMORY;
197-
cl_mem buffer = NULL;
197+
cl_mem buffer = nullptr;
198198

199199
result = h_ClmlIntf->clGetMLTensorMemorySizeQCOM(context, pTensorMemDesc->tensor, &size);
200200
CLML_SDK_TEST_AND_EXIT(result == CL_SUCCESS);
201201

202-
buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, size, NULL, &result);
202+
buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, size, nullptr, &result);
203203
CLML_SDK_TEST_AND_EXIT(result == CL_SUCCESS);
204204

205205
pTensorMemDesc->memory = buffer;
@@ -257,7 +257,7 @@ void CLMLRunner::MakeUnusedTensor(void) {
257257
cl_ml_tensor_desc_qcom desc = {};
258258
desc.num_dimensions = CL_TENSOR_UNUSED_QCOM;
259259
this->unusedTensor = std::make_shared<cl_ml_tensor_memory_desc_qcom>();
260-
result = this->h_ClmlIntf->clCreateMLTensorQCOM(this->context, NULL, &desc,
260+
result = this->h_ClmlIntf->clCreateMLTensorQCOM(this->context, nullptr, &desc,
261261
&(this->unusedTensor->tensor));
262262
CLML_SDK_TEST_AND_EXIT(this->unusedTensor && result == CL_SUCCESS);
263263
}
@@ -321,7 +321,7 @@ std::shared_ptr<cl_ml_tensor_memory_desc_qcom> CLMLRunner::MakeCLMLTensor(
321321
auto tensor_dsc = std::make_shared<cl_ml_tensor_memory_desc_qcom>();
322322
cl_ml_tensor_desc_qcom desc = {
323323
cl_dtype, layout, dims.n, dims.c, dims.h, dims.w, 0, CL_TENSOR_DIMENSIONS_4D_QCOM, {0}};
324-
result = this->h_ClmlIntf->clCreateMLTensorQCOM(this->context, NULL, &desc, &tensor_dsc->tensor);
324+
result = this->h_ClmlIntf->clCreateMLTensorQCOM(this->context, nullptr, &desc, &tensor_dsc->tensor);
325325
CLML_SDK_TEST_AND_EXIT(tensor_dsc->tensor && result == CL_SUCCESS);
326326
return tensor_dsc;
327327
}
@@ -372,7 +372,7 @@ void CLMLRunner::MakeConv2D(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> input
372372
{clml_dilation[0], clml_dilation[1]},
373373
0,
374374
cl_arithmetic_mode};
375-
cl_ml_op_qcom op = NULL;
375+
cl_ml_op_qcom op = nullptr;
376376
if (!has_act) {
377377
result = h_ClmlIntf->clCreateMLOpConvolutionForwardQCOM(
378378
this->context, 0, &conv_desc, input_desc->tensor, weight_desc->tensor, bias_desc->tensor,
@@ -381,7 +381,7 @@ void CLMLRunner::MakeConv2D(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> input
381381
} else {
382382
result = h_ClmlIntf->clCreateMLOpFusedConvolutionActivationForwardQCOM(
383383
this->context, 0, &conv_desc, &act_desc, input_desc->tensor, weight_desc->tensor,
384-
bias_desc->tensor, NULL, output_desc->tensor, &op, tuning_cache);
384+
bias_desc->tensor, nullptr, output_desc->tensor, &op, tuning_cache);
385385
CLML_SDK_TEST_AND_EXIT(op && result == CL_SUCCESS);
386386
}
387387
this->function.push_back(op);
@@ -443,7 +443,7 @@ void CLMLRunner::MakeConv2DWithBN(std::shared_ptr<cl_ml_tensor_memory_desc_qcom>
443443
{clml_dilation[0], clml_dilation[1]},
444444
0,
445445
cl_arithmetic_mode};
446-
cl_ml_op_qcom op = NULL;
446+
cl_ml_op_qcom op = nullptr;
447447
cl_ml_op_batchnorm_desc_qcom bn_desc = {CL_BATCHNORM_MODE_SPATIAL_QCOM, cl_arithmetic_mode};
448448
if (!has_act) {
449449
result = h_ClmlIntf->clCreateMLOpFusedConvolutionBatchNormForwardQCOM(
@@ -454,7 +454,7 @@ void CLMLRunner::MakeConv2DWithBN(std::shared_ptr<cl_ml_tensor_memory_desc_qcom>
454454
} else {
455455
result = h_ClmlIntf->clCreateMLOpFusedConvolutionBatchNormActivationForwardQCOM(
456456
this->context, 0, &conv_desc, &bn_desc, &act_desc, input_desc->tensor, weight_desc->tensor,
457-
bias_desc->tensor, output_desc->tensor, NULL, bn_mean->tensor, bn_var->tensor,
457+
bias_desc->tensor, output_desc->tensor, nullptr, bn_mean->tensor, bn_var->tensor,
458458
bn_scale->tensor, bn_bias->tensor, &op, tuning_cache);
459459
CLML_SDK_TEST_AND_EXIT(op && result == CL_SUCCESS);
460460
}
@@ -472,7 +472,7 @@ void CLMLRunner::MakeRelu(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> input_d
472472
std::shared_ptr<cl_ml_tensor_memory_desc_qcom> output_desc,
473473
cl_activation_function_qcom relu_type, std::string dtype) {
474474
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
475-
cl_ml_op_qcom op = NULL;
475+
cl_ml_op_qcom op = nullptr;
476476
cl_int result;
477477
cl_ml_op_activation_desc_qcom act_desc = {relu_type, CL_PROPAGATE_NAN_QCOM, cl_arithmetic_mode};
478478

@@ -502,7 +502,7 @@ void CLMLRunner::MakeBatchNorm(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> in
502502
std::shared_ptr<cl_ml_tensor_memory_desc_qcom> bn_var,
503503
std::vector<float> bn_attrs, std::string dtype) {
504504
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
505-
cl_ml_op_qcom op = NULL;
505+
cl_ml_op_qcom op = nullptr;
506506
cl_int result;
507507

508508
cl_ml_op_batchnorm_desc_qcom bn_desc = {CL_BATCHNORM_MODE_SPATIAL_QCOM, cl_arithmetic_mode};
@@ -531,7 +531,7 @@ void CLMLRunner::MakePool2D(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> input
531531
std::vector<cl_uint> padding, std::string pool_type,
532532
std::string dtype) {
533533
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
534-
cl_ml_op_qcom op = NULL;
534+
cl_ml_op_qcom op = nullptr;
535535
cl_int result;
536536

537537
cl_ml_op_pooling_desc_qcom pool_desc = {
@@ -567,7 +567,7 @@ void CLMLRunner::MakeGlobalPool2D(std::shared_ptr<cl_ml_tensor_memory_desc_qcom>
567567
std::vector<cl_uint> in_shape, std::string pool_type,
568568
std::string dtype) {
569569
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
570-
cl_ml_op_qcom op = NULL;
570+
cl_ml_op_qcom op = nullptr;
571571
cl_int result;
572572
cl_ml_op_pooling_desc_qcom pool_desc = {
573573
pool_type == "nn.global_max_pool2d" ? CL_POOLING_MODE_MAX_QCOM
@@ -599,7 +599,7 @@ void CLMLRunner::MakeReshape(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> inpu
599599
std::shared_ptr<cl_ml_tensor_memory_desc_qcom> output_desc,
600600
std::string dtype) {
601601
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
602-
cl_ml_op_qcom op = NULL;
602+
cl_ml_op_qcom op = nullptr;
603603
cl_int result;
604604

605605
result = h_ClmlIntf->clCreateMLOpReshapeQCOM(this->context, 0, input_desc->tensor,
@@ -620,7 +620,7 @@ void CLMLRunner::MakeConcatenate(
620620
std::vector<std::shared_ptr<cl_ml_tensor_memory_desc_qcom>> in_list,
621621
std::shared_ptr<cl_ml_tensor_memory_desc_qcom> output_desc, int axis, std::string dtype) {
622622
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
623-
cl_ml_op_qcom op = NULL;
623+
cl_ml_op_qcom op = nullptr;
624624
cl_int result;
625625

626626
cl_ml_tensor_qcom* concatInputs = new cl_ml_tensor_qcom[in_list.size()];
@@ -650,7 +650,7 @@ void CLMLRunner::MakeDense(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> input_
650650
std::shared_ptr<cl_ml_tensor_memory_desc_qcom> bias_desc,
651651
std::string dtype) {
652652
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
653-
cl_ml_op_qcom op = NULL;
653+
cl_ml_op_qcom op = nullptr;
654654
cl_int result;
655655

656656
cl_ml_op_convolution_desc_qcom conv_desc = {CL_CONVOLUTION_MODE_CONVOLUTION_QCOM,
@@ -681,7 +681,7 @@ void CLMLRunner::MakeSoftMax(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> inpu
681681
std::shared_ptr<cl_ml_tensor_memory_desc_qcom> output_desc,
682682
std::string dtype) {
683683
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
684-
cl_ml_op_qcom op = NULL;
684+
cl_ml_op_qcom op = nullptr;
685685
cl_int result;
686686

687687
cl_ml_op_softmax_desc_qcom softmax_desc = {CL_SOFTMAX_ALGORITHM_ACCURATE_QCOM,
@@ -706,7 +706,7 @@ void CLMLRunner::MakePad(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> input_de
706706
std::shared_ptr<cl_ml_tensor_memory_desc_qcom> output_desc,
707707
std::string pad_mode, std::vector<cl_uint> padding, std::string dtype) {
708708
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
709-
cl_ml_op_qcom op = NULL;
709+
cl_ml_op_qcom op = nullptr;
710710
cl_int result;
711711

712712
cl_pad_mode_qcom clml_pad_mode = CL_PAD_MODE_CONSTANT_QCOM;
@@ -741,7 +741,7 @@ void CLMLRunner::MakeBatchFlatten(std::shared_ptr<cl_ml_tensor_memory_desc_qcom>
741741
std::shared_ptr<cl_ml_tensor_memory_desc_qcom> output_desc,
742742
std::string dtype) {
743743
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
744-
cl_ml_op_qcom op = NULL;
744+
cl_ml_op_qcom op = nullptr;
745745
cl_int result;
746746

747747
result = h_ClmlIntf->clCreateMLOpReshapeQCOM(this->context, 0, input_desc->tensor,
@@ -763,7 +763,7 @@ void CLMLRunner::MakeClip(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> input_d
763763
float a_min, std::string dtype) {
764764
LOG(INFO) << "MakeClip called";
765765
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
766-
cl_ml_op_qcom op = NULL;
766+
cl_ml_op_qcom op = nullptr;
767767
cl_int result;
768768

769769
cl_ml_op_clip_desc_qcom clip_desc = {
@@ -788,7 +788,7 @@ void CLMLRunner::MakeBinaryOp(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> inp
788788
std::shared_ptr<cl_ml_tensor_memory_desc_qcom> output_desc,
789789
std::string op_name, std::string dtype) {
790790
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
791-
cl_ml_op_qcom op = NULL;
791+
cl_ml_op_qcom op = nullptr;
792792
cl_int result;
793793

794794
cl_binary_op_qcom binary_op = CL_TENSOR_OP_ADD_QCOM;

apps/cpp_clml/clml_runner.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -229,20 +229,20 @@ class CLMLRunner {
229229
/*! \brief ML API interface */
230230
GET_ML_API_INTERFACE* h_ClmlIntf = nullptr;
231231
/*! \brief Tuning cache object */
232-
cl_ml_tuningcache_qcom tuning_cache = NULL;
232+
cl_ml_tuningcache_qcom tuning_cache = nullptr;
233233
/*! \brief Flag to indicate a tuning run */
234234
bool is_tuning_run;
235235
/*! \brief The tuning file for loading or storing cache */
236236
char* tuning_file;
237237

238238
/*! \brief OpenCL platform */
239-
cl_platform_id platform{NULL};
239+
cl_platform_id platform{nullptr};
240240
/*! \brief OpenCL context */
241-
cl_context context{NULL};
241+
cl_context context{nullptr};
242242
/*! \brief OpenCL device */
243-
cl_device_id device_id{NULL};
243+
cl_device_id device_id{nullptr};
244244
/*! \brief OpenCL Queue */
245-
cl_command_queue queue{NULL};
245+
cl_command_queue queue{nullptr};
246246
/*! \brief Numpy object for params */
247247
cnpy::npz_t npz_params;
248248
/*! \brief Numpy object for inputs */

apps/cpp_clml/main.cc

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -148,11 +148,11 @@ void ParseCmdArgs(int argc, char* argv[], struct ToolArgs& args) {
148148
bool ExtensionStringPresent(cl_platform_id platform_id, cl_device_id device_id) {
149149
cl_int result = 0;
150150
size_t reqd_size = 0;
151-
result = clGetDeviceInfo(device_id, CL_DEVICE_EXTENSIONS, 0, NULL, &reqd_size);
151+
result = clGetDeviceInfo(device_id, CL_DEVICE_EXTENSIONS, 0, nullptr, &reqd_size);
152152
CLML_SDK_TEST_AND_EXIT(reqd_size > 0u && result == CL_SUCCESS);
153153

154154
std::vector<char> buf(reqd_size);
155-
result = clGetDeviceInfo(device_id, CL_DEVICE_EXTENSIONS, reqd_size, buf.data(), NULL);
155+
result = clGetDeviceInfo(device_id, CL_DEVICE_EXTENSIONS, reqd_size, buf.data(), nullptr);
156156
CLML_SDK_TEST_AND_EXIT(result == CL_SUCCESS);
157157

158158
std::string extensions(buf.data());
@@ -174,25 +174,25 @@ int ExecuteModel(ToolArgs& args) {
174174
// Init OpenCL Environment
175175
cl_int result;
176176
cl_event readEvent = nullptr;
177-
cl_platform_id platform = NULL;
178-
cl_context context = NULL;
179-
cl_device_id device_id = NULL;
180-
cl_command_queue queue = NULL;
177+
cl_platform_id platform = nullptr;
178+
cl_context context = nullptr;
179+
cl_device_id device_id = nullptr;
180+
cl_command_queue queue = nullptr;
181181

182182
// Initialize Context and Command Queue
183-
result = clGetPlatformIDs(1, &platform, NULL);
183+
result = clGetPlatformIDs(1, &platform, nullptr);
184184
CLML_SDK_TEST_AND_EXIT(result == CL_SUCCESS);
185185

186186
uint32_t num_devices = 0;
187-
result = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 0, NULL, &num_devices);
187+
result = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 0, nullptr, &num_devices);
188188
CLML_SDK_TEST_AND_EXIT(result == CL_SUCCESS && num_devices == 1);
189189

190-
result = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &device_id, NULL);
190+
result = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &device_id, nullptr);
191191
CLML_SDK_TEST_AND_EXIT(device_id && result == CL_SUCCESS);
192192

193193
CLML_SDK_TEST_AND_EXIT(ExtensionStringPresent(platform, device_id) == true);
194194

195-
context = clCreateContext(0, 1, &device_id, NULL, NULL, &result);
195+
context = clCreateContext(0, 1, &device_id, nullptr, nullptr, &result);
196196
CLML_SDK_TEST_AND_EXIT(result == CL_SUCCESS);
197197

198198
cl_command_queue_properties queue_props = 0;

0 commit comments

Comments
 (0)