Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Unified clang format #1536

Merged
merged 5 commits into from
Jan 17, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
3 changes: 0 additions & 3 deletions .clang-format
Original file line number Diff line number Diff line change
@@ -1,7 +1,4 @@
---
Language: Cpp
BasedOnStyle: LLVM
IndentWidth: 4
InsertBraces: true
ReflowComments: false
...
13 changes: 11 additions & 2 deletions .editorconfig
Original file line number Diff line number Diff line change
@@ -1,6 +1,15 @@
[*]
indent_style = space
indent_size = 2

[*.py]
indent_size = 4
kbenzie marked this conversation as resolved.
Show resolved Hide resolved

[.github/workflows/*.yml]
indent_size = 2
[scripts/core/*]
indent_size = 4

[CMakeLists.txt]
indent_size = 4

[*.cmake]
indent_size = 4
4 changes: 2 additions & 2 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -236,13 +236,13 @@ endif()

# Check if clang-format (in correct version) is available for Cpp code formatting.
if(UR_FORMAT_CPP_STYLE)
find_program(CLANG_FORMAT NAMES clang-format-15 clang-format-15.0 clang-format)
find_program(CLANG_FORMAT NAMES clang-format-18 clang-format-18.1 clang-format)

if(CLANG_FORMAT)
get_program_version_major_minor(${CLANG_FORMAT} CLANG_FORMAT_VERSION)
message(STATUS "Found clang-format: ${CLANG_FORMAT} (version: ${CLANG_FORMAT_VERSION})")

set(CLANG_FORMAT_REQUIRED "15.0")
set(CLANG_FORMAT_REQUIRED "18.1")
if(NOT (CLANG_FORMAT_VERSION VERSION_EQUAL CLANG_FORMAT_REQUIRED))
message(FATAL_ERROR "required clang-format version is ${CLANG_FORMAT_REQUIRED}")
endif()
Expand Down
6 changes: 3 additions & 3 deletions REQUIREMENTS.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,9 @@
## API and Library Requirements

### API Namespace
* APIs will clearly define the functionality
* APIs will use a consistent style within the library
* APIs will not allow namespace collision when multiple libraries are linked in the same user application
* API will be consistent across libraries included in the product
* Will not allow namespace collision when multiple libraries are linked in the same user application
* Exceptions:
Expand Down
258 changes: 130 additions & 128 deletions examples/codegen/codegen.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2,16 +2,19 @@
 *
 * Copyright (C) 2023 Intel Corporation
 *
 * Part of the Unified-Runtime Project, under the Apache License v2.0 with LLVM
 * Exceptions. See LICENSE.TXT
 *
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 * @file codegen.cpp
 *
 * @brief UR code generation and execution example for use with the Level Zero
 * adapter.
 *
 * The codegen example demonstrates a complete flow for generating LLVM IR,
 * translating it to SPIR-V, and submitting the kernel to Level Zero Runtime via
 * UR API.
 */

#include <iostream>
Expand All @@ -23,154 +26,153 @@
constexpr unsigned PAGE_SIZE = 4096;

// Abort the example on any UR failure: tear the loader down first so adapter
// resources are released, then surface the numeric error code to the caller.
// (Diff-scrape artifact removed: the old and new bodies were both present,
// running the check twice.)
void ur_check(const ur_result_t r) {
  if (r != UR_RESULT_SUCCESS) {
    urLoaderTearDown();
    throw std::runtime_error("Unified runtime error: " + std::to_string(r));
  }
}

// Enumerate all available UR adapters using the standard two-call pattern:
// first query the count, then fill a correctly-sized vector.
// Throws std::runtime_error if no adapter is installed.
// (Diff-scrape artifact removed: adapterCount/adapters were declared twice.)
std::vector<ur_adapter_handle_t> get_adapters() {
  uint32_t adapterCount = 0;
  ur_check(urAdapterGet(0, nullptr, &adapterCount));

  if (!adapterCount) {
    throw std::runtime_error("No adapters available.");
  }

  std::vector<ur_adapter_handle_t> adapters(adapterCount);
  ur_check(urAdapterGet(adapterCount, adapters.data(), nullptr));
  return adapters;
}

// Filter the adapter list down to those backed by Level Zero, which is the
// only backend this example's SPIR-V submission path targets.
// Returns an empty vector if none match (callers handle that downstream).
// (Diff-scrape artifact removed: the loop body appeared twice.)
std::vector<ur_adapter_handle_t>
get_supported_adapters(std::vector<ur_adapter_handle_t> &adapters) {
  std::vector<ur_adapter_handle_t> supported_adapters;
  for (auto adapter : adapters) {
    ur_adapter_backend_t backend;
    ur_check(urAdapterGetInfo(adapter, UR_ADAPTER_INFO_BACKEND,
                              sizeof(ur_adapter_backend_t), &backend, nullptr));

    if (backend == UR_ADAPTER_BACKEND_LEVEL_ZERO) {
      supported_adapters.push_back(adapter);
    }
  }

  return supported_adapters;
}

// Enumerate platforms exposed by the given adapters (count query, then fill).
// Throws std::runtime_error when the adapters expose no platform.
// (Diff-scrape artifact removed: platformCount/platforms were declared twice.)
std::vector<ur_platform_handle_t>
get_platforms(std::vector<ur_adapter_handle_t> &adapters) {
  uint32_t platformCount = 0;
  ur_check(urPlatformGet(adapters.data(), adapters.size(), 1, nullptr,
                         &platformCount));

  if (!platformCount) {
    throw std::runtime_error("No platforms available.");
  }

  std::vector<ur_platform_handle_t> platforms(platformCount);
  ur_check(urPlatformGet(adapters.data(), adapters.size(), platformCount,
                         platforms.data(), nullptr));
  return platforms;
}

// List the GPU devices of a platform (count query, then fill).
// Throws std::runtime_error when the platform has no GPU.
// (Diff-scrape artifact removed: deviceCount/devices were declared twice.)
std::vector<ur_device_handle_t> get_gpus(ur_platform_handle_t p) {
  uint32_t deviceCount = 0;
  ur_check(urDeviceGet(p, UR_DEVICE_TYPE_GPU, 0, nullptr, &deviceCount));

  if (!deviceCount) {
    throw std::runtime_error("No GPUs available.");
  }

  std::vector<ur_device_handle_t> devices(deviceCount);
  ur_check(
      urDeviceGet(p, UR_DEVICE_TYPE_GPU, deviceCount, devices.data(), nullptr));
  return devices;
}

// Fixed-size array padded out to a page boundary so host buffers handed to the
// runtime start page-aligned.
// (Diff-scrape artifact removed: the member `T data[N];` appeared twice, which
// is an invalid duplicate member declaration.)
template <typename T, size_t N> struct alignas(PAGE_SIZE) AlignedArray {
  T data[N];
};

// End-to-end example driver: discover a Level Zero GPU, build the "plus1"
// kernel from generated SPIR-V, run it over two buffers, and verify that the
// output is the input incremented by one. Returns 0 on success, 1 otherwise.
// (Diff-scrape artifact removed: the entire old 4-space body preceded the new
// body, redeclaring every local variable.)
int main() {
  ur_loader_config_handle_t loader_config = nullptr;
  ur_check(urLoaderInit(UR_DEVICE_INIT_FLAG_GPU, loader_config));

  // Discovery chain: adapters -> Level Zero adapters -> platforms -> GPUs.
  auto adapters = get_adapters();
  auto supported_adapters = get_supported_adapters(adapters);
  auto platforms = get_platforms(supported_adapters);
  auto gpus = get_gpus(platforms.front());
  auto spv = generate_plus_one_spv();

  constexpr int a_size = 32;
  AlignedArray<int, a_size> a, b;
  for (auto i = 0; i < a_size; ++i) {
    a.data[i] = a_size - i;
    b.data[i] = i;
  }

  auto current_device = gpus.front();

  ur_context_handle_t hContext;
  ur_check(urContextCreate(1, &current_device, nullptr, &hContext));

  // Build the program from the generated SPIR-V module.
  ur_program_handle_t hProgram;
  ur_check(urProgramCreateWithIL(hContext, spv.data(), spv.size(), nullptr,
                                 &hProgram));
  ur_check(urProgramBuild(hContext, hProgram, nullptr));

  ur_mem_handle_t dA, dB;
  ur_check(urMemBufferCreate(hContext, UR_MEM_FLAG_READ_WRITE,
                             a_size * sizeof(int), nullptr, &dA));
  ur_check(urMemBufferCreate(hContext, UR_MEM_FLAG_READ_WRITE,
                             a_size * sizeof(int), nullptr, &dB));

  ur_kernel_handle_t hKernel;
  ur_check(urKernelCreate(hProgram, "plus1", &hKernel));
  ur_check(urKernelSetArgMemObj(hKernel, 0, nullptr, dA));
  ur_check(urKernelSetArgMemObj(hKernel, 1, nullptr, dB));

  ur_queue_handle_t queue;
  ur_check(urQueueCreate(hContext, current_device, nullptr, &queue));

  // Blocking writes: host arrays are safe to reuse as soon as these return.
  ur_check(urEnqueueMemBufferWrite(queue, dA, true, 0, a_size * sizeof(int),
                                   a.data, 0, nullptr, nullptr));
  ur_check(urEnqueueMemBufferWrite(queue, dB, true, 0, a_size * sizeof(int),
                                   b.data, 0, nullptr, nullptr));

  const size_t gWorkOffset[] = {0, 0, 0};
  const size_t gWorkSize[] = {128, 1, 1};
  const size_t lWorkSize[] = {1, 1, 1};
  ur_event_handle_t event;
  ur_check(urEnqueueKernelLaunch(queue, hKernel, 3, gWorkOffset, gWorkSize,
                                 lWorkSize, 0, nullptr, &event));

  // Read back b, waiting on the kernel-launch event so results are complete.
  ur_check(urEnqueueMemBufferRead(queue, dB, true, 0, a_size * sizeof(int),
                                  b.data, 1, &event, nullptr));

  ur_check(urQueueFinish(queue));

  std::cout << "Input Array: ";
  for (int i = 0; i < a_size; ++i) {
    std::cout << a.data[i] << " ";
  }
  std::cout << std::endl;

  bool expectedResult = false;

  std::cout << "Output Array: ";
  for (int i = 0; i < a_size; ++i) {
    std::cout << b.data[i] << " ";
    expectedResult |= (b.data[i] == a.data[i] + 1);
  }
  std::cout << std::endl;

  if (expectedResult) {
    std::cout << "Results are correct." << std::endl;
  } else {
    std::cout << "Results are incorrect." << std::endl;
  }

  return urLoaderTearDown() == UR_RESULT_SUCCESS && expectedResult ? 0 : 1;
}
Loading
Loading