diff --git a/cpp/tests/utilities/identify_stream_usage.cpp b/cpp/tests/utilities/identify_stream_usage.cpp index 93f4ee3e4a4..69dc1c4430b 100644 --- a/cpp/tests/utilities/identify_stream_usage.cpp +++ b/cpp/tests/utilities/identify_stream_usage.cpp @@ -209,6 +209,40 @@ DEFINE_OVERLOAD(cudaLaunchKernel, size_t sharedMem, cudaStream_t stream), ARG(func, gridDim, blockDim, args, sharedMem, stream)); + +#if CUDART_VERSION >= 13000 +// We need to declare the __cudaLaunchKernel ABI ourselves because +// it isn't declared in cuda_runtime.h when compiling as a C++ source +extern "C" cudaError_t CUDARTAPI __cudaLaunchKernel(cudaKernel_t kernel, + dim3 gridDim, + dim3 blockDim, + void** args, + size_t sharedMem, + cudaStream_t stream); +extern "C" cudaError_t CUDARTAPI __cudaLaunchKernel_ptsz(cudaKernel_t kernel, + dim3 gridDim, + dim3 blockDim, + void** args, + size_t sharedMem, + cudaStream_t stream); +DEFINE_OVERLOAD(__cudaLaunchKernel, + ARG(cudaKernel_t kernel, + dim3 gridDim, + dim3 blockDim, + void** args, + size_t sharedMem, + cudaStream_t stream), + ARG(kernel, gridDim, blockDim, args, sharedMem, stream)); +DEFINE_OVERLOAD(__cudaLaunchKernel_ptsz, + ARG(cudaKernel_t kernel, + dim3 gridDim, + dim3 blockDim, + void** args, + size_t sharedMem, + cudaStream_t stream), + ARG(kernel, gridDim, blockDim, args, sharedMem, stream)); +#endif + DEFINE_OVERLOAD(cudaLaunchCooperativeKernel, ARG(void const* func, dim3 gridDim, @@ -223,9 +257,16 @@ DEFINE_OVERLOAD(cudaLaunchHostFunc, // Memory transfer APIS: // https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__MEMORY.html#group__CUDART__MEMORY +#if CUDART_VERSION >= 13000 +DEFINE_OVERLOAD( + cudaMemPrefetchAsync, + ARG(void const* devPtr, size_t count, cudaMemLocation loc, int flags, cudaStream_t stream), + ARG(devPtr, count, loc, flags, stream)); +#else DEFINE_OVERLOAD(cudaMemPrefetchAsync, ARG(void const* devPtr, size_t count, int dstDevice, cudaStream_t stream), ARG(devPtr, count, dstDevice, stream)); +#endif 
DEFINE_OVERLOAD(cudaMemcpy2DAsync, ARG(void* dst, size_t dpitch,