Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions src/api/cpp/nixl.h
Original file line number Diff line number Diff line change
Expand Up @@ -325,6 +325,8 @@ class nixlAgent {
* @param req_hndl [in] Transfer request obtained from makeXferReq/createXferReq
* @param gpu_req_hndl [out] GPU transfer request handle
* @return nixl_status_t Error code if call was not successful
*
* @note This call may block until the associated connection is established.
*/
nixl_status_t
createGpuXferReq(const nixlXferReqH &req_hndl, nixlGpuXferReqH &gpu_req_hndl) const;
Expand Down
3 changes: 2 additions & 1 deletion src/plugins/ucx/ucx_backend.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1688,7 +1688,8 @@ nixlUcxEngine::createGpuXferReq(const nixlBackendReqH &req_hndl,
}

try {
gpu_req_hndl = nixl::ucx::createGpuXferReq(*ep, local_mems, remote_rkeys, remote_addrs);
gpu_req_hndl = nixl::ucx::createGpuXferReq(
*ep, uws, local_mems, remote_rkeys, remote_addrs);
NIXL_TRACE << "Created device memory list: ep=" << ep->getEp() << " handle=" << gpu_req_hndl
<< " worker_id=" << workerId << " num_elements=" << local_mems.size();
return NIXL_SUCCESS;
Expand Down
21 changes: 20 additions & 1 deletion src/utils/ucx/gpu_xfer_req_h.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,8 @@
#include "rkey.h"
#include "config.h"

#include <chrono>

extern "C" {
#ifdef HAVE_UCX_GPU_DEVICE_API
#include <ucp/api/device/ucp_host.h>
Expand All @@ -33,6 +35,7 @@ namespace nixl::ucx {

nixlGpuXferReqH
createGpuXferReq(const nixlUcxEp &ep,
const std::vector<std::unique_ptr<nixlUcxWorker>> &all_workers,
const std::vector<nixlUcxMem> &local_mems,
const std::vector<const nixl::ucx::rkey *> &remote_rkeys,
const std::vector<uint64_t> &remote_addrs) {
Expand Down Expand Up @@ -74,8 +77,23 @@ createGpuXferReq(const nixlUcxEp &ep,
params.element_size = sizeof(ucp_device_mem_list_elem_t);
params.num_elements = ucp_elements.size();

const auto start = std::chrono::steady_clock::now();
constexpr auto timeout = std::chrono::seconds(5);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What do you think about making the timeout configurable via an environment variable?

ucp_device_mem_list_handle_h ucx_handle;
ucs_status_t ucs_status = ucp_device_mem_list_create(ep.getEp(), &params, &ucx_handle);
ucs_status_t ucs_status;
// Workaround: loop until wireup is completed
while ((ucs_status = ucp_device_mem_list_create(ep.getEp(), &params, &ucx_handle)) ==
UCS_ERR_NOT_CONNECTED) {
for (const auto &w : all_workers) {
w->progress();
}

if (std::chrono::steady_clock::now() - start > timeout) {
throw std::runtime_error(
"Timeout waiting for endpoint wireup completion has been exceeded");
}
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think it makes sense to swap the time check and the progress calls on the workers. Otherwise, we may throw the exception even when the wireup completes on the current iteration.

Optional. I'd prefer to do a time loop. E.g.:

for (const auto start = std::chrono::steady_clock::now();
     std::chrono::steady_clock::now() - start <= timeout;)
{
    status = ucp_device_mem_list_create(ep.getEp(), &params, &ucx_handle);
    if (status != UCS_ERR_NOT_CONNECTED) {
        break;
    }

    for (const auto &w : workers) {
        w->progress();
    }
}

if (status == UCS_ERR_NOT_CONNECTED) {
    throw std::runtime_error("Timeout waiting for endpoint wireup completion has been exceeded");
} else if (status != UCS_OK) {
    throw std::runtime_error(std::string("Failed to create device memory list: ") +
                             ucs_status_string(status));
}

}

if (ucs_status != UCS_OK) {
throw std::runtime_error(std::string("Failed to create device memory list: ") +
ucs_status_string(ucs_status));
Expand All @@ -96,6 +114,7 @@ releaseGpuXferReq(nixlGpuXferReqH gpu_req) noexcept {

nixlGpuXferReqH
createGpuXferReq(const nixlUcxEp &ep,
const std::vector<std::unique_ptr<nixlUcxWorker>> &all_workers,
const std::vector<nixlUcxMem> &local_mems,
const std::vector<const nixl::ucx::rkey *> &remote_rkeys,
const std::vector<uint64_t> &remote_addrs) {
Expand Down
3 changes: 3 additions & 0 deletions src/utils/ucx/gpu_xfer_req_h.h
Original file line number Diff line number Diff line change
Expand Up @@ -19,17 +19,20 @@
#define NIXL_SRC_UTILS_UCX_GPU_XFER_REQ_H_H

#include <vector>
#include <memory>

#include "nixl_types.h"

class nixlUcxEp;
class nixlUcxMem;
class nixlUcxWorker;

namespace nixl::ucx {
class rkey;

nixlGpuXferReqH
createGpuXferReq(const nixlUcxEp &ep,
const std::vector<std::unique_ptr<nixlUcxWorker>> &all_workers,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It looks like the `all_` prefix is redundant for this parameter; `workers` would be enough.

const std::vector<nixlUcxMem> &local_mems,
const std::vector<const nixl::ucx::rkey *> &remote_rkeys,
const std::vector<uint64_t> &remote_addrs);
Expand Down
44 changes: 0 additions & 44 deletions test/gtest/device_api/single_write_test.cu
Original file line number Diff line number Diff line change
Expand Up @@ -194,50 +194,8 @@ protected:
agent.registerMem(reg_list);
}

// TODO: remove this function once a blocking CreateGpuXferReq is implemented
//
// Forces UCX endpoint wireup for every worker by issuing one small dummy
// WRITE transfer per worker from `from_agent` to `to_agent` and waiting for
// it to complete. Device-side transfer requests require an established
// connection, so tests call this before creating GPU transfer requests.
//
// Parameters:
//   from_agent / to_agent - indices into the test fixture's agent list
//   wireup_src / wireup_dst - pre-registered VRAM buffers used as the
//       source/destination of the dummy transfers
// Fails the enclosing test (via ASSERT_*) if any step does not succeed.
void
completeWireup(size_t from_agent, size_t to_agent,
const std::vector<MemBuffer> &wireup_src,
const std::vector<MemBuffer> &wireup_dst) {
nixl_opt_args_t wireup_params;

for (size_t worker_id = 0; worker_id < numWorkers; worker_id++) {
// Pin the request to a specific worker so every worker's endpoint
// gets wired up, not just the default one.
wireup_params.customParam = "worker_id=" + std::to_string(worker_id);

nixlXferReqH *wireup_req;
nixl_status_t status = getAgent(from_agent)
.createXferReq(NIXL_WRITE,
makeDescList<nixlBasicDesc>(wireup_src, VRAM_SEG),
makeDescList<nixlBasicDesc>(wireup_dst, VRAM_SEG),
getAgentName(to_agent),
wireup_req,
&wireup_params);

ASSERT_EQ(status, NIXL_SUCCESS) << "Failed to create wireup request for worker " << worker_id;

// Posting may complete immediately or report in-progress; both are fine.
status = getAgent(from_agent).postXferReq(wireup_req);
ASSERT_TRUE(status == NIXL_SUCCESS || status == NIXL_IN_PROG)
<< "Failed to post wireup for worker " << worker_id;

// Busy-poll (with a 1 ms sleep) until the transfer leaves IN_PROG.
// NOTE(review): no timeout here — a stuck wireup would hang the test.
nixl_status_t xfer_status;
do {
xfer_status = getAgent(from_agent).getXferStatus(wireup_req);
std::this_thread::sleep_for(std::chrono::milliseconds(1));
} while (xfer_status == NIXL_IN_PROG);

ASSERT_EQ(xfer_status, NIXL_SUCCESS) << "Warmup failed for worker " << worker_id;

status = getAgent(from_agent).releaseXferReq(wireup_req);
ASSERT_EQ(status, NIXL_SUCCESS);
}
}

void
exchangeMD(size_t from_agent, size_t to_agent) {
std::vector<MemBuffer> wireup_src, wireup_dst;
createRegisteredMem(getAgent(from_agent), 64, 1, VRAM_SEG, wireup_src);
createRegisteredMem(getAgent(to_agent), 64, 1, VRAM_SEG, wireup_dst);

for (size_t i = 0; i < agents.size(); i++) {
nixl_blob_t md;
nixl_status_t status = agents[i]->getLocalMD(md);
Expand All @@ -251,8 +209,6 @@ protected:
EXPECT_EQ(remote_agent_name, getAgentName(i));
}
}

completeWireup(from_agent, to_agent, wireup_src, wireup_dst);
}

void
Expand Down
Loading