-from ray.util.collective.collective import nccl_available, gloo_available, \
+from ray.util.collective.collective import nccl_available, mpi_available, \
     is_group_initialized, init_collective_group, destroy_collective_group, \
-    declare_collective_group, get_rank, get_world_size, allreduce, \
-    allreduce_multigpu, barrier, reduce, reduce_multigpu, broadcast, \
-    broadcast_multigpu, allgather, allgather_multigpu, reducescatter, \
-    reducescatter_multigpu, send, send_multigpu, recv, recv_multigpu
+    get_rank, get_world_size, allreduce, barrier, reduce, broadcast, \
+    allgather, reducescatter, send, recv
 
 __all__ = [
-    "nccl_available", "gloo_available", "is_group_initialized",
-    "init_collective_group", "destroy_collective_group",
-    "declare_collective_group", "get_rank", "get_world_size", "allreduce",
-    "allreduce_multigpu", "barrier", "reduce", "reduce_multigpu", "broadcast",
-    "broadcast_multigpu", "allgather", "allgather_multigpu", "reducescatter",
-    "reducescatter_multigpu", "send", "send_multigpu", "recv", "recv_multigpu"
+    "nccl_available", "mpi_available", "is_group_initialized",
+    "init_collective_group", "destroy_collective_group", "get_rank",
+    "get_world_size", "allreduce", "barrier", "reduce", "broadcast",
+    "allgather", "reducescatter", "send", "recv"
 ]
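For reference, a minimal sketch of how the re-exported collective API is typically used from Ray actors. It assumes a two-GPU machine, the NCCL backend, and PyTorch tensors; the Worker class and the "default" group name are illustrative and not part of this change.

import ray
import torch
import ray.util.collective as col


@ray.remote(num_gpus=1)
class Worker:
    def __init__(self, world_size, rank):
        # Join a named collective group; backend="nccl" assumes GPUs are available.
        col.init_collective_group(world_size, rank, backend="nccl", group_name="default")
        self.buffer = torch.ones(4, device="cuda")

    def compute(self):
        # In-place sum of self.buffer across every worker in the group.
        col.allreduce(self.buffer, group_name="default")
        return self.buffer.cpu().numpy()


ray.init()
workers = [Worker.remote(world_size=2, rank=i) for i in range(2)]
print(ray.get([w.compute.remote() for w in workers]))  # two arrays of [2. 2. 2. 2.]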