Add numa binding to init from env helper (pytorch#396)
Summary: Pull Request resolved: pytorch#396

Differential Revision: D46036186

fbshipit-source-id: 574307ee09807c9b422239afea643637ce24cb0a
ananthsub authored and facebook-github-bot committed May 19, 2023
1 parent e05c060 commit 887e3ef
Showing 2 changed files with 16 additions and 0 deletions.
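
For context, a minimal caller-side sketch of the new flag, not part of this commit: it assumes torchtnt is installed, the process was launched with LOCAL_WORLD_SIZE set (e.g. by torchrun), and a CUDA device plus the numa package are available. bind_numa defaults to True and is only spelled out here for clarity.

# Hypothetical usage sketch under the assumptions above.
from torchtnt.utils.env import init_from_env

# With bind_numa=True (the default), init_from_env additionally binds this
# process to the NUMA node that corresponds to its CUDA device.
device = init_from_env(bind_numa=True)
print(device)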
1 change: 1 addition & 0 deletions requirements.txt
@@ -2,6 +2,7 @@ torch
 numpy
 fsspec
 tensorboard
+numa
 packaging
 psutil
 pyre_extensions
15 changes: 15 additions & 0 deletions torchtnt/utils/env.py
@@ -38,6 +38,7 @@ def init_from_env(
     pg_backend: T.Optional[str] = None,
     pg_timeout: timedelta = default_pg_timeout,
     float32_matmul_precision: str = "high",
+    bind_numa: bool = True,
 ) -> torch.device:
     """Utility function that initializes the device and process group, if applicable.
@@ -57,6 +58,7 @@
         pg_timeout (timedelta, optional): Timeout for operations executed against the process
             group. Default value equals 30 minutes
         float32_matmul_precision (str, optional): The setting for torch's precision of matrix multiplications.
+        bind_numa (bool, optional): Whether to bind CPU sockets to GPUs.

     Returns:
         The current device.
@@ -86,4 +88,17 @@
     )
     torch.distributed.init_process_group(backend=pg_backend, timeout=pg_timeout)
     maybe_enable_tf32(float32_matmul_precision)
+    if bind_numa and device.type == "cuda":
+        init_numa()
+
     return device
+
+
+def init_numa() -> None:
+    import numa
+
+    local_world_size = int(os.environ.get("LOCAL_WORLD_SIZE", 1))
+    num_sockets = numa.get_max_node() + 1
+    socket_id = torch.cuda.current_device() // (max(local_world_size // num_sockets, 1))
+    node_mask = {socket_id}
+    numa.bind(node_mask)
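
To illustrate the mapping implemented by init_numa(): each local rank binds to the NUMA node (socket) its GPU index falls into when the local ranks are split evenly across sockets. Below is a small self-contained sketch of that arithmetic; the host shape (8 GPUs, 2 NUMA nodes) is an assumed example and socket_for_device is a hypothetical helper, not part of the commit.

# Reproduces the socket selection from init_numa() without torch/numa,
# so it runs anywhere. Assumed example host: 8 GPUs, 2 NUMA nodes.
def socket_for_device(device_index: int, local_world_size: int, num_sockets: int) -> int:
    # Same expression as init_numa(): GPUs are split evenly across sockets.
    return device_index // max(local_world_size // num_sockets, 1)

local_world_size = 8  # would come from the LOCAL_WORLD_SIZE env var
num_sockets = 2       # would come from numa.get_max_node() + 1

for device_index in range(local_world_size):
    node = socket_for_device(device_index, local_world_size, num_sockets)
    print(f"GPU {device_index} -> NUMA node {node}")
# GPUs 0-3 map to node 0; GPUs 4-7 map to node 1.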
