6 changes: 5 additions & 1 deletion deepspeed/runtime/utils.py
@@ -16,9 +16,13 @@
 from bisect import bisect_left, bisect_right
 
 import torch
-from torch._six import inf
 import torch.distributed as dist
 
+try:
+    from torch._six import inf as inf
+except ModuleNotFoundError:
+    from torch import inf as inf
+
 from deepspeed.utils import groups, logger
 from deepspeed.runtime.constants import PIPE_REPLICATED
 from numpy import prod
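Why the hunk above works: torch._six was a Python 2/3 compatibility module that newer PyTorch releases removed, so "from torch._six import inf" raises ModuleNotFoundError there, while recent PyTorch exposes the same constant directly as torch.inf. A minimal standalone sketch of the fallback pattern, with a hypothetical helper (total_norm is ours for illustration, not code from this PR) showing why the constant matters for norm computations like DeepSpeed's gradient clipping:

    import torch

    try:
        from torch._six import inf   # older PyTorch still ships torch._six
    except ModuleNotFoundError:
        from torch import inf        # newer PyTorch exposes torch.inf directly

    def total_norm(tensors, norm_type=2.0):
        # Hypothetical helper: combined norm of a list of tensors, honoring
        # norm_type=inf the way torch.nn.utils.clip_grad_norm_ does.
        if norm_type == inf:
            return max(t.abs().max().item() for t in tensors)
        return sum(t.norm(norm_type).item() ** norm_type for t in tensors) ** (1.0 / norm_type)

    print(total_norm([torch.ones(3), torch.full((2,), 2.0)], norm_type=inf))  # -> 2.0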
3 changes: 1 addition & 2 deletions deepspeed/runtime/zero/stage3.py
@@ -16,15 +16,14 @@
 from torch.nn import Module, Parameter
 import torch.distributed as dist
 import math
-from torch._six import inf
 from torch.nn import Module
 from torch.nn.parameter import Parameter
 
 from deepspeed.runtime import ZeROOptimizer
 from deepspeed.utils.logging import logger
 from deepspeed.runtime.fp16.loss_scaler import LossScaler, DynamicLossScaler
 from deepspeed.runtime.comm.coalesced_collectives import reduce_scatter_coalesced
-from deepspeed.runtime.utils import get_global_norm, see_memory_usage, is_model_parallel_parameter, DummyOptim
+from deepspeed.runtime.utils import inf, get_global_norm, see_memory_usage, is_model_parallel_parameter, DummyOptim
 from deepspeed.runtime.zero.partition_parameters import *
 from deepspeed.runtime.zero.partition_parameters import _init_external_params
 from deepspeed.runtime.zero.constants import ZERO_OPTIMIZATION_WEIGHTS
4 changes: 2 additions & 2 deletions deepspeed/runtime/zero/stage_1_and_2.py
@@ -5,12 +5,12 @@
 import torch
 from torch.distributed.distributed_c10d import _get_global_rank
 import torch.distributed as dist
-from torch._six import inf
 from packaging import version as pkg_version
 
 from deepspeed.runtime import ZeROOptimizer
 from deepspeed.runtime.fp16.loss_scaler import LossScaler, DynamicLossScaler
-from deepspeed.runtime.utils import (bwc_tensor_model_parallel_rank,
+from deepspeed.runtime.utils import (inf,
+                                     bwc_tensor_model_parallel_rank,
                                      get_global_norm,
                                      see_memory_usage,
                                      is_model_parallel_parameter,
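The two ZeRO files are the consumer half of the fix: instead of each module repeating the try/except, they import inf from deepspeed.runtime.utils, which re-exports whichever source succeeded. A short sketch of the consumer side under that assumption (grad_norm_overflowed is an illustrative helper, not code from this PR):

    from deepspeed.runtime.utils import inf  # one shim to update if torch relocates inf again

    def grad_norm_overflowed(total_norm: float) -> bool:
        # Hypothetical check in the spirit of dynamic loss scaling:
        # treat inf/-inf or NaN (NaN != NaN) gradient norms as overflow.
        return total_norm in (inf, -inf) or total_norm != total_norm

Centralizing the import this way means a future change in where PyTorch defines inf touches one file rather than every module that clips or scales gradients.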