Making wrapper tensor subclass to work in serialization #2440
@@ -31,6 +31,14 @@ def _dummy_get_storage_size(item):
    return sum(item)


# Util function to check whether wrapper tensor subclasses are available in this torch version
def is_wrapper_tensor_subclass_available():
    try:
        from torch.utils._python_dispatch import is_traceable_wrapper_subclass

        return True
    except ImportError:
        return False


@pytest.fixture
def dummy_state_dict() -> Dict[str, List[int]]:
    return {
@@ -58,6 +66,25 @@ def torch_state_dict() -> Dict[str, "torch.Tensor"]:
        pytest.skip("torch is not available")


@pytest.fixture
def torch_state_dict_tensor_subclass() -> Dict[str, "torch.Tensor"]:
    try:
        import torch
        from torch.testing._internal.two_tensor import TwoTensor

        t = torch.tensor([4])
        return {
            "layer_1": torch.tensor([4]),
            "layer_2": torch.tensor([10]),
            "layer_3": torch.tensor([30]),
            "layer_4": torch.tensor([2]),
            "layer_5": torch.tensor([2]),
            "layer_6": TwoTensor(t, t),
        }
    except ImportError:
        pytest.skip("torch is not available")


@pytest.fixture
def torch_state_dict_shared_layers() -> Dict[str, "torch.Tensor"]:
    try:
@@ -75,6 +102,55 @@ def torch_state_dict_shared_layers() -> Dict[str, "torch.Tensor"]:
        pytest.skip("torch is not available")


@pytest.fixture
def torch_state_dict_shared_layers_tensor_subclass() -> Dict[str, "torch.Tensor"]:
    try:
        import torch
        from torch.testing._internal.two_tensor import TwoTensor

        t = torch.tensor([4])
        tensor_subclass_tensor = TwoTensor(t, t)

        t = torch.tensor([4])
        shared_tensor_subclass_tensor = TwoTensor(t, t)
        return {
            "layer_1": torch.tensor([4]),
            "layer_2": torch.tensor([10]),
            "layer_3": torch.tensor([30]),
            "layer_4": torch.tensor([2]),
            "layer_5": torch.tensor([2]),
            "layer_6": tensor_subclass_tensor,
            "ts_shared_1": shared_tensor_subclass_tensor,
            "ts_shared_2": shared_tensor_subclass_tensor,
        }
    except ImportError:
        pytest.skip("torch is not available")


@pytest.fixture
def torch_state_dict_shared_layers() -> Dict[str, "torch.Tensor"]:
    try:
        import torch
        from torch.testing._internal.two_tensor import TwoTensor

        if is_wrapper_tensor_subclass_available():
            # TODO: need to fix safetensors support for tensor subclasses before we can
            # add this to the test
            # shared_layer = TwoTensor(torch.tensor([4]), torch.tensor([4]))
Review comment: @Wauplin this seems to fail because safetensors does not support wrapper tensor subclasses yet; we can enable this when we add similar support in safetensors.

Reply: Actually we don't have to test on […] Also, […]
            shared_layer = torch.tensor([4])
        else:
            shared_layer = torch.tensor([4])

        return {
            "shared_1": shared_layer,
            "unique_1": torch.tensor([10]),
            "unique_2": torch.tensor([30]),
            "shared_2": shared_layer,
        }
    except ImportError:
        pytest.skip("torch is not available")

def test_single_shard(dummy_state_dict):
    state_dict_split = split_state_dict_into_shards_factory(
        dummy_state_dict,
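Regarding the inline review comment above: safetensors cannot yet handle wrapper tensor subclasses, which is why the shared TwoTensor layer stays commented out in the fixture. As a rough, hypothetical sketch of what such support could build on (not part of this PR; the helper name is made up), a traceable wrapper subclass can be flattened into its plain inner tensors through the __tensor_flatten__ protocol before the state dict reaches a plain-tensor serializer:

    import torch
    from torch.utils._python_dispatch import is_traceable_wrapper_subclass


    def flatten_wrapper_subclasses(state_dict):
        """Hypothetical helper: replace each wrapper-subclass entry by its inner plain tensors."""
        flat = {}
        for key, tensor in state_dict.items():
            if is_traceable_wrapper_subclass(tensor):
                # __tensor_flatten__ returns the attribute names of the inner tensors
                # plus an opaque context; TwoTensor exposes "a" and "b".
                inner_attrs, _ctx = tensor.__tensor_flatten__()
                for attr in inner_attrs:
                    flat[f"{key}.{attr}"] = getattr(tensor, attr)
            else:
                flat[key] = tensor
        return flat

A nested TwoTensor would need the same treatment applied recursively; this sketch handles a single level only.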
@@ -170,6 +246,17 @@ def test_get_torch_storage_size():
    assert get_torch_storage_size(torch.tensor([1, 2, 3, 4, 5], dtype=torch.float16)) == 5 * 2


@requires("torch")
@pytest.mark.skipif(not is_wrapper_tensor_subclass_available(), reason="requires torch 2.1 or higher")
def test_get_torch_storage_size_wrapper_tensor_subclass():
    import torch
    from torch.testing._internal.two_tensor import TwoTensor

    t = torch.tensor([1, 2, 3, 4, 5], dtype=torch.float64)
    assert get_torch_storage_size(TwoTensor(t, t)) == 5 * 8 * 2
    t = torch.tensor([1, 2, 3, 4, 5], dtype=torch.float16)
    assert get_torch_storage_size(TwoTensor(t, TwoTensor(t, t))) == 5 * 2 * 3

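For context on the expected values in the storage-size test above, assuming the storage size of a wrapper subclass is the sum of the storage of its inner plain tensors: TwoTensor(t, t) carries two real copies of the data, and nesting another TwoTensor adds a third leaf.

    elems = 5                    # elements in t
    assert elems * 8 * 2 == 80   # two float64 leaves of 40 bytes each -> 5 * 8 * 2
    assert elems * 2 * 3 == 30   # three float16 leaves of 10 bytes each -> 5 * 2 * 3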
def test_parse_size_to_int():
    assert parse_size_to_int("1KB") == 1 * 10**3
    assert parse_size_to_int("2MB") == 2 * 10**6
@@ -247,6 +334,32 @@ def test_save_torch_state_dict_unsafe_not_sharded(
    assert not (tmp_path / "pytorch_model.bin.index.json").is_file()


@pytest.mark.skipif(not is_wrapper_tensor_subclass_available(), reason="requires torch 2.1 or higher")
def test_save_torch_state_dict_tensor_subclass_unsafe_not_sharded(
    tmp_path: Path, caplog: pytest.LogCaptureFixture, torch_state_dict_tensor_subclass: Dict[str, "torch.Tensor"]
) -> None:
    """Save as pickle without sharding."""
    with caplog.at_level("WARNING"):
        save_torch_state_dict(torch_state_dict_tensor_subclass, tmp_path, max_shard_size="1GB", safe_serialization=False)
        assert "we strongly recommend using safe serialization" in caplog.text

    assert (tmp_path / "pytorch_model.bin").is_file()
    assert not (tmp_path / "pytorch_model.bin.index.json").is_file()


@pytest.mark.skipif(not is_wrapper_tensor_subclass_available(), reason="requires torch 2.1 or higher")
def test_save_torch_state_dict_shared_layers_tensor_subclass_unsafe_not_sharded(
    tmp_path: Path, caplog: pytest.LogCaptureFixture, torch_state_dict_shared_layers_tensor_subclass: Dict[str, "torch.Tensor"]
) -> None:
    """Save as pickle without sharding."""
    with caplog.at_level("WARNING"):
        save_torch_state_dict(torch_state_dict_shared_layers_tensor_subclass, tmp_path, max_shard_size="1GB", safe_serialization=False)
        assert "we strongly recommend using safe serialization" in caplog.text

    assert (tmp_path / "pytorch_model.bin").is_file()
    assert not (tmp_path / "pytorch_model.bin.index.json").is_file()


def test_save_torch_state_dict_unsafe_sharded(
    tmp_path: Path, caplog: pytest.LogCaptureFixture, torch_state_dict: Dict[str, "torch.Tensor"]
) -> None:
Review comment: If I understand correctly, two "meta" tensors can have the exact same _get_unique_id(tensor) and the exact same tensor.device but still be different, correct? If they are different, how can we be sure their storage size distinguishes them? Can it happen that they randomly happen to have the same storage size?
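To make the concern concrete, here is a small illustration (not code from this PR): two unrelated tensors on the meta device share the same device, shape, dtype and nominal storage size, so those fields alone cannot tell them apart.

    import torch

    a = torch.empty(5, device="meta")
    b = torch.empty(5, device="meta")

    # Unrelated tensors, yet every cheap-to-compare attribute matches.
    print(a.device == b.device)                    # True
    print(a.shape == b.shape, a.dtype == b.dtype)  # True True
    print(a.numel() * a.element_size())            # 20 bytes, same for b
    print(a is b)                                  # False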
Reply: Yeah, it just means the current approach does not generalize to meta tensors. Did it work previously? I think we'd need to reimplement the higher-level sharding logic in PyTorch in the end; I added some PoC in the Slack, let me make a quick intro there.
Reply: I don't think so, since we never had to serialize meta tensors. The only use case that could benefit from that is in accelerate (finding tied parameters from the meta model). Right now, this is how we handle meta tensors: https://github.com/huggingface/accelerate/blob/726140cad2f2361d79da7786a7b96d0bee591c48/src/accelerate/utils/modeling.py#L677
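The accelerate link above deals with finding tied parameters on a meta model. As a rough sketch of the underlying idea (not the accelerate implementation; the helper name is made up): tied weights are literally the same Parameter object, so object identity is enough even though meta tensors expose no real data pointer.

    from collections import defaultdict

    import torch
    from torch import nn


    def find_tied_parameters_by_identity(model: nn.Module):
        """Group parameter names that point to the very same Parameter object."""
        groups = defaultdict(list)
        for name, param in model.named_parameters(remove_duplicate=False):
            groups[id(param)].append(name)
        return [names for names in groups.values() if len(names) > 1]


    with torch.device("meta"):
        model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 4))
    model[1].weight = model[0].weight  # tie the two weights

    print(find_tied_parameters_by_identity(model))  # [['0.weight', '1.weight']]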