Skip to content

Commit 349fb7f

Browse files
mfbalinlijialin03
authored and committed
[GraphBolt] CPUCachedFeature tests. [3] (dmlc#7538)
1 parent 2ac268d commit 349fb7f

File tree

1 file changed

+95
-0
lines changed

1 file changed

+95
-0
lines changed
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,95 @@
1+
import backend as F
2+
3+
import pytest
4+
import torch
5+
6+
from dgl import graphbolt as gb
7+
8+
9+
@pytest.mark.parametrize(
    "dtype",
    [
        torch.bool,
        torch.uint8,
        torch.int8,
        torch.int16,
        torch.int32,
        torch.int64,
        torch.float16,
        torch.bfloat16,
        torch.float32,
        torch.float64,
    ],
)
@pytest.mark.parametrize("policy", ["s3-fifo", "sieve", "lru", "clock"])
def test_cpu_cached_feature(dtype, policy):
    """Exercise CPUCachedFeature across every dtype/eviction-policy combo.

    Covers: full reads, indexed reads (including repeated ids), cache hit
    accounting once the cache is warm, size(), full updates, indexed
    updates, and updating to a feature of different dimensionality.
    """
    a = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=dtype)
    b = torch.tensor([[[1, 2], [3, 4]], [[4, 5], [6, 7]]], dtype=dtype)

    # Pin host memory only when the test backend targets a GPU.
    pin_memory = F._default_context_str == "gpu"

    # Cache capacity is expressed in bytes: room for 32 (resp. 64) rows,
    # far more than the 2 rows each feature holds, so everything fits.
    cache_size_a = 32 * a[:1].nbytes
    cache_size_b = 64 * b[:1].nbytes

    feat_store_a = gb.CPUCachedFeature(
        gb.TorchBasedFeature(a), cache_size_a, policy, pin_memory
    )
    feat_store_b = gb.CPUCachedFeature(
        gb.TorchBasedFeature(b), cache_size_b, policy, pin_memory
    )

    # Reading with no ids returns the entire underlying feature.
    assert torch.equal(feat_store_a.read(), a)
    assert torch.equal(feat_store_b.read(), b)

    # Indexed reads, including a repeated id in the index tensor.
    assert torch.equal(
        feat_store_a.read(torch.tensor([0])),
        torch.tensor([[1, 2, 3]], dtype=dtype),
    )
    assert torch.equal(
        feat_store_b.read(torch.tensor([1, 1])),
        torch.tensor([[[4, 5], [6, 7]], [[4, 5], [6, 7]]], dtype=dtype),
    )
    assert torch.equal(
        feat_store_a.read(torch.tensor([1, 1])),
        torch.tensor([[4, 5, 6], [4, 5, 6]], dtype=dtype),
    )
    assert torch.equal(
        feat_store_b.read(torch.tensor([0])),
        torch.tensor([[[1, 2], [3, 4]]], dtype=dtype),
    )

    # Every row has been touched and the caches are oversized, so the
    # caches should now be warm: expect a 100% hit rate (no new misses).
    total_miss = feat_store_a._feature.total_miss
    feat_store_a.read(torch.tensor([0, 1]))
    assert total_miss == feat_store_a._feature.total_miss
    total_miss = feat_store_b._feature.total_miss
    feat_store_b.read(torch.tensor([0, 1]))
    assert total_miss == feat_store_b._feature.total_miss

    # size() reports the per-row shape (feature dims, without the id axis).
    assert feat_store_a.size() == torch.Size([3])
    assert feat_store_b.size() == torch.Size([2, 2])

    # Updating the whole feature replaces every row.
    feat_store_a.update(torch.tensor([[0, 1, 2], [3, 5, 2]], dtype=dtype))
    assert torch.equal(
        feat_store_a.read(),
        torch.tensor([[0, 1, 2], [3, 5, 2]], dtype=dtype),
    )

    # Updating selected ids rewrites only those rows.
    feat_store_a.update(
        torch.tensor([[2, 0, 1]], dtype=dtype),
        torch.tensor([0]),
    )
    assert torch.equal(
        feat_store_a.read(),
        torch.tensor([[2, 0, 1], [3, 5, 2]], dtype=dtype),
    )

    # A full update may change the feature's dimensionality entirely.
    feat_store_a.update(b)
    assert torch.equal(feat_store_a.read(), b)

0 commit comments

Comments
 (0)