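"""Graph classification with a Graph Isomorphism Network (GIN) on the MUTAG
dataset, compiled via ``torch.compile(dynamic=True)`` so that the dynamic
shapes arising from batches of differently-sized graphs are handled without
recompilation."""
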
import os.path as osp
import time

import torch
import torch.nn.functional as F

import torch_geometric
from torch_geometric.datasets import TUDataset
from torch_geometric.loader import DataLoader
from torch_geometric.nn import MLP, GINConv, global_add_pool

if not torch_geometric.typing.WITH_PT21:
    quit('Dynamic shape compilation requires PyTorch >= 2.1.0')

if torch.cuda.is_available():
    device = torch.device('cuda')
elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
    # MPS is currently slower than CPU due to missing int64 min/max ops:
    device = torch.device('cpu')
else:
    device = torch.device('cpu')

path = osp.dirname(osp.realpath(__file__))
path = osp.join(path, '..', '..', 'data', 'TU')
dataset = TUDataset(path, name='MUTAG').shuffle()

# Fractional slicing splits the shuffled dataset 90%/10% into train/test:
train_loader = DataLoader(dataset[:0.9], batch_size=128, shuffle=True)
test_loader = DataLoader(dataset[0.9:], batch_size=128)
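
# What each mini-batch holds (illustrative sketch; attribute names are the
# standard `torch_geometric.data.Batch` ones):
#   data = next(iter(train_loader))
#   data.x           # [total_nodes, num_features] stacked node features
#   data.edge_index  # [2, total_edges] COO connectivity with offset indices
#   data.batch       # [total_nodes] assigns each node to its graph
#   data.y           # [num_graphs] graph-level class labels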


class GIN(torch.nn.Module):
    def __init__(self, in_channels, hidden_channels, out_channels,
                 num_layers):
        super().__init__()

        # Stack `num_layers` GIN convolutions, each parameterized by a
        # two-layer MLP:
        self.convs = torch.nn.ModuleList()
        for _ in range(num_layers):
            mlp = MLP([in_channels, hidden_channels, hidden_channels])
            self.convs.append(GINConv(nn=mlp, train_eps=False))
            in_channels = hidden_channels

        # Graph-level readout head:
        self.mlp = MLP([hidden_channels, hidden_channels, out_channels],
                       norm=None, dropout=0.5)

    def forward(self, x, edge_index, batch, batch_size):
        for conv in self.convs:
            x = conv(x, edge_index).relu()

        # Pass the batch size to avoid CPU communication/graph breaks:
        x = global_add_pool(x, batch, size=batch_size)

        return self.mlp(x)
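
# Illustrative shape trace through `GIN.forward` for MUTAG (7 node features,
# 2 classes) with the hyperparameters chosen below:
#   x: [total_nodes, 7]  --convs-->  [total_nodes, 32]
#   global_add_pool      -->         [batch_size, 32]
#   self.mlp             -->         [batch_size, 2]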


model = GIN(
    in_channels=dataset.num_features,
    hidden_channels=32,
    out_channels=dataset.num_classes,
    num_layers=5,
).to(device)

# Compile the model into an optimized version:
model = torch.compile(model, dynamic=True)
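
# `dynamic=True` asks the compiler to generate size-generic kernels, so
# differently-sized graph batches do not each trigger a recompilation. A
# possible variant (an assumption, not part of this example) additionally
# forbids graph breaks:
#   model = torch.compile(model, dynamic=True, fullgraph=True)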

optimizer = torch.optim.Adam(model.parameters(), lr=0.01)


def train():
    model.train()

    total_loss = 0
    for data in train_loader:
        data = data.to(device)
        optimizer.zero_grad()
        out = model(data.x, data.edge_index, data.batch, data.batch_size)
        loss = F.cross_entropy(out, data.y)
        loss.backward()
        optimizer.step()
        total_loss += float(loss) * data.num_graphs
    return total_loss / len(train_loader.dataset)


@torch.no_grad()
def test(loader):
    model.eval()

    total_correct = 0
    for data in loader:
        data = data.to(device)
        out = model(data.x, data.edge_index, data.batch, data.batch_size)
        pred = out.argmax(dim=-1)
        total_correct += int((pred == data.y).sum())
    return total_correct / len(loader.dataset)


times = []
for epoch in range(1, 101):
    start = time.time()
    loss = train()
    train_acc = test(train_loader)
    test_acc = test(test_loader)
    times.append(time.time() - start)
    print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, Train: {train_acc:.4f}, '
          f'Test: {test_acc:.4f}')

print(f'Median time per epoch: {torch.tensor(times).median():.4f}s')