+ {
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "name": "PyTorch/XLA Profiling Colab Tutorial",
+ "provenance": [],
+ "collapsed_sections": [],
+ "machine_shape": "hm"
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "accelerator": "TPU"
+ },
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "YX1hxqUQn47M"
+ },
+ "source": [
+ "## PyTorch/XLA TPU Profiling Colab tutorial\n",
+ "\n",
+ "*Note*: Since we're not using GCS in this tutorial, TPU-side traces won't be collected. To collect full TPU traces, follow [this tutorial](https://cloud.google.com/tpu/docs/pytorch-xla-users-guide)."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "pLQPoJ6Fn8wF"
+ },
+ "source": [
+ "### [RUNME] Install Colab compatible PyTorch/XLA wheels and dependencies\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "O53lrJMDn9Rd"
+ },
+ "source": [
+ "!pip install cloud-tpu-client==0.10 torch==1.8.0 https://storage.googleapis.com/tpu-pytorch/wheels/torch_xla-1.8-cp37-cp37m-linux_x86_64.whl tensorboard-plugin-profile"
+ ],
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "rroH9yiAn-XE"
+ },
+ "source": [
+ "### Define Parameters\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "iMdPRFXIn_jH"
+ },
+ "source": [
+ "# Define Parameters\n",
+ "import os\n",
+ "FLAGS = {}\n",
+ "FLAGS['data_dir'] = \"/tmp/cifar\"\n",
+ "FLAGS['batch_size'] = 128\n",
+ "FLAGS['num_workers'] = 4\n",
+ "FLAGS['learning_rate'] = 0.02\n",
+ "FLAGS['momentum'] = 0.9\n",
+ "FLAGS['num_epochs'] = 200\n",
+ "FLAGS['num_cores'] = 8 if os.environ.get('TPU_NAME', None) else 1\n",
+ "FLAGS['log_steps'] = 20\n",
+ "FLAGS['metrics_debug'] = False"
+ ],
+ "execution_count": null,
+ "outputs": []
+ },
82
+ {
83
+ "cell_type" : " code" ,
84
+ "metadata" : {
85
+ "id" : " EP5H63aViwJe"
86
+ },
87
+ "source" : [
88
+ " # Setup profiler env var\n " ,
+ "os.environ['XLA_HLO_DEBUG'] = '1'"
+ ],
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "Micd3xZvoA-c"
+ },
+ "source": [
+ "import multiprocessing\n",
+ "import numpy as np\n",
+ "import os\n",
+ "import sys\n",
+ "import time\n",
+ "import torch\n",
+ "import torch.nn as nn\n",
+ "import torch.nn.functional as F\n",
+ "import torch.optim as optim\n",
+ "import torch_xla\n",
+ "import torch_xla.core.xla_model as xm\n",
+ "import torch_xla.debug.metrics as met\n",
+ "import torch_xla.debug.profiler as xp\n",
+ "import torch_xla.distributed.parallel_loader as pl\n",
+ "import torch_xla.distributed.xla_multiprocessing as xmp\n",
+ "import torch_xla.utils.utils as xu\n",
+ "import torchvision\n",
+ "from torchvision import datasets, transforms\n",
+ "\n",
+ "class BasicBlock(nn.Module):\n",
+ "  expansion = 1\n",
+ "\n",
+ "  def __init__(self, in_planes, planes, stride=1):\n",
+ "    super(BasicBlock, self).__init__()\n",
+ "    self.conv1 = nn.Conv2d(\n",
+ "        in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n",
+ "    self.bn1 = nn.BatchNorm2d(planes)\n",
+ "    self.conv2 = nn.Conv2d(\n",
+ "        planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n",
+ "    self.bn2 = nn.BatchNorm2d(planes)\n",
+ "\n",
+ "    self.shortcut = nn.Sequential()\n",
+ "    if stride != 1 or in_planes != self.expansion * planes:\n",
+ "      self.shortcut = nn.Sequential(\n",
+ "          nn.Conv2d(\n",
+ "              in_planes,\n",
+ "              self.expansion * planes,\n",
+ "              kernel_size=1,\n",
+ "              stride=stride,\n",
+ "              bias=False), nn.BatchNorm2d(self.expansion * planes))\n",
+ "\n",
+ "  def forward(self, x):\n",
+ "    out = F.relu(self.bn1(self.conv1(x)))\n",
+ "    out = self.bn2(self.conv2(out))\n",
+ "    out += self.shortcut(x)\n",
+ "    out = F.relu(out)\n",
+ "    return out\n",
+ "\n",
+ "\n",
+ "class ResNet(nn.Module):\n",
+ "\n",
+ "  def __init__(self, block, num_blocks, num_classes=10):\n",
+ "    super(ResNet, self).__init__()\n",
+ "    self.in_planes = 64\n",
+ "\n",
+ "    self.conv1 = nn.Conv2d(\n",
+ "        3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n",
+ "    self.bn1 = nn.BatchNorm2d(64)\n",
+ "    self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n",
+ "    self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n",
+ "    self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n",
+ "    self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n",
+ "    self.linear = nn.Linear(512 * block.expansion, num_classes)\n",
+ "\n",
+ "  def _make_layer(self, block, planes, num_blocks, stride):\n",
+ "    strides = [stride] + [1] * (num_blocks - 1)\n",
+ "    layers = []\n",
+ "    for stride in strides:\n",
+ "      layers.append(block(self.in_planes, planes, stride))\n",
+ "      self.in_planes = planes * block.expansion\n",
+ "    return nn.Sequential(*layers)\n",
+ "\n",
+ "  def forward(self, x):\n",
+ "    out = F.relu(self.bn1(self.conv1(x)))\n",
+ "    out = self.layer1(out)\n",
+ "    out = self.layer2(out)\n",
+ "    out = self.layer3(out)\n",
+ "    out = self.layer4(out)\n",
+ "    out = F.avg_pool2d(out, 4)\n",
+ "    out = torch.flatten(out, 1)\n",
+ "    out = self.linear(out)\n",
+ "    return F.log_softmax(out, dim=1)\n",
+ "\n",
+ "\n",
+ "def ResNet18():\n",
+ "  return ResNet(BasicBlock, [2, 2, 2, 2])"
+ ],
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "LyBPVi71h7ug"
+ },
+ "source": [
+ "In the following cell we define the training loops and, most importantly, add the tracing annotations `xp.StepTrace` and `xp.Trace`, which we'll later inspect in the trace viewer of the TensorBoard profile. `xp.StepTrace` should be used only once per step, since it marks a complete step and is used to compute the model's step time shown on the TensorBoard profile summary page. The `xp.Trace` context manager can be added around whichever parts of the code you want a more detailed timeline for.\n",
197
+ ]
198
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "8vMl96KLoCq8"
+ },
+ "source": [
+ "SERIAL_EXEC = xmp.MpSerialExecutor()\n",
+ "# Only instantiate model weights once in memory.\n",
+ "WRAPPED_MODEL = xmp.MpModelWrapper(ResNet18())\n",
+ "\n",
+ "def train_resnet18(training_started):\n",
+ "  torch.manual_seed(1)\n",
+ "\n",
+ "  # We are using fake data here (not the real CIFAR dataset).\n",
+ "  train_dataset_len = 50000  # Number of examples in the CIFAR train set.\n",
+ "  train_loader = xu.SampleGenerator(\n",
+ "      data=(torch.zeros(FLAGS['batch_size'], 3, 32,\n",
+ "                        32), torch.zeros(FLAGS['batch_size'],\n",
+ "                                         dtype=torch.int64)),\n",
+ "      sample_count=train_dataset_len // FLAGS['batch_size'] //\n",
+ "      xm.xrt_world_size())\n",
+ "  test_loader = xu.SampleGenerator(\n",
+ "      data=(torch.zeros(FLAGS['batch_size'], 3, 32,\n",
+ "                        32), torch.zeros(FLAGS['batch_size'],\n",
+ "                                         dtype=torch.int64)),\n",
+ "      sample_count=10000 // FLAGS['batch_size'] // xm.xrt_world_size())\n",
+ "\n",
+ "  # Get loss function, optimizer, and model\n",
+ "  device = xm.xla_device()\n",
+ "  model = WRAPPED_MODEL.to(device)\n",
+ "  optimizer = optim.SGD(model.parameters(), lr=FLAGS['learning_rate'],\n",
+ "                        momentum=FLAGS['momentum'], weight_decay=5e-4)\n",
+ "  loss_fn = nn.NLLLoss()\n",
+ "\n",
+ "  server = xp.start_server(9012)\n",
+ "\n",
+ "  def train_loop_fn(loader):\n",
+ "    tracker = xm.RateTracker()\n",
+ "    model.train()\n",
+ "    for x, (data, target) in enumerate(loader):\n",
+ "      if x == 5:\n",
+ "        training_started.set()\n",
+ "      # Let's now profile the training step.\n",
+ "      with xp.StepTrace('train_loop', step_num=x):\n",
+ "        # This profiles the construction of the graph.\n",
+ "        with xp.Trace('build_graph'):\n",
+ "          optimizer.zero_grad()\n",
+ "          output = model(data)\n",
+ "          loss = loss_fn(output, target)\n",
+ "          loss.backward()\n",
+ "\n",
+ "        xm.optimizer_step(optimizer)\n",
+ "        tracker.add(FLAGS['batch_size'])\n",
+ "        if x % FLAGS['log_steps'] == 0:\n",
+ "          print('[xla:{}]({}) Loss={:.5f} Rate={:.2f} GlobalRate={:.2f} Time={}'.format(\n",
+ "              xm.get_ordinal(), x, loss.item(), tracker.rate(),\n",
+ "              tracker.global_rate(), time.asctime()), flush=True)\n",
+ "\n",
+ "  def test_loop_fn(loader):\n",
+ "    total_samples = 0\n",
+ "    correct = 0\n",
+ "    model.eval()\n",
+ "    data, pred, target = None, None, None\n",
+ "    for data, target in loader:\n",
+ "      output = model(data)\n",
+ "      pred = output.max(1, keepdim=True)[1]\n",
+ "      correct += pred.eq(target.view_as(pred)).sum().item()\n",
+ "      total_samples += data.size()[0]\n",
+ "\n",
+ "    accuracy = 100.0 * correct / total_samples\n",
+ "    print('[xla:{}] Accuracy={:.2f}%'.format(\n",
+ "        xm.get_ordinal(), accuracy), flush=True)\n",
+ "    return accuracy, data, pred, target\n",
+ "\n",
+ "  # Train and eval loops\n",
+ "  accuracy = 0.0\n",
+ "  data, pred, target = None, None, None\n",
+ "  for epoch in range(1, FLAGS['num_epochs'] + 1):\n",
+ "    para_loader = pl.ParallelLoader(train_loader, [device])\n",
+ "    train_loop_fn(para_loader.per_device_loader(device))\n",
+ "    xm.master_print(\"Finished training epoch {}\".format(epoch))\n",
+ "\n",
+ "    para_loader = pl.ParallelLoader(test_loader, [device])\n",
+ "    accuracy, data, pred, target = test_loop_fn(para_loader.per_device_loader(device))\n",
+ "    if FLAGS['metrics_debug']:\n",
+ "      xm.master_print(met.metrics_report(), flush=True)\n",
+ "\n",
+ "  return accuracy, data, pred, target\n",
+ ""
+ ],
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "_2nL4HmloEyl"
+ },
+ "source": [
+ "# Start training processes\n",
+ "def _mp_fn(rank, flags, training_started):\n",
+ "  global FLAGS\n",
+ "  FLAGS = flags\n",
+ "  torch.set_default_tensor_type('torch.FloatTensor')\n",
+ "  accuracy, data, pred, target = train_resnet18(training_started)\n",
+ "  if rank == 0:\n",
+ "    # Retrieve tensors that are on TPU core 0 and plot.\n",
+ "    plot_results(data.cpu(), pred.cpu(), target.cpu())\n",
+ "\n",
+ "def target_fn(training_started):\n",
+ "  sys.stdout = open('training_logs.stdout', 'w')\n",
+ "  sys.stderr = open('training_logs.stderr', 'w')\n",
+ "  xmp.spawn(_mp_fn, args=(FLAGS, training_started,),\n",
+ "            nprocs=FLAGS['num_cores'], start_method='fork')\n",
+ "\n",
+ "training_started = multiprocessing.Event()\n",
+ "p = multiprocessing.Process(target=target_fn, args=(training_started,))\n",
+ "p.start()"
+ ],
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "bcS58faWifX7"
+ },
+ "source": [
+ "The following cell first waits for training to start and then traces both the client VM side (i.e., where the XLA graph is built and the input pipeline runs) and the TPU device side (where compilation and execution actually happen). Note, however, that since we're running on Colab and not using GCS in this tutorial, TPU-side traces won't be collected. To collect full TPU traces, follow this [tutorial](https://cloud.google.com/tpu/docs/pytorch-xla-users-guide)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "zppUkQI3fv2p"
+ },
+ "source": [
+ "training_started.wait(120)\n",
+ "\n",
+ "import re\n",
+ "tpu_ip = re.match('grpc\\://((\\d{1,3}\\.){3}\\d{1,3})\\:\\d{4}',\n",
+ "                  os.environ.get('TPU_NAME')).group(1)\n",
+ "xp.trace('localhost:9012', '/tmp/tensorboard')  # client-side profiling\n",
+ "xp.trace(f'{tpu_ip}:8466', '/tmp/tensorboard')  # needs a GCS bucket for all traces to be written"
+ ],
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "Efdo1gx4bYRY"
+ },
+ "source": [
+ "%load_ext tensorboard\n",
+ "%tensorboard --logdir /tmp/tensorboard\n",
+ "# Click on the \"INACTIVE\" dropdown and select \"PROFILE\""
+ ],
+ "execution_count": null,
+ "outputs": []
+ }
+ ]
+ }