26 changes: 13 additions & 13 deletions python/ray/tune/examples/mnist_pytorch.py
@@ -8,7 +8,6 @@
 import torch.nn.functional as F
 import torch.optim as optim
 from torchvision import datasets, transforms
-from torch.autograd import Variable

 # Training settings
 parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
@@ -120,7 +119,6 @@ def train(epoch):
     for batch_idx, (data, target) in enumerate(train_loader):
         if args.cuda:
             data, target = data.cuda(), target.cuda()
-        data, target = Variable(data), Variable(target)
         optimizer.zero_grad()
         output = model(data)
         loss = F.nll_loss(output, target)
@@ -131,16 +129,17 @@ def test():
     model.eval()
     test_loss = 0
     correct = 0
-    for data, target in test_loader:
-        if args.cuda:
-            data, target = data.cuda(), target.cuda()
-        data, target = Variable(data, volatile=True), Variable(target)
-        output = model(data)
-        test_loss += F.nll_loss(
-            output, target, size_average=False).item()  # sum up batch loss
-        pred = output.data.max(
-            1, keepdim=True)[1]  # get the index of the max log-probability
-        correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()
+    with torch.no_grad():
+        for data, target in test_loader:
+            if args.cuda:
+                data, target = data.cuda(), target.cuda()
+            output = model(data)
+            # sum up batch loss
+            test_loss += F.nll_loss(output, target, reduction='sum').item()
+            # get the index of the max log-probability
+            pred = output.argmax(dim=1, keepdim=True)
+            correct += pred.eq(
+                target.data.view_as(pred)).long().cpu().sum()

     test_loss = test_loss / len(test_loader.dataset)
     accuracy = correct.item() / len(test_loader.dataset)
@@ -176,7 +175,8 @@ def test():
                     "training_iteration": 1 if args.smoke_test else 20
                 },
                 "resources_per_trial": {
-                    "cpu": 3
+                    "cpu": 3,
+                    "gpu": int(not args.no_cuda)
                 },
                 "run": "train_mnist",
                 "num_samples": 1 if args.smoke_test else 10,
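For reference, here is a self-contained sketch of the evaluation pattern the diff above adopts: torch.no_grad() in place of volatile Variables, reduction='sum' in place of the removed size_average=False, and Tensor.argmax() in place of output.data.max(1, keepdim=True)[1]. The toy linear model and random data are placeholders for illustration only, not part of the example script.

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset

model = nn.Linear(784, 10)  # stand-in for the example's MNIST network
inputs = torch.randn(256, 784)
targets = torch.randint(0, 10, (256,))
test_loader = DataLoader(TensorDataset(inputs, targets), batch_size=64)

model.eval()
test_loss = 0
correct = 0
with torch.no_grad():  # no autograd bookkeeping needed during evaluation
    for data, target in test_loader:
        output = F.log_softmax(model(data), dim=1)
        # sum up batch loss
        test_loss += F.nll_loss(output, target, reduction='sum').item()
        # index of the max log-probability
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()

test_loss = test_loss / len(test_loader.dataset)
accuracy = correct.item() / len(test_loader.dataset)
print("loss: {:.4f}, accuracy: {:.4f}".format(test_loss, accuracy))

Both reduction= and Tensor.argmax assume a post-0.4 PyTorch, the same requirement that allows the legacy Variable wrapper to be dropped.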
28 changes: 13 additions & 15 deletions python/ray/tune/examples/mnist_pytorch_trainable.py
@@ -9,7 +9,6 @@
 import torch.nn.functional as F
 import torch.optim as optim
 from torchvision import datasets, transforms
-from torch.autograd import Variable

 from ray.tune import Trainable

@@ -127,7 +126,6 @@ def _train_iteration(self):
         for batch_idx, (data, target) in enumerate(self.train_loader):
             if self.args.cuda:
                 data, target = data.cuda(), target.cuda()
-            data, target = Variable(data), Variable(target)
             self.optimizer.zero_grad()
             output = self.model(data)
             loss = F.nll_loss(output, target)
@@ -138,18 +136,17 @@ def _test(self):
         self.model.eval()
         test_loss = 0
         correct = 0
-        for data, target in self.test_loader:
-            if self.args.cuda:
-                data, target = data.cuda(), target.cuda()
-            data, target = Variable(data, volatile=True), Variable(target)
-            output = self.model(data)
-
-            # sum up batch loss
-            test_loss += F.nll_loss(output, target, size_average=False).item()
-
-            # get the index of the max log-probability
-            pred = output.data.max(1, keepdim=True)[1]
-            correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()
+        with torch.no_grad():
+            for data, target in self.test_loader:
+                if self.args.cuda:
+                    data, target = data.cuda(), target.cuda()
+                output = self.model(data)
+                # sum up batch loss
+                test_loss += F.nll_loss(output, target, reduction='sum').item()
+                # get the index of the max log-probability
+                pred = output.argmax(dim=1, keepdim=True)
+                correct += pred.eq(
+                    target.data.view_as(pred)).long().cpu().sum()

         test_loss = test_loss / len(self.test_loader.dataset)
         accuracy = correct.item() / len(self.test_loader.dataset)
@@ -188,7 +185,8 @@ def _restore(self, checkpoint_path):
                     "training_iteration": 1 if args.smoke_test else 20,
                 },
                 "resources_per_trial": {
-                    "cpu": 3
+                    "cpu": 3,
+                    "gpu": int(not args.no_cuda)
                 },
                 "run": TrainMNIST,
                 "num_samples": 1 if args.smoke_test else 20,
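Both example scripts gate the new GPU request on the existing --no-cuda flag. Below is a trimmed-down sketch of that part of the experiment spec; the "exp" name, the reduced set of keys, and the final print are placeholders, not the scripts' full configuration.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--no-cuda", action="store_true", default=False)
parser.add_argument("--smoke-test", action="store_true")
args = parser.parse_args()

experiment_spec = {
    "exp": {
        "run": "train_mnist",
        "stop": {
            "mean_accuracy": 0.98,
            "training_iteration": 1 if args.smoke_test else 20,
        },
        "resources_per_trial": {
            "cpu": 3,
            # Request one GPU per trial unless CUDA is disabled on the
            # command line; Tune then only schedules trials onto workers
            # that can actually provide that GPU.
            "gpu": int(not args.no_cuda),
        },
        "num_samples": 1 if args.smoke_test else 10,
    }
}
print(experiment_spec)

Declaring the GPU through resources_per_trial, rather than letting each trial grab a device on its own, keeps Tune's scheduler aware of the requirement and lets the --no-cuda CI runs stay on CPU-only workers.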
5 changes: 2 additions & 3 deletions test/jenkins_tests/run_multi_node_tests.sh
@@ -363,12 +363,11 @@ docker run --rm --shm-size=${SHM_SIZE} --memory=${MEMORY_SIZE} $DOCKER_SHA \
     --smoke-test

 docker run --rm --shm-size=${SHM_SIZE} --memory=${MEMORY_SIZE} $DOCKER_SHA \
-    python /ray/python/ray/tune/examples/mnist_pytorch.py \
-    --smoke-test
+    python /ray/python/ray/tune/examples/mnist_pytorch.py --smoke-test --no-cuda

 docker run --rm --shm-size=${SHM_SIZE} --memory=${MEMORY_SIZE} $DOCKER_SHA \
     python /ray/python/ray/tune/examples/mnist_pytorch_trainable.py \
-    --smoke-test
+    --smoke-test --no-cuda

 docker run --rm --shm-size=${SHM_SIZE} --memory=${MEMORY_SIZE} $DOCKER_SHA \
     python /ray/python/ray/tune/examples/genetic_example.py \
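The --no-cuda flag passed in these CI commands is consumed by the examples' usual CUDA gating. A sketch of that pattern follows, assuming the standard argparse setup from the PyTorch MNIST examples rather than quoting this PR verbatim.

import argparse

import torch

parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
args = parser.parse_args()

# Use CUDA only when it is both available and not explicitly disabled;
# passing --no-cuda is what lets the Jenkins containers run these
# examples on CPU-only hosts.
args.cuda = not args.no_cuda and torch.cuda.is_available()
print("using CUDA:", args.cuda)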