diff --git a/datasets.py b/datasets.py
index ebb7bcc..be00441 100755
--- a/datasets.py
+++ b/datasets.py
@@ -8,7 +8,8 @@
 from glob import glob
 import utils.frame_utils as frame_utils

-from scipy.misc import imread, imresize
+from imageio import imread
+

 class StaticRandomCrop(object):
     def __init__(self, image_size, crop_size):
diff --git a/download_caffe_models.sh b/download_caffe_models.sh
index a0f8495..69b09f9 100755
--- a/download_caffe_models.sh
+++ b/download_caffe_models.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
-sudo rm -rf flownet2-docker
-sudo git clone https://github.com/lmb-freiburg/flownet2-docker
+#sudo rm -rf flownet2-docker
+#sudo git clone https://github.com/lmb-freiburg/flownet2-docker
 cd flownet2-docker

 sudo sed -i '$ a RUN apt-get update && apt-get install -y python-pip \
diff --git a/main.py b/main.py
index 6b19eb3..4abcc07 100755
--- a/main.py
+++ b/main.py
@@ -223,8 +223,8 @@ def forward(self, data, target, inference=False ):
     if not os.path.exists(args.save):
         os.makedirs(args.save)

-    train_logger = SummaryWriter(log_dir = os.path.join(args.save, 'train'), comment = 'training')
-    validation_logger = SummaryWriter(log_dir = os.path.join(args.save, 'validation'), comment = 'validation')
+    train_logger = SummaryWriter(logdir = os.path.join(args.save, 'train'), comment = 'training')
+    validation_logger = SummaryWriter(logdir = os.path.join(args.save, 'validation'), comment = 'validation')

     # Dynamically load the optimizer with parameters passed in via "--optimizer_[param]=[value]" arguments
     with tools.TimerBlock("Initializing {} Optimizer".format(args.optimizer)) as block:
@@ -261,7 +261,7 @@ def train(args, epoch, start_iteration, data_loader, model, optimizer, logger, i

             data, target = [Variable(d) for d in data], [Variable(t) for t in target]
             if args.cuda and args.number_gpus == 1:
-                data, target = [d.cuda(async=True) for d in data], [t.cuda(async=True) for t in target]
+                data, target = [d.cuda(non_blocking=True) for d in data], [t.cuda(non_blocking=True) for t in target]

             optimizer.zero_grad() if not is_validate else None
             losses = model(data[0], target[0])
@@ -357,7 +357,7 @@ def inference(args, epoch, data_loader, model, offset=0):
         total_loss = 0
         for batch_idx, (data, target) in enumerate(progress):
             if args.cuda:
-                data, target = [d.cuda(async=True) for d in data], [t.cuda(async=True) for t in target]
+                data, target = [d.cuda(non_blocking=True) for d in data], [t.cuda(non_blocking=True) for t in target]
             data, target = [Variable(d) for d in data], [Variable(t) for t in target]

             # when ground-truth flows are not available for inference_dataset,
diff --git a/networks/FlowNetC.py b/networks/FlowNetC.py
index 61e117a..00ca7fb 100755
--- a/networks/FlowNetC.py
+++ b/networks/FlowNetC.py
@@ -69,6 +69,7 @@ def __init__(self,args, batchNorm=True, div_flow = 20):
         self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')

     def forward(self, x):
+        from pdb import set_trace
         x1 = x[:,0:3,:,:]
         x2 = x[:,3::,:,:]

diff --git a/run_pair.py b/run_pair.py
new file mode 100644
index 0000000..c42bbef
--- /dev/null
+++ b/run_pair.py
@@ -0,0 +1,76 @@
+import os
+import torch
+import numpy as np
+import argparse
+
+from models import FlowNet2  # the import path depends on where you create this module
+from utils.frame_utils import read_gen  # the import path depends on where you create this module
+from PIL import Image
+from math import ceil
+
+from pdb import set_trace
+
+if __name__ == '__main__':
+    # obtain the args needed to construct the FlowNet2 model
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--fp16', action='store_true', help='Run model in pseudo-fp16 mode (fp16 storage fp32 math).')
+    parser.add_argument("--rgb_max", type=float, default=255.)
+    args = parser.parse_args()
+
+    # instantiate the net
+    net = FlowNet2(args).cuda()
+    # load the state_dict
+    state_dict = torch.load("./FlowNet2_checkpoint.pth.tar")
+    net.load_state_dict(state_dict["state_dict"])
+
+    # load the image pair; you can find this operation in datasets.py
+    img1_fn = "./flownet2-docker/data/0000000-imgL.png"
+    img2_fn = "./flownet2-docker/data/0000001-imgL.png"
+    pim1 = read_gen(img1_fn)
+    pim2 = read_gen(img2_fn)
+    # read_gen returns a numpy array of shape (h, w, 3)
+
+    img1 = Image.open(img1_fn)
+    img2 = Image.open(img2_fn)
+    assert(img1.size == img2.size)
+    width, height = img1.size
+    divisor = 64.
+    adapted_width = int(ceil(width/divisor) * divisor)
+    adapted_height = int(ceil(height/divisor) * divisor)
+    img1 = img1.resize((adapted_width, adapted_height), Image.BICUBIC)
+    img2 = img2.resize((adapted_width, adapted_height), Image.BICUBIC)
+    pim1 = np.array(img1)
+    pim2 = np.array(img2)
+
+    assert(pim1.shape == pim2.shape)
+    images = [pim1, pim2]
+    images = np.array(images).transpose(3, 0, 1, 2)
+    im = torch.from_numpy(images.astype(np.float32)).unsqueeze(0).cuda()
+
+    # run the image pair through the network to obtain the flow
+    result = net(im).squeeze()
+    data = result.data.cpu().numpy().transpose(1, 2, 0)
+
+    cmp_path = "./flownet2-docker/flow.flo"
+    if os.path.isfile(cmp_path):
+        cmp_data = read_gen(cmp_path)
+        # resize the u and v channels individually back to the original resolution
+        if width != adapted_width or height != adapted_height:
+            flow_u = Image.fromarray(data[:,:,0]).resize((width, height))
+            flow_v = Image.fromarray(data[:,:,1]).resize((width, height))
+            data = np.stack((flow_u, flow_v), axis=2)
+        print("Doing comparison: ", np.linalg.norm(data - cmp_data))
+
+    # save the flow; adapted from scripts/run-flownet.py in the flownet2-caffe project
+    def writeFlow(name, flow):
+        f = open(name, 'wb')
+        f.write('PIEH'.encode('utf-8'))
+        np.array([flow.shape[1], flow.shape[0]], dtype=np.int32).tofile(f)
+        flow = flow.astype(np.float32)
+        flow.tofile(f)
+        f.flush()
+        f.close()
+
+    data = result.data.cpu().numpy().transpose(1, 2, 0)
+    writeFlow("./flow.flo", data)
+    print("wrote flow.flo")
diff --git a/utils/frame_utils.py b/utils/frame_utils.py
index 9294f7b..d3f380c 100755
--- a/utils/frame_utils.py
+++ b/utils/frame_utils.py
@@ -1,6 +1,6 @@
 import numpy as np
 from os.path import *
-from scipy.misc import imread
+from imageio import imread
 from . import flow_utils

 def read_gen(file_name):
diff --git a/utils/tools.py b/utils/tools.py
index 0de5ee7..faedabe 100755
--- a/utils/tools.py
+++ b/utils/tools.py
@@ -5,14 +5,12 @@
 from os.path import *
 import numpy as np
 from inspect import isclass
-from pytz import timezone
 from datetime import datetime
 import inspect
 import torch

 def datestr():
-    pacific = timezone('US/Pacific')
-    now = datetime.now(pacific)
+    now = datetime.now()
     return '{}{:02}{:02}_{:02}{:02}'.format(now.year, now.month, now.day, now.hour, now.minute)

 def module_to_dict(module, exclude=[]):
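
As a sanity check (not part of the diff above), here is a minimal sketch of how the flow.flo file produced by writeFlow in run_pair.py could be read back. The readFlow helper name is hypothetical; the layout it assumes (4-byte 'PIEH' tag, int32 width and height, then float32 u/v data) simply mirrors what writeFlow writes.

    import numpy as np

    def readFlow(name):
        # hypothetical counterpart to writeFlow in run_pair.py above
        with open(name, 'rb') as f:
            tag = f.read(4)
            assert tag == b'PIEH', "invalid .flo file"
            # writeFlow stores width first, then height, then (h, w, 2) float32 flow
            width, height = np.fromfile(f, np.int32, count=2)
            data = np.fromfile(f, np.float32, count=2 * int(width) * int(height))
        return data.reshape(int(height), int(width), 2)

    flow = readFlow("./flow.flo")
    print(flow.shape)  # e.g. (adapted_height, adapted_width, 2)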