From c5f325343ea88252de6447694e39e3a6593894b3 Mon Sep 17 00:00:00 2001
From: Victor Milewski
Date: Mon, 25 Nov 2019 18:50:54 +0100
Subject: [PATCH 1/5] add option for multiple inputs when writing weight summary

---
 pytorch_lightning/root_module/memory.py | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/pytorch_lightning/root_module/memory.py b/pytorch_lightning/root_module/memory.py
index 362688ebe5f019..3485df2c9fd396 100644
--- a/pytorch_lightning/root_module/memory.py
+++ b/pytorch_lightning/root_module/memory.py
@@ -50,10 +50,20 @@ def get_variable_sizes(self):
         input_ = self.model.example_input_array
 
         if self.model.on_gpu:
-            input_ = input_.cuda(0)
+            # test if input is a list or a tuple
+            if type(input_) is list or type(input_) is tuple:
+                input_ = [input_i.cuda(0) if torch.is_tensor(input_i) else input_i
+                              for input_i in input_]
+            else:
+                input_ = input_.cuda(0)
 
         if self.model.trainer.use_amp:
-            input_ = input_.half()
+            # test if it is not a list or a tuple
+            if type(input_) is list or type(input_) is tuple:
+                input_ = [input_i.half() if torch.is_tensor(input_i) else input_i
+                              for input_i in input_]
+            else:
+                input_ = input_.half()
 
         with torch.no_grad():
 

From 4a584aa5db16a27e5b62d8ffd72048c28837c166 Mon Sep 17 00:00:00 2001
From: Victor Milewski
Date: Tue, 26 Nov 2019 09:31:28 +0100
Subject: [PATCH 2/5] resolve extra indent

---
 pytorch_lightning/root_module/memory.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/pytorch_lightning/root_module/memory.py b/pytorch_lightning/root_module/memory.py
index 3485df2c9fd396..4f8dded863f8ce 100644
--- a/pytorch_lightning/root_module/memory.py
+++ b/pytorch_lightning/root_module/memory.py
@@ -52,16 +52,16 @@ def get_variable_sizes(self):
         if self.model.on_gpu:
             # test if input is a list or a tuple
             if type(input_) is list or type(input_) is tuple:
-                input_ = [input_i.cuda(0) if torch.is_tensor(input_i) else input_i
-                              for input_i in input_]
+                input_ = [input_i.cuda(0) if torch.is_tensor(input_i) else input_i
+                          for input_i in input_]
             else:
                 input_ = input_.cuda(0)
 
         if self.model.trainer.use_amp:
             # test if it is not a list or a tuple
             if type(input_) is list or type(input_) is tuple:
-                input_ = [input_i.half() if torch.is_tensor(input_i) else input_i
-                              for input_i in input_]
+                input_ = [input_i.half() if torch.is_tensor(input_i) else input_i
+                          for input_i in input_]
             else:
                 input_ = input_.half()
 

From bfa7537eba8f59765428fdcde1b923ca0b30313a Mon Sep 17 00:00:00 2001
From: Victor Milewski
Date: Mon, 25 Nov 2019 18:50:54 +0100
Subject: [PATCH 3/5] add option for multiple inputs when writing weight summary

---
 pytorch_lightning/core/memory.py | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/pytorch_lightning/core/memory.py b/pytorch_lightning/core/memory.py
index b01451f8cf71e2..2393244db45ba8 100644
--- a/pytorch_lightning/core/memory.py
+++ b/pytorch_lightning/core/memory.py
@@ -50,10 +50,20 @@ def get_variable_sizes(self):
         input_ = self.model.example_input_array
 
         if self.model.on_gpu:
-            input_ = input_.cuda(0)
+            # test if input is a list or a tuple
+            if type(input_) is list or type(input_) is tuple:
+                input_ = [input_i.cuda(0) if torch.is_tensor(input_i) else input_i
+                              for input_i in input_]
+            else:
+                input_ = input_.cuda(0)
 
         if self.model.trainer.use_amp:
-            input_ = input_.half()
+            # test if it is not a list or a tuple
+            if type(input_) is list or type(input_) is tuple:
+                input_ = [input_i.half() if torch.is_tensor(input_i) else input_i
+                              for input_i in input_]
+            else:
+                input_ = input_.half()
 
         with torch.no_grad():
 

From 985c32b89c2706cf801f1601027e1b8b4b9b0093 Mon Sep 17 00:00:00 2001
From: Victor Milewski
Date: Tue, 26 Nov 2019 09:31:28 +0100
Subject: [PATCH 4/5] resolve extra indent

---
 pytorch_lightning/core/memory.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/pytorch_lightning/core/memory.py b/pytorch_lightning/core/memory.py
index 2393244db45ba8..7e229e73f9a844 100644
--- a/pytorch_lightning/core/memory.py
+++ b/pytorch_lightning/core/memory.py
@@ -52,16 +52,16 @@ def get_variable_sizes(self):
         if self.model.on_gpu:
             # test if input is a list or a tuple
             if type(input_) is list or type(input_) is tuple:
-                input_ = [input_i.cuda(0) if torch.is_tensor(input_i) else input_i
-                              for input_i in input_]
+                input_ = [input_i.cuda(0) if torch.is_tensor(input_i) else input_i
+                          for input_i in input_]
             else:
                 input_ = input_.cuda(0)
 
         if self.model.trainer.use_amp:
             # test if it is not a list or a tuple
             if type(input_) is list or type(input_) is tuple:
-                input_ = [input_i.half() if torch.is_tensor(input_i) else input_i
-                              for input_i in input_]
+                input_ = [input_i.half() if torch.is_tensor(input_i) else input_i
+                          for input_i in input_]
             else:
                 input_ = input_.half()
 

From 65ab2199b11911e1b66a07f8ec0f2a4a38f3f524 Mon Sep 17 00:00:00 2001
From: Victor Milewski
Date: Mon, 9 Dec 2019 10:11:47 +0100
Subject: [PATCH 5/5] Do not place tensors on cuda:0 by default. Check whether
 list or tuple using isinstance()

---
 pytorch_lightning/core/memory.py | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/pytorch_lightning/core/memory.py b/pytorch_lightning/core/memory.py
index 7e229e73f9a844..4704362e4ac3eb 100644
--- a/pytorch_lightning/core/memory.py
+++ b/pytorch_lightning/core/memory.py
@@ -50,16 +50,17 @@ def get_variable_sizes(self):
         input_ = self.model.example_input_array
 
         if self.model.on_gpu:
+            device = next(self.model.parameters()).get_device()
             # test if input is a list or a tuple
-            if type(input_) is list or type(input_) is tuple:
-                input_ = [input_i.cuda(0) if torch.is_tensor(input_i) else input_i
+            if isinstance(input_, (list, tuple)):
+                input_ = [input_i.cuda(device) if torch.is_tensor(input_i) else input_i
                           for input_i in input_]
             else:
-                input_ = input_.cuda(0)
+                input_ = input_.cuda(device)
 
         if self.model.trainer.use_amp:
             # test if it is not a list or a tuple
-            if type(input_) is list or type(input_) is tuple:
+            if isinstance(input_, (list, tuple)):
                 input_ = [input_i.half() if torch.is_tensor(input_i) else input_i
                           for input_i in input_]
             else:
@@ -68,12 +69,12 @@ def get_variable_sizes(self):
         with torch.no_grad():
 
             for _, m in mods:
-                if type(input_) is list or type(input_) is tuple:  # pragma: no cover
+                if isinstance(input_, (list, tuple)):  # pragma: no cover
                     out = m(*input_)
                 else:
                     out = m(input_)
 
-                if type(input_) is tuple or type(input_) is list:  # pragma: no cover
+                if isinstance(input_, (list, tuple)):  # pragma: no cover
                     in_size = []
                     for x in input_:
                         if type(x) is list:
@@ -85,7 +86,7 @@ def get_variable_sizes(self):
                 in_sizes.append(in_size)
 
-                if type(out) is tuple or type(out) is list:  # pragma: no cover
+                if isinstance(out, (list, tuple)):  # pragma: no cover
                     out_size = np.asarray([x.size() for x in out])
                 else:
                     out_size = np.array(out.size())
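
For illustration only, not part of the patch series above: a minimal standalone sketch of the behaviour the series enables, where an example input that is a single tensor or a list/tuple of tensors is moved onto the device the model's parameters live on instead of a hard-coded cuda:0. The helper name move_example_input is invented for this sketch, and it uses Tensor.to() with the parameter's .device attribute as a stand-in for the patch's .cuda(get_device()) call.

# Sketch only: mirrors the device-placement logic introduced by the patches above.
import torch
import torch.nn as nn


def move_example_input(model, input_):
    """Return input_ on the model's device; lists/tuples are handled element-wise."""
    device = next(model.parameters()).device  # device the model's weights actually live on
    if isinstance(input_, (list, tuple)):
        return [x.to(device) if torch.is_tensor(x) else x for x in input_]
    return input_.to(device)


if __name__ == "__main__":
    model = nn.Linear(4, 2)
    # a multi-input example, e.g. what example_input_array could hold after this change
    example_input = (torch.rand(3, 4), torch.rand(3, 4))
    moved = move_example_input(model, example_input)
    print([x.device for x in moved])  # both tensors report the model's device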