This repository was archived by the owner on Mar 22, 2021. It is now read-only.

Commit 0cbaa6a

Dev (#40)
* added image channel and params to config (#29)
* exping
* added large kernel matters architecture, renamed stuff, generalized c… (#30)
* added large kernel matters architecture, renamed stuff, generalized conv2drelubn block
* exping
* exping
* copied the old ConvBnRelu block to make sure it is easy to finetune old models
* reverted main
* Depth (#31)
* exping
* exping
* added depth loaders, and depth_excitation layer, adjusted models and callbacks to deal with both
* fixed minor issues
* exping
* merged/refactored
* exping
* refactored architectures, moved use_depth param to main
* added dropout to lkm constructor, dropped my experiment dir definition
* Second level (#33)
* exping
* first stacked unet training
* fixed minor typo-bugs
* fixed unet naming bug
* added stacking preds exploration
* dropped redundant imports
* adjusted callbacks to work with stacking, added custom to_tensor_stacking
* Auxiliary data (#34)
* exping
* added option to use auxiliary masks
* Stacking (#35)
* exping
* exping
* fixed stacking postpro
* Stacking (#36)
* exping
* exping
* fixed stacking postpro
* exping
* added fully convo stacking, fixed minor issues with loader_mode: stacking
* Update architectures.py: import fix
* Update README.md
* Update models.py: reverted to default (current best) large kernel matters internal_channel_nr
* Stacking (#37)
* Stacking depth (#38)
* exping
* added depth option to stacking model, dropped stacking unet from models
* Empty non empty (#39)
* exping
* added empty vs non empty loaders/models and execution
* changed to lovasz loss as default from bce
* reverted default callbacks target name
1 parent 187eb7c · commit 0cbaa6a

16 files changed: +2430 −422 lines

README.md (+1 −1)

@@ -32,7 +32,7 @@ In this open source solution you will find references to the [neptune.ml](https:
 |[solution 6](https://app.neptune.ml/neptune-ml/Salt-Detection?namedFilterId=ab96e5df-3f1b-4516-9df0-4492e0199c71)|0.821|0.827|
 |[solution 7](https://app.neptune.ml/neptune-ml/Salt-Detection?namedFilterId=0810785e-ebab-4173-8e9e-8fe560095b77)|0.829|0.837|
 |[solution 8](https://app.neptune.ml/neptune-ml/Salt-Detection?namedFilterId=bda70048-f037-4c0d-a096-15ea93fd8924)|0.830|0.845|
-
+|[solution 9](https://app.neptune.ml/neptune-ml/Salt-Detection?namedFilterId=c21fc5a2-437a-412f-86e1-078fe31e025d)|0.849|0.847|
 
 ## Start experimenting with ready-to-use code
 You can jump start your participation in the competition by using our starter pack. Installation instruction below will guide you through the setup.

common_blocks/architectures.py (+631)
Large diffs are not rendered by default.

common_blocks/callbacks.py (+268 −18)
Large diffs are not rendered by default.

common_blocks/loaders.py (+320 −30)
Large diffs are not rendered by default.

common_blocks/models.py (+112 −52)

@@ -10,63 +10,43 @@
 
 from .utils import sigmoid, softmax, get_list_of_image_predictions, pytorch_where
 from . import callbacks as cbk
-from .unet_models import UNetResNet
+from .architectures import UNetResNet, LargeKernelMatters, UNetResNetWithDepth, StackingFCN, StackingFCNWithDepth, \
+    EmptinessClassifier
 from .lovasz_losses import lovasz_hinge
 
-PRETRAINED_NETWORKS = {'ResNet18': {'model': UNetResNet,
-                                    'model_config': {'encoder_depth': 18, 'use_hypercolumn': False,
-                                                     'dropout_2d': 0.0, 'pretrained': True,
-                                                     },
-                                    'init_weights': False},
-                       'ResNet34': {'model': UNetResNet,
-                                    'model_config': {'encoder_depth': 34, 'use_hypercolumn': False,
-                                                     'dropout_2d': 0.0, 'pretrained': True,
-                                                     },
-                                    'init_weights': False},
-                       'ResNet50': {'model': UNetResNet,
-                                    'model_config': {'encoder_depth': 50, 'use_hypercolumn': False,
-                                                     'dropout_2d': 0.0, 'pretrained': True,
-                                                     },
-                                    'init_weights': False},
-                       'ResNet101': {'model': UNetResNet,
-                                     'model_config': {'encoder_depth': 101, 'use_hypercolumn': False,
-                                                      'dropout_2d': 0.0, 'pretrained': True,
-                                                      },
-                                     'init_weights': False},
-                       'ResNet152': {'model': UNetResNet,
-                                     'model_config': {'encoder_depth': 152, 'use_hypercolumn': False,
-                                                      'dropout_2d': 0.0, 'pretrained': True,
-                                                      },
-                                     'init_weights': False},
-                       'ResNetHyper18': {'model': UNetResNet,
-                                         'model_config': {'encoder_depth': 18, 'use_hypercolumn': True,
-                                                          'dropout_2d': 0.0, 'pretrained': True,
-                                                          },
-                                         'init_weights': False},
-                       'ResNetHyper34': {'model': UNetResNet,
+ARCHITECTURES = {'UNetResNet': {'model': UNetResNet,
+                                'model_config': {'encoder_depth': 34, 'use_hypercolumn': True,
+                                                 'dropout_2d': 0.0, 'pretrained': True,
+                                                 },
+                                'init_weights': False},
+
+                 'UNetResNetWithDepth': {'model': UNetResNetWithDepth,
                                          'model_config': {'encoder_depth': 34, 'use_hypercolumn': True,
                                                           'dropout_2d': 0.0, 'pretrained': True,
                                                           },
                                          'init_weights': False},
-                       'ResNetHyper50': {'model': UNetResNet,
-                                         'model_config': {'encoder_depth': 50, 'use_hypercolumn': True,
-                                                          'dropout_2d': 0.0, 'pretrained': True,
+                 'LargeKernelMatters': {'model': LargeKernelMatters,
+                                        'model_config': {'encoder_depth': 34, 'pretrained': True,
+                                                         'kernel_size': 9, 'internal_channels': 21,
+                                                         'dropout_2d': 0.0, 'use_relu': True
+                                                         },
+                                        'init_weights': False},
+                 'StackingFCN': {'model': StackingFCN,
+                                 'model_config': {'input_model_nr': 18, 'filter_nr': 32, 'dropout_2d': 0.0
+                                                  },
+                                 'init_weights': True},
+                 'StackingFCNWithDepth': {'model': StackingFCNWithDepth,
+                                          'model_config': {'input_model_nr': 18, 'filter_nr': 32, 'dropout_2d': 0.0
+                                                           },
+                                          'init_weights': True},
+                 'EmptinessClassifier': {'model': EmptinessClassifier,
+                                         'model_config': {'encoder_depth': 18, 'pretrained': True,
                                                           },
                                          'init_weights': False},
-                       'ResNetHyper101': {'model': UNetResNet,
-                                          'model_config': {'encoder_depth': 101, 'use_hypercolumn': True,
-                                                           'dropout_2d': 0.0, 'pretrained': True,
-                                                           },
-                                          'init_weights': False},
-                       'ResNetHyper152': {'model': UNetResNet,
-                                          'model_config': {'encoder_depth': 152, 'use_hypercolumn': True,
-                                                           'dropout_2d': 0.0, 'pretrained': True,
-                                                           },
-                                          'init_weights': False},
-                       }
+                 }
 
 
-class PyTorchUNet(Model):
+class SegmentationModel(Model):
     def __init__(self, architecture_config, training_config, callbacks_config):
         super().__init__(architecture_config, training_config, callbacks_config)
         self.activation_func = self.architecture_config['model_params']['activation']

@@ -75,7 +55,7 @@ def __init__(self, architecture_config, training_config, callbacks_config):
         self.weight_regularization = weight_regularization
         self.optimizer = optim.Adam(self.weight_regularization(self.model, **architecture_config['regularizer_params']),
                                     **architecture_config['optimizer_params'])
-        self.callbacks = callbacks_unet(self.callbacks_config)
+        self.callbacks = callbacks_network(self.callbacks_config)
 
     def fit(self, datagen, validation_datagen=None, meta_valid=None):
         self._initialize_model_weights()

@@ -179,8 +159,8 @@ def _transform(self, datagen, validation_datagen=None, **kwargs):
         return outputs
 
     def set_model(self):
-        encoder = self.architecture_config['model_params']['encoder']
-        config = PRETRAINED_NETWORKS[encoder]
+        architecture = self.architecture_config['model_params']['architecture']
+        config = ARCHITECTURES[architecture]
         self.model = config['model'](num_classes=self.architecture_config['model_params']['out_channels'],
                                      **config['model_config'])
         self._initialize_model_weights = lambda: None

@@ -190,6 +170,7 @@ def set_loss(self):
             raise NotImplementedError('No softmax loss defined')
         elif self.activation_func == 'sigmoid':
             loss_function = lovasz_loss
+            # loss_function = nn.BCEWithLogitsLoss()
         else:
             raise Exception('Only softmax and sigmoid activations are allowed')
         self.loss_function = [('mask', loss_function, 1.0)]

@@ -209,6 +190,84 @@ def load(self, filepath):
         return self
 
 
+class SegmentationModelWithDepth(SegmentationModel):
+    def __init__(self, architecture_config, training_config, callbacks_config):
+        super().__init__(architecture_config, training_config, callbacks_config)
+        self.activation_func = self.architecture_config['model_params']['activation']
+        self.set_model()
+        self.set_loss()
+        self.weight_regularization = weight_regularization
+        self.optimizer = optim.Adam(self.weight_regularization(self.model, **architecture_config['regularizer_params']),
+                                    **architecture_config['optimizer_params'])
+        self.callbacks = callbacks_network(self.callbacks_config)
+
+    def _fit_loop(self, data):
+        X = data[0]
+        D = data[1]
+        targets_tensors = data[2:]
+
+        if torch.cuda.is_available():
+            X = Variable(X).cuda()
+            D = Variable(D).cuda()
+            targets_var = []
+            for target_tensor in targets_tensors:
+                targets_var.append(Variable(target_tensor).cuda())
+        else:
+            X = Variable(X)
+            D = Variable(D)
+            targets_var = []
+            for target_tensor in targets_tensors:
+                targets_var.append(Variable(target_tensor))
+
+        self.optimizer.zero_grad()
+        outputs_batch = self.model(X, D)
+        partial_batch_losses = {}
+
+        if len(self.output_names) == 1:
+            for (name, loss_function, weight), target in zip(self.loss_function, targets_var):
+                batch_loss = loss_function(outputs_batch, target) * weight
+        else:
+            for (name, loss_function, weight), output, target in zip(self.loss_function, outputs_batch, targets_var):
+                partial_batch_losses[name] = loss_function(output, target) * weight
+            batch_loss = sum(partial_batch_losses.values())
+        partial_batch_losses['sum'] = batch_loss
+
+        batch_loss.backward()
+        self.optimizer.step()
+
+        return partial_batch_losses
+
+    def _transform(self, datagen, validation_datagen=None, **kwargs):
+        self.model.eval()
+
+        batch_gen, steps = datagen
+        outputs = {}
+        for batch_id, data in enumerate(batch_gen):
+            X = data[0]
+            D = data[1]
+
+            if torch.cuda.is_available():
+                X = Variable(X, volatile=True).cuda()
+                D = Variable(D, volatile=True).cuda()
+            else:
+                X = Variable(X, volatile=True)
+                D = Variable(D, volatile=True)
+            outputs_batch = self.model(X, D)
+
+            if len(self.output_names) == 1:
+                outputs.setdefault(self.output_names[0], []).append(outputs_batch.data.cpu().numpy())
+            else:
+                for name, output in zip(self.output_names, outputs_batch):
+                    output_ = output.data.cpu().numpy()
+                    outputs.setdefault(name, []).append(output_)
+            if batch_id == steps:
+                break
+        self.model.train()
+        outputs = {'{}_prediction'.format(name): get_list_of_image_predictions(outputs_) for name, outputs_ in
+                   outputs.items()}
+        return outputs
+
+
 class FocalWithLogitsLoss(nn.Module):
     def __init__(self, alpha=1.0, gamma=1.0):
         super().__init__()

@@ -235,7 +294,7 @@ def __init__(self, smooth=0, eps=1e-7):
 
     def forward(self, output, target):
         return 1 - (2 * torch.sum(output * target) + self.smooth) / (
-                torch.sum(output) + torch.sum(target) + self.smooth + self.eps)
+            torch.sum(output) + torch.sum(target) + self.smooth + self.eps)
 
 
 def weight_regularization(model, regularize, weight_decay_conv2d):

@@ -249,12 +308,13 @@ def weight_regularization(model, regularize, weight_decay_conv2d):
     return parameter_list
 
 
-def callbacks_unet(callbacks_config):
+def callbacks_network(callbacks_config):
     experiment_timing = cbk.ExperimentTiming(**callbacks_config['experiment_timing'])
     model_checkpoints = cbk.ModelCheckpoint(**callbacks_config['model_checkpoint'])
     lr_scheduler = cbk.ReduceLROnPlateauScheduler(**callbacks_config['reduce_lr_on_plateau_scheduler'])
     training_monitor = cbk.TrainingMonitor(**callbacks_config['training_monitor'])
     validation_monitor = cbk.ValidationMonitor(**callbacks_config['validation_monitor'])
+    # validation_monitor = cbk.ValidationMonitorEmptiness(**callbacks_config['validation_monitor'])
    neptune_monitor = cbk.NeptuneMonitor(**callbacks_config['neptune_monitor'])
    early_stopping = cbk.EarlyStopping(**callbacks_config['early_stopping'])
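For reference, `set_model` above now selects a network by architecture name instead of by encoder depth. A minimal sketch of how an entry of the new `ARCHITECTURES` registry can be instantiated directly (the `num_classes=2` value is an illustrative assumption; in the pipeline it comes from `model_params['out_channels']`):

```python
from common_blocks.models import ARCHITECTURES

# Look up a registered architecture by name, mirroring SegmentationModel.set_model().
entry = ARCHITECTURES['LargeKernelMatters']

# Build the network with its registered defaults; num_classes=2 is only an example value here.
model = entry['model'](num_classes=2, **entry['model_config'])
print(type(model).__name__)  # LargeKernelMatters
```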
common_blocks/pipelines.py (+6 −6)

@@ -8,10 +8,10 @@
 from .postprocessing import binarize
 
 
-def preprocessing_train(config, model_name='unet', suffix=''):
+def preprocessing_train(config, model_name='network', suffix=''):
     if config.general.loader_mode == 'resize_and_pad':
         loader_config = config.loaders.resize_and_pad
-    elif config.general.loader_mode == 'resize':
+    elif config.general.loader_mode == 'resize' or config.general.loader_mode == 'stacking':
         loader_config = config.loaders.resize
     else:
         raise NotImplementedError

@@ -56,10 +56,10 @@ def preprocessing_train(config, model_name='unet', suffix=''):
     return loader
 
 
-def preprocessing_inference(config, model_name='unet', suffix=''):
+def preprocessing_inference(config, model_name='network', suffix=''):
     if config.general.loader_mode == 'resize_and_pad':
         loader_config = config.loaders.resize_and_pad
-    elif config.general.loader_mode == 'resize':
+    elif config.general.loader_mode == 'resize' or config.general.loader_mode == 'stacking':
         loader_config = config.loaders.resize
     else:
         raise NotImplementedError

@@ -92,10 +92,10 @@ def preprocessing_inference(config, model_name='unet', suffix=''):
     return loader
 
 
-def preprocessing_inference_tta(config, model_name='unet', suffix=''):
+def preprocessing_inference_tta(config, model_name='network', suffix=''):
     if config.general.loader_mode == 'resize_and_pad':
         loader_config = config.loaders.pad_tta
-    elif config.general.loader_mode == 'resize':
+    elif config.general.loader_mode == 'resize' or config.general.loader_mode == 'stacking':
         loader_config = config.loaders.resize_tta
     else:
         raise NotImplementedError
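The only functional change above is that the new `stacking` loader mode is routed to the same loader settings as `resize`. A minimal sketch of that dispatch with a stand-in config object (the `SimpleNamespace` layout is an assumption for illustration; the project builds the real `config` elsewhere):

```python
from types import SimpleNamespace

# Stand-in config that only mirrors the attribute access used in pipelines.py.
config = SimpleNamespace(
    general=SimpleNamespace(loader_mode='stacking'),
    loaders=SimpleNamespace(resize='<resize loader params>',
                            resize_and_pad='<resize_and_pad loader params>'),
)

if config.general.loader_mode == 'resize_and_pad':
    loader_config = config.loaders.resize_and_pad
elif config.general.loader_mode in ('resize', 'stacking'):  # 'stacking' reuses the resize loader settings
    loader_config = config.loaders.resize
else:
    raise NotImplementedError
print(loader_config)  # '<resize loader params>'
```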

common_blocks/postprocessing.py (+18)

@@ -41,3 +41,21 @@ def crop_image(image, target_size):
 def binarize(image, threshold):
     image_binarized = (image[1, :, :] > threshold).astype(np.uint8)
     return image_binarized
+
+
+def resize_emptiness_predictions(image, target_size):
+    """Resize image to target size
+
+    Args:
+        image (numpy.ndarray): Image of shape (C x H x W).
+        target_size (tuple): Target size (H, W).
+
+    Returns:
+        numpy.ndarray: Resized image of shape (C x H x W).
+
+    """
+    n_channels = image.shape[0]
+    resized_image = np.zeros((n_channels, target_size[0], target_size[1]))
+    resized_image[0, :, :] = image[0]
+    resized_image[1, :, :] = image[1]
+    return resized_image
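Note that `resize_emptiness_predictions` does not interpolate: it writes the first two input channels into a zero-initialized array of the target size, relying on NumPy broadcasting, so it works when each channel is either already at the target size or constant (for example shape `(2, 1, 1)`). A small usage sketch under that assumption, with made-up prediction values:

```python
import numpy as np
from common_blocks.postprocessing import resize_emptiness_predictions

# Made-up two-channel emptiness prediction with one value per channel (shape C x H x W, H = W = 1).
prediction = np.array([[[0.2]], [[0.8]]])

resized = resize_emptiness_predictions(prediction, target_size=(101, 101))
print(resized.shape)     # (2, 101, 101)
print(resized[1, 0, 0])  # 0.8, broadcast across the whole channel
```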

common_blocks/preprocessing.py (−5)
This file was deleted.
