
Commit 10af852

Dev (#28)
* Hypercolumn (#16)
* fixed Lovász loss, added helpers for loss weighting (#14)
* updated results exploration, added UNet with hypercolumn
* updated with lighter hypercolumn setup
* Model average (#17)
* added prediction average notebook
* added simple average notebook
* added replication pad instead of zero pad (#18)
* changed to Heng-like arch, added channel and spatial squeeze-and-excite, extended hypercolumn (#19)
* Update unet_models.py: fixed typo in ResNet UNet
* added resnet 18 and 50 pretrained options, unified hyper and vanilla in one class (#20)
* Update models.py: changed old class import and namings
* Loss design (#21)
* local
* initial
* formatted results
* added focal, added border weighting, added size weighting, added focus, added loss design notebook
* fixed wrong focal definition, updated loss API
* experimented with dropped borders
* set best params, not using weighting for now
* Dev depth experiments (#23)
* add depth layer in input
* reduce-lr-on-plateau scheduler
* depth channels transformer
* fix reduce lr
* bugfix
* change default config
* added adaptive threshold in callbacks (#24)
* added adaptive threshold in callbacks
* fix
* added initial lr selector (#25)
* Initial lr selector (#26)
* added initial lr selector
* small refactor
* Auxiliary data small masks (#27)
* experimenting
* auxiliary data for border masks generated
1 parent: 7f80d14

11 files changed: +1071 −414 lines
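
Several bullets above ("added focal", "fixed wrong focal definition") refer to the focal loss of Lin et al. (2017). As orientation for readers of this diff, here is a minimal sketch of the standard binary formulation; it is the textbook version, not necessarily the implementation this commit ships:

import torch
import torch.nn.functional as F

def focal_loss(logits, targets, alpha=0.25, gamma=2.0):
    # Unreduced per-pixel BCE, so each term can be re-weighted below.
    bce = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
    p_t = torch.exp(-bce)  # probability the model assigns to the true class
    alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
    return (alpha_t * (1 - p_t) ** gamma * bce).mean()

Down-weighting easy examples via the (1 - p_t) ** gamma factor is what distinguishes focal loss from plain BCE.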

common_blocks/callbacks.py (+97 −7)
@@ -7,7 +7,7 @@
 from PIL import Image
 import neptune
 from torch.autograd import Variable
-from torch.optim.lr_scheduler import ExponentialLR
+from torch.optim.lr_scheduler import ExponentialLR, ReduceLROnPlateau
 from tempfile import TemporaryDirectory
 
 from steppy.base import Step, IdentityOperation
@@ -200,6 +200,83 @@ def on_batch_end(self, *args, **kwargs):
         self.batch_id += 1
 
 
+class ReduceLROnPlateauScheduler(Callback):
+    def __init__(self, metric_name, minimize, reduce_factor, reduce_patience, min_lr):
+        super().__init__()
+        self.ctx = neptune.Context()
+        self.metric_name = metric_name
+        self.minimize = minimize
+        self.reduce_factor = reduce_factor
+        self.reduce_patience = reduce_patience
+        self.min_lr = min_lr
+
+    def set_params(self, transformer, validation_datagen, *args, **kwargs):
+        super().set_params(transformer, validation_datagen)
+        self.validation_datagen = validation_datagen
+        self.model = transformer.model
+        self.optimizer = transformer.optimizer
+        self.loss_function = transformer.loss_function
+        self.lr_scheduler = ReduceLROnPlateau(optimizer=self.optimizer,
+                                              mode='min' if self.minimize else 'max',
+                                              factor=self.reduce_factor,
+                                              patience=self.reduce_patience,
+                                              min_lr=self.min_lr)
+
+    def on_train_begin(self, *args, **kwargs):
+        self.epoch_id = 0
+        self.batch_id = 0
+
+    def on_epoch_end(self, *args, **kwargs):
+        self.model.eval()
+        val_loss = self.get_validation_loss()
+        metric = val_loss[self.metric_name]
+        metric = metric.data.cpu().numpy()[0]
+        self.model.train()
+
+        self.lr_scheduler.step(metrics=metric, epoch=self.epoch_id)
+        logger.info('epoch {0} current lr: {1}'.format(self.epoch_id + 1,
+                                                       self.optimizer.state_dict()['param_groups'][0]['lr']))
+        self.ctx.channel_send('Learning Rate', x=self.epoch_id,
+                              y=self.optimizer.state_dict()['param_groups'][0]['lr'])
+
+        self.epoch_id += 1
+
+
+class InitialLearningRateFinder(Callback):
+    def __init__(self, min_lr=1e-8, multipy_factor=1.05, add_factor=0.0):
+        super().__init__()
+        self.ctx = neptune.Context()
+        self.min_lr = min_lr
+        self.multipy_factor = multipy_factor
+        self.add_factor = add_factor
+
+    def set_params(self, transformer, validation_datagen, *args, **kwargs):
+        super().set_params(transformer, validation_datagen)
+        self.validation_datagen = validation_datagen
+        self.model = transformer.model
+        self.optimizer = transformer.optimizer
+        self.loss_function = transformer.loss_function
+
+    def on_train_begin(self, *args, **kwargs):
+        self.epoch_id = 0
+        self.batch_id = 0
+
+        for param_group in self.optimizer.param_groups:
+            param_group['lr'] = self.min_lr
+
+    def on_batch_end(self, metrics, *args, **kwargs):
+        for name, loss in metrics.items():
+            loss = loss.data.cpu().numpy()[0]
+            current_lr = self.optimizer.state_dict()['param_groups'][0]['lr']
+            logger.info('Learning Rate {} Loss {}'.format(current_lr, loss))
+            self.ctx.channel_send('Learning Rate', x=self.batch_id, y=current_lr)
+            self.ctx.channel_send('Loss', x=self.batch_id, y=loss)
+
+        for param_group in self.optimizer.param_groups:
+            param_group['lr'] = current_lr * self.multipy_factor + self.add_factor
+        self.batch_id += 1
+
+
 class ExperimentTiming(Callback):
     def __init__(self, epoch_every=None, batch_every=None):
         super().__init__()
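
The InitialLearningRateFinder above is a standard learning-rate range test: the optimizer starts at min_lr and the rate is multiplied by multipy_factor (the parameter name as committed) after every batch, while loss and learning rate are streamed to Neptune channels; the largest rate reached before the loss diverges is a sensible initial value. A hypothetical instantiation, with argument values chosen purely for illustration:

# Hypothetical configuration; these values are illustrative, not from this commit.
lr_finder = InitialLearningRateFinder(min_lr=1e-8, multipy_factor=1.05)

# Halve the LR after 10 stagnant epochs of the tracked validation metric;
# 'sum' as the metric key is an assumption about the validation loss dict.
plateau_scheduler = ReduceLROnPlateauScheduler(metric_name='sum',
                                               minimize=True,
                                               reduce_factor=0.5,
                                               reduce_patience=10,
                                               min_lr=1e-7)

With multipy_factor=1.05 the learning rate grows roughly tenfold every 47 batches (1.05 ** 47 ≈ 9.9), so a sweep from 1e-8 up to 1.0 takes on the order of 380 batches.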
@@ -340,11 +417,24 @@ def on_epoch_end(self, *args, **kwargs):
 
     def _get_validation_loss(self):
         output, epoch_loss = self._transform()
-        y_pred = self._generate_prediction(output)
+        logger.info('Selecting best threshold')
+
+        iout_best, threshold_best = 0.0, 0.5
+        for threshold in np.linspace(0.5, 0.3, 21):
+            y_pred = self._generate_prediction(output, threshold)
+            iout_score = intersection_over_union_thresholds(self.y_true, y_pred)
+            logger.info('threshold {} IOUT {}'.format(threshold, iout_score))
+            if iout_score > iout_best:
+                iout_best = iout_score
+                threshold_best = threshold
+            else:
+                break
+        logger.info('Selected best threshold {} IOUT {}'.format(threshold_best, iout_best))
 
         logger.info('Calculating IOU and IOUT Scores')
-        iou_score = intersection_over_union(self.y_true, y_pred)
+        y_pred = self._generate_prediction(output, threshold_best)
         iout_score = intersection_over_union_thresholds(self.y_true, y_pred)
+        iou_score = intersection_over_union(self.y_true, y_pred)
         logger.info('IOU score on validation is {}'.format(iou_score))
         logger.info('IOUT score on validation is {}'.format(iout_score))
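
Note the direction of the sweep above: np.linspace(0.5, 0.3, 21) walks downward from 0.5 in steps of 0.01, and the else: break exits at the first threshold that fails to improve IOUT, so the search is greedy and implicitly assumes the score is unimodal in the threshold. A quick check of the grid being iterated:

import numpy as np

# The 21-point grid runs DOWN from 0.5 to 0.3 in 0.01 steps.
grid = np.linspace(0.5, 0.3, 21)
print(np.round(grid[:5], 2))  # [0.5  0.49 0.48 0.47 0.46]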

@@ -407,14 +497,14 @@ def _transform(self):
 
         return outputs, average_losses
 
-    def _generate_prediction(self, outputs):
+    def _generate_prediction(self, outputs, threshold):
         data = {'callback_input': {'meta': self.meta_valid,
                                    'meta_valid': None,
                                    },
                 'unet_output': {**outputs}
                 }
         with TemporaryDirectory() as cache_dirpath:
-            pipeline = self.validation_pipeline(cache_dirpath, self.loader_mode)
+            pipeline = self.validation_pipeline(cache_dirpath, self.loader_mode, threshold)
             output = pipeline.transform(data)
             y_pred = output['y_pred']
             return y_pred
@@ -494,7 +584,7 @@ def on_epoch_end(self, *args, **kwargs):
         self.epoch_id += 1
 
 
-def postprocessing_pipeline_simplified(cache_dirpath, loader_mode):
+def postprocessing_pipeline_simplified(cache_dirpath, loader_mode, threshold):
     if loader_mode == 'resize_and_pad':
         size_adjustment_function = partial(crop_image, target_size=ORIGINAL_SIZE)
     elif loader_mode == 'resize':
@@ -513,7 +603,7 @@ def postprocessing_pipeline_simplified(cache_dirpath, loader_mode):
 
     binarizer = Step(name='binarizer',
                      transformer=make_apply_transformer(
-                         partial(binarize, threshold=THRESHOLD),
+                         partial(binarize, threshold=threshold),
                          output_name='binarized_images',
                          apply_on=['images']),
                      input_steps=[mask_resize],
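
binarize itself is defined elsewhere in the repository and does not appear in this diff; a plausible minimal version, assuming per-pixel probability maps in [0, 1], would be (hypothetical, for orientation only):

import numpy as np

def binarize(image, threshold):
    # Hypothetical sketch: turn a probability map into a 0/1 mask.
    return (image > threshold).astype(np.uint8)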

common_blocks/loaders.py (+3 −1)
@@ -14,7 +14,7 @@
 import json
 from steppy.base import BaseTransformer
 
-from .utils import from_pil, to_pil, binary_from_rle, ImgAug
+from .utils import from_pil, to_pil, binary_from_rle, ImgAug, AddDepthChannels
 
 
 class ImageReader(BaseTransformer):
@@ -337,6 +337,7 @@ def __init__(self, train_mode, loader_params, dataset_params, augmentation_params):
                 transforms.ToTensor(),
                 transforms.Normalize(mean=self.dataset_params.MEAN,
                                      std=self.dataset_params.STD),
+                AddDepthChannels()
             ])
         self.mask_transform = transforms.Compose([transforms.Lambda(to_array),
                                                   transforms.Lambda(to_tensor),
@@ -364,6 +365,7 @@ def __init__(self, loader_params, dataset_params, augmentation_params):
                 transforms.ToTensor(),
                 transforms.Normalize(mean=self.dataset_params.MEAN,
                                      std=self.dataset_params.STD),
+                AddDepthChannels()
             ])
         self.mask_transform = transforms.Compose([transforms.Lambda(to_array),
                                                   transforms.Lambda(to_tensor),
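
AddDepthChannels is imported from .utils, whose body is not part of this diff. A common trick in the TGS Salt competition was to overwrite two of the three replicated grayscale channels with a normalized row-position ("depth") ramp and its product with the image; a hypothetical sketch along those lines, which may differ from the repo's actual transform:

import torch

class AddDepthChannels:
    """Hypothetical sketch; the .utils implementation may differ."""
    def __call__(self, tensor):
        # tensor: (3, H, W) after ToTensor/Normalize, grayscale replicated.
        _, h, _ = tensor.size()
        depth = torch.linspace(0, 1, h).unsqueeze(1)  # (H, 1) row ramp
        tensor[1] = depth                             # broadcasts across width
        tensor[2] = tensor[0] * tensor[1]             # image x depth interaction
        return tensor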
