
Commit: Dev (#28)

* Hypercolumn (#16)

* fixed Lovász loss, added helpers for loss weighting (#14)

* updated results exploration, added unet with hypercolumn

* updated with lighter hypercolumn setup

* Model average (#17)

* added prediction average notebook

* added simple average notebook

* added replication pad instead of zero pad (#18)

* changed to Heng-style architecture, added channel and spatial squeeze-and-excite, extended hypercolumn (#19)

* Update unet_models.py

typo in resnet unet fixed

* added ResNet-18 and ResNet-50 pretrained options, unified hypercolumn and vanilla variants in one class (#20)

* Update models.py

Changed old class imports and naming

* Loss design (#21)

* local

* initial

* formatted results

* added focal loss, added border weighting, added size weighting, added focus, added loss design notebook (a focal-loss sketch follows the changed-files summary below)

* fixed wrong focal loss definition, updated loss API

* experiment with dropped borders

* set best params, not using weighting for now

* Dev depth experiments (#23)

* add depth layer in input

* reduce lr on plateau scheduler

* depth channels transformer

* fix reduce lr

* bugfix

* change default config

* added adaptive threshold in callbacks (#24)

* added adaptive threshold in callbacks

* fix

* added initial lr selector (#25)

* Initial lr selector (#26)

* added initial lr selector

* small refactor

* Auxiliary data small masks (#27)

* experimenting

* auxiliary data for border masks generated
jakubczakon committed Sep 12, 2018
1 parent 7f80d14 commit 10af852
Showing 11 changed files with 1,071 additions and 414 deletions.
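
The loss-design commits above (#21) mention a focal loss alongside border and size weighting, but none of those implementations are part of this diff. For reference, a minimal binary focal loss in PyTorch could look like the sketch below; the function name, signature and default values are illustrative, not the repo's API.

```python
import torch
import torch.nn.functional as F

def binary_focal_loss(logits, targets, gamma=2.0, alpha=0.25):
    """Binary focal loss sketch: down-weight easy, well-classified pixels."""
    bce = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
    p_t = torch.exp(-bce)                       # probability assigned to the true class
    return (alpha * (1 - p_t) ** gamma * bce).mean()
```

The `gamma` exponent suppresses the contribution of confidently classified pixels, so training concentrates on hard regions such as mask borders.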
104 changes: 97 additions & 7 deletions common_blocks/callbacks.py
@@ -7,7 +7,7 @@
from PIL import Image
import neptune
from torch.autograd import Variable
-from torch.optim.lr_scheduler import ExponentialLR
+from torch.optim.lr_scheduler import ExponentialLR, ReduceLROnPlateau
from tempfile import TemporaryDirectory

from steppy.base import Step, IdentityOperation
@@ -200,6 +200,83 @@ def on_batch_end(self, *args, **kwargs):
        self.batch_id += 1


class ReduceLROnPlateauScheduler(Callback):
    def __init__(self, metric_name, minimize, reduce_factor, reduce_patience, min_lr):
        super().__init__()
        self.ctx = neptune.Context()
        self.metric_name = metric_name
        self.minimize = minimize
        self.reduce_factor = reduce_factor
        self.reduce_patience = reduce_patience
        self.min_lr = min_lr

    def set_params(self, transformer, validation_datagen, *args, **kwargs):
        super().set_params(transformer, validation_datagen)
        self.validation_datagen = validation_datagen
        self.model = transformer.model
        self.optimizer = transformer.optimizer
        self.loss_function = transformer.loss_function
        self.lr_scheduler = ReduceLROnPlateau(optimizer=self.optimizer,
                                              mode='min' if self.minimize else 'max',
                                              factor=self.reduce_factor,
                                              patience=self.reduce_patience,
                                              min_lr=self.min_lr)

    def on_train_begin(self, *args, **kwargs):
        self.epoch_id = 0
        self.batch_id = 0

    def on_epoch_end(self, *args, **kwargs):
        self.model.eval()
        val_loss = self.get_validation_loss()
        metric = val_loss[self.metric_name]
        metric = metric.data.cpu().numpy()[0]
        self.model.train()

        self.lr_scheduler.step(metrics=metric, epoch=self.epoch_id)
        logger.info('epoch {0} current lr: {1}'.format(self.epoch_id + 1,
                                                       self.optimizer.state_dict()['param_groups'][0]['lr']))
        self.ctx.channel_send('Learning Rate', x=self.epoch_id,
                              y=self.optimizer.state_dict()['param_groups'][0]['lr'])

        self.epoch_id += 1
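
The callback above delegates the actual schedule to torch's ReduceLROnPlateau, which only needs to be stepped once per epoch with a validation metric. A minimal standalone sketch of the same behaviour, with a placeholder model and a made-up validation value standing in for get_validation_loss():

```python
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau

model = torch.nn.Linear(10, 1)                               # placeholder model
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
# Same knobs the callback forwards: mode, factor, patience, min_lr.
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5,
                              patience=3, min_lr=1e-6)

for epoch in range(20):
    val_metric = 1.0 / (epoch + 1)                           # stand-in for a validation loss
    scheduler.step(val_metric)                               # LR drops after `patience` epochs without improvement
    print('epoch {} lr {}'.format(epoch, optimizer.param_groups[0]['lr']))
```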


class InitialLearningRateFinder(Callback):
    def __init__(self, min_lr=1e-8, multipy_factor=1.05, add_factor=0.0):
        super().__init__()
        self.ctx = neptune.Context()
        self.min_lr = min_lr
        self.multipy_factor = multipy_factor
        self.add_factor = add_factor

    def set_params(self, transformer, validation_datagen, *args, **kwargs):
        super().set_params(transformer, validation_datagen)
        self.validation_datagen = validation_datagen
        self.model = transformer.model
        self.optimizer = transformer.optimizer
        self.loss_function = transformer.loss_function

    def on_train_begin(self, *args, **kwargs):
        self.epoch_id = 0
        self.batch_id = 0

        for param_group in self.optimizer.param_groups:
            param_group['lr'] = self.min_lr

    def on_batch_end(self, metrics, *args, **kwargs):
        for name, loss in metrics.items():
            loss = loss.data.cpu().numpy()[0]
            current_lr = self.optimizer.state_dict()['param_groups'][0]['lr']
            logger.info('Learning Rate {} Loss {}'.format(current_lr, loss))
            self.ctx.channel_send('Learning Rate', x=self.batch_id, y=current_lr)
            self.ctx.channel_send('Loss', x=self.batch_id, y=loss)

        for param_group in self.optimizer.param_groups:
            param_group['lr'] = current_lr * self.multipy_factor + self.add_factor
        self.batch_id += 1
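
The finder above is the usual LR range test: start from a very small learning rate, multiply it by a constant after every batch, and log the loss; a good initial LR is the largest value reached before the loss starts to diverge. A minimal standalone sketch of that loop (the model, loader and loss function are placeholders, not this repo's objects):

```python
import torch

def find_initial_lr(model, loader, loss_fn, min_lr=1e-8, multiply_factor=1.05, max_lr=10.0):
    """Exponentially increase the LR every batch and record (lr, loss) pairs."""
    optimizer = torch.optim.SGD(model.parameters(), lr=min_lr)
    history = []
    for inputs, targets in loader:
        optimizer.zero_grad()
        loss = loss_fn(model(inputs), targets)
        loss.backward()
        optimizer.step()

        lr = optimizer.param_groups[0]['lr']
        history.append((lr, loss.item()))
        if lr > max_lr or not torch.isfinite(loss):
            break                                  # stop once the loss diverges or the LR is absurd
        for group in optimizer.param_groups:
            group['lr'] = lr * multiply_factor     # same multiplicative ramp as the callback
    return history                                 # inspect to pick the largest stable LR
```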


class ExperimentTiming(Callback):
    def __init__(self, epoch_every=None, batch_every=None):
        super().__init__()

@@ -340,11 +417,24 @@ def on_epoch_end(self, *args, **kwargs):

    def _get_validation_loss(self):
        output, epoch_loss = self._transform()
-        y_pred = self._generate_prediction(output)
        logger.info('Selecting best threshold')

        iout_best, threshold_best = 0.0, 0.5
        for threshold in np.linspace(0.5, 0.3, 21):
            y_pred = self._generate_prediction(output, threshold)
            iout_score = intersection_over_union_thresholds(self.y_true, y_pred)
            logger.info('threshold {} IOUT {}'.format(threshold, iout_score))
            if iout_score > iout_best:
                iout_best = iout_score
                threshold_best = threshold
            else:
                break
        logger.info('Selected best threshold {} IOUT {}'.format(threshold_best, iout_best))

        logger.info('Calculating IOU and IOUT Scores')
-        iou_score = intersection_over_union(self.y_true, y_pred)
        y_pred = self._generate_prediction(output, threshold_best)
        iout_score = intersection_over_union_thresholds(self.y_true, y_pred)
        iou_score = intersection_over_union(self.y_true, y_pred)
        logger.info('IOU score on validation is {}'.format(iou_score))
        logger.info('IOUT score on validation is {}'.format(iout_score))
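
The intersection_over_union_thresholds score (IOUT) used in this loop is computed elsewhere in the repo; it is the competition-style metric that averages, over a range of IoU cut-offs, whether each predicted mask counts as a match. A simplified per-image sketch of that kind of score, where the exact empty-mask handling is an assumption rather than the repo's code:

```python
import numpy as np

def iou(y_true, y_pred):
    """IoU of two binary masks; two empty masks count as a perfect match."""
    union = np.logical_or(y_true, y_pred).sum()
    if union == 0:
        return 1.0
    return np.logical_and(y_true, y_pred).sum() / union

def iout(y_trues, y_preds, thresholds=np.arange(0.5, 1.0, 0.05)):
    """Mean over images and IoU thresholds of 'does this prediction count as a hit'."""
    per_image = []
    for y_true, y_pred in zip(y_trues, y_preds):
        score = iou(y_true, y_pred)
        per_image.append(np.mean([score > t for t in thresholds]))
    return float(np.mean(per_image))
```

Because the threshold sweep above walks down from 0.5 and breaks on the first drop in IOUT, it is a greedy search rather than an exhaustive one.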

@@ -407,14 +497,14 @@ def _transform(self):

        return outputs, average_losses

-    def _generate_prediction(self, outputs):
+    def _generate_prediction(self, outputs, threshold):
        data = {'callback_input': {'meta': self.meta_valid,
                                   'meta_valid': None,
                                   },
                'unet_output': {**outputs}
                }
        with TemporaryDirectory() as cache_dirpath:
-            pipeline = self.validation_pipeline(cache_dirpath, self.loader_mode)
+            pipeline = self.validation_pipeline(cache_dirpath, self.loader_mode, threshold)
            output = pipeline.transform(data)
            y_pred = output['y_pred']
            return y_pred
@@ -494,7 +584,7 @@ def on_epoch_end(self, *args, **kwargs):
        self.epoch_id += 1


-def postprocessing_pipeline_simplified(cache_dirpath, loader_mode):
+def postprocessing_pipeline_simplified(cache_dirpath, loader_mode, threshold):
    if loader_mode == 'resize_and_pad':
        size_adjustment_function = partial(crop_image, target_size=ORIGINAL_SIZE)
    elif loader_mode == 'resize':
@@ -513,7 +603,7 @@ def postprocessing_pipeline_simplified(cache_dirpath, loader_mode):

    binarizer = Step(name='binarizer',
                     transformer=make_apply_transformer(
-                        partial(binarize, threshold=THRESHOLD),
+                        partial(binarize, threshold=threshold),
                         output_name='binarized_images',
                         apply_on=['images']),
                     input_steps=[mask_resize],
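
The binarize function referenced by the binarizer step is defined elsewhere in the repo; consistent with how it is called above, a minimal version is simply a per-pixel threshold (the dtype and return convention here are assumptions):

```python
import numpy as np

def binarize(image, threshold=0.5):
    """Turn a per-pixel probability map into a 0/1 mask at the given threshold."""
    return (np.asarray(image) > threshold).astype(np.uint8)
```

With this commit, the threshold selected during validation is forwarded through the pipeline down to this step instead of the hard-coded THRESHOLD constant.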
4 changes: 3 additions & 1 deletion common_blocks/loaders.py
@@ -14,7 +14,7 @@
import json
from steppy.base import BaseTransformer

-from .utils import from_pil, to_pil, binary_from_rle, ImgAug
+from .utils import from_pil, to_pil, binary_from_rle, ImgAug, AddDepthChannels


class ImageReader(BaseTransformer):
@@ -337,6 +337,7 @@ def __init__(self, train_mode, loader_params, dataset_params, augmentation_param
            transforms.ToTensor(),
            transforms.Normalize(mean=self.dataset_params.MEAN,
                                 std=self.dataset_params.STD),
+           AddDepthChannels()
        ])
        self.mask_transform = transforms.Compose([transforms.Lambda(to_array),
                                                  transforms.Lambda(to_tensor),
@@ -364,6 +365,7 @@ def __init__(self, loader_params, dataset_params, augmentation_params):
            transforms.ToTensor(),
            transforms.Normalize(mean=self.dataset_params.MEAN,
                                 std=self.dataset_params.STD),
+           AddDepthChannels()
        ])
        self.mask_transform = transforms.Compose([transforms.Lambda(to_array),
                                                  transforms.Lambda(to_tensor),
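
AddDepthChannels comes from common_blocks/utils.py and its body is not shown in this diff. Based on the commit messages ("add depth layer in input", "depth channels transformer"), a plausible sketch is a tensor transform that encodes each pixel's vertical position as extra channel content; the exact encoding below is an assumption, not necessarily the repo's implementation:

```python
import torch

class AddDepthChannels:
    """Overwrite two channels of a (3, H, W) tensor with a vertical-position ('depth') encoding."""

    def __call__(self, tensor):
        _, h, w = tensor.size()
        depth = torch.linspace(0, 1, steps=h).view(h, 1).expand(h, w)
        tensor[1] = depth                # channel 1: row coordinate scaled to [0, 1]
        tensor[2] = tensor[0] * depth    # channel 2: intensity modulated by depth
        return tensor

    def __repr__(self):
        return self.__class__.__name__
```

Placing it after Normalize, as in both Compose pipelines above, keeps the added depth encoding out of the mean/std normalization.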

