Commit 3048a4b

Dev (#105)
* fix steps issues #48 and #49
* prepare sum of distances, not distances to 2 closest objects
* back to const erosion
* fixes
* fix
* [:-4] -> os.path.splitext()
* loss weighted by size of the object
* prepare masks, distances and sizes
* cleaning
* adapt models and loaders to handle size matrix and calculate size weights
* adapt models and loaders to handle size matrix and calculate size weights v2
* fix pipelines.py
* fix some issues with calculating size-weighted loss
* cleaning
* update mean and std
* fixes
* clean
* fix recall in evaluation
* fix bug in erosion (#91)
* Dev mosaic padding inference (#81)
* added mosaic seq, unet_mosaic pipe, mosaic loader
* added unet_weighted
* dropped input resize at inference
* dropped rescaling in loader, fixed postpro cropping
* local dev
* updated dilation/erosion, joined pipelines
* dropped unet mask saving
* added replication padding
* renamed mosaic->padded, moved params to configs
* padding->inference_padding
* config updates
* refactored padded unet
* refactored unet_padding
* Dev dice loss (#89)
* fix size weights
* add mixed dice + weighted ce loss
* fixes
* parametrize loss weights
* remove get_datagen function overriding
* dice loss per channel, some fixes
* fixes and smooth added to Dice loss instead of eps
* fixes and smooth added to Dice loss and eps, and parametrized
* sigmoid -> softmax in dice loss
* softmax2d
* move softmax to models.py
* parametrize softmax and sigmoid in dice loss
* Dev mask prep speed up (#94)
* distributed mask/distance/size generation added
* dropped deprecated
* dropped mask param
* Dev random crop (#97)
* local
* added random cropping, refactored augmentations
* Dev borders and dilation in preprocessing (#96)
* merge multithread
* preparing borders
* fix PR #96 and add update metadata generation
* Dev deeper archs (#102)
* dropped mask param
* added deeper resnets and spatial2d dropout
* updated config
* fixed casting
* updated index
* fix evaluate, add score builder in stream mode (#104)
* added initial version
* added simple evaluate on checkpoint script
* updated config
* added neptune file definition
* fixed conflicts
1 parent fe241ff commit 3048a4b
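The loss-related items above ("add mixed dice + weighted ce loss", "dice loss per channel", smoothing, "sigmoid -> softmax") describe the training objective this branch converged on. Below is a minimal PyTorch sketch of that combination; the class name, the pixel_weights hook, and the exact reductions are assumptions for exposition, not this repository's implementation.

import torch
import torch.nn as nn
import torch.nn.functional as F

class MixedDiceCrossEntropyLoss(nn.Module):
    """Hypothetical sketch: weighted cross-entropy plus smoothed per-channel Dice."""

    def __init__(self, dice_weight=1.0, ce_weight=1.0, smooth=1.0, activation='softmax'):
        super().__init__()
        self.dice_weight = dice_weight
        self.ce_weight = ce_weight
        self.smooth = smooth
        self.activation = activation  # 'softmax' or 'sigmoid', as parametrized in the commits

    def forward(self, logits, targets, pixel_weights=None):
        # logits: [N, C, H, W]; targets: [N, H, W] with class indices
        ce = F.cross_entropy(logits, targets, reduction='none')  # per-pixel CE, [N, H, W]
        if pixel_weights is not None:
            ce = ce * pixel_weights  # e.g. weights derived from object size
        ce = ce.mean()

        probs = F.softmax(logits, dim=1) if self.activation == 'softmax' else torch.sigmoid(logits)
        one_hot = F.one_hot(targets, num_classes=logits.shape[1]).permute(0, 3, 1, 2).float()
        dims = (0, 2, 3)  # reduce over batch and spatial dims, keep channels
        intersection = (probs * one_hot).sum(dims)
        cardinality = probs.sum(dims) + one_hot.sum(dims)
        dice_per_channel = (2.0 * intersection + self.smooth) / (cardinality + self.smooth)
        dice_loss = 1.0 - dice_per_channel.mean()

        return self.ce_weight * ce + self.dice_weight * dice_loss

# Toy usage with random tensors:
criterion = MixedDiceCrossEntropyLoss()
loss = criterion(torch.randn(2, 3, 64, 64), torch.randint(0, 3, (2, 64, 64)))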

File tree

6 files changed: +88 additions, −19 deletions

evaluate_checkpoint.py
main.py
neptune.yaml
pipelines.py
postprocessing.py
utils.py

evaluate_checkpoint.py

Lines changed: 65 additions & 0 deletions
@@ -0,0 +1,65 @@
import os
import subprocess

import click
import yaml

MISSING_TRANSFORMERS = ['prediction_crop',
                        'prediction_renamed',
                        'mask_resize',
                        'category_mapper',
                        'mask_erosion',
                        'labeler',
                        'mask_dilation',
                        'score_builder',
                        'output']


@click.group()
def main():
    pass


@main.command()
@click.option('-e', '--experiment_dir', help='experiment that you want to run evaluation on', required=True)
@click.option('-t', '--temp_inference_dir', help='temporary directory', required=True)
@click.option('-n', '--neptune_file', help='neptune file path', required=True)
def run(temp_inference_dir, experiment_dir, neptune_file):
    transformer_dir = os.path.join(temp_inference_dir, 'transformers')
    checkpoints_dir = os.path.join(temp_inference_dir, 'checkpoints')

    # Copy the whole experiment into the temporary inference directory.
    cmd = 'cp -rf {} {}'.format(experiment_dir, temp_inference_dir)
    subprocess.call(cmd, shell=True)

    # Use the best checkpoint as the serialized unet transformer.
    cmd = 'cp {}/unet/best.torch {}/unet'.format(checkpoints_dir, transformer_dir)
    subprocess.call(cmd, shell=True)

    # Create empty placeholder files for the transformers that are not serialized.
    for missing_transformer in MISSING_TRANSFORMERS:
        cmd = 'touch {}/{}'.format(transformer_dir, missing_transformer)
        subprocess.call(cmd, shell=True)

    cmd = 'cp {} temporary_neptune.yaml'.format(neptune_file)
    subprocess.call(cmd, shell=True)

    # Point the temporary config at the temporary inference directory.
    with open("temporary_neptune.yaml", 'r+') as f:
        doc = yaml.safe_load(f)
        doc['parameters']['experiment_dir'] = temp_inference_dir

    with open("temporary_neptune.yaml", 'w+') as f:
        yaml.dump(doc, f, default_flow_style=False)

    cmd = 'neptune run --config temporary_neptune.yaml main.py -- evaluate -p unet_weighted_padded'
    subprocess.call(cmd, shell=True)

    # Clean up the temporary config and inference directory.
    cmd = 'rm temporary_neptune.yaml'
    subprocess.call(cmd, shell=True)

    cmd = 'rm -rf {}'.format(temp_inference_dir)
    subprocess.call(cmd, shell=True)


if __name__ == "__main__":
    main()
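Given the click interface defined above, the script would be invoked along these lines (all paths are placeholders):

python evaluate_checkpoint.py run -e /path/to/experiment -t /tmp/inference_dir -n /path/to/neptune.yaml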

main.py

Lines changed: 3 additions & 5 deletions
@@ -53,7 +53,7 @@ def prepare_masks(dev_mode):
     for dataset in ["train", "val"]:
         logger.info('Overlaying masks, dataset: {}'.format(dataset))
         target_dir = "{}_eroded_{}_dilated_{}".format(params.masks_overlayed_dir[:-1],
-            params.erode_selem_size, params.dilate_selem_size)
+                                                      params.erode_selem_size, params.dilate_selem_size)
         logger.info('Output directory: {}'.format(target_dir))

         overlay_masks(data_dir=params.data_dir,
@@ -249,9 +249,8 @@ def _generate_prediction(meta_data, pipeline, logger, category_ids):
     output = pipeline.transform(data)
     pipeline.clean_cache()
     y_pred = output['y_pred']
-    y_scores = output['y_scores']

-    prediction = create_annotations(meta_data, y_pred, y_scores, logger, category_ids)
+    prediction = create_annotations(meta_data, y_pred, logger, category_ids)
     return prediction


@@ -269,9 +268,8 @@ def _generate_prediction_in_chunks(meta_data, pipeline, logger, category_ids, ch
     output = pipeline.transform(data)
     pipeline.clean_cache()
     y_pred = output['y_pred']
-    y_scores = output['y_scores']

-    prediction_chunk = create_annotations(meta_chunk, y_pred, y_scores, logger, category_ids)
+    prediction_chunk = create_annotations(meta_chunk, y_pred, logger, category_ids)
     prediction.extend(prediction_chunk)

     return prediction
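The dropped y_scores output is not lost: as the pipelines.py and postprocessing.py diffs below show, per-instance scores now travel inside y_pred as (image, scores) pairs, which create_annotations (see the utils.py diff) unpacks itself.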

neptune.yaml

Lines changed: 3 additions & 3 deletions
@@ -59,7 +59,7 @@ parameters:
   pool_kernel: 3
   pool_stride: 2
   repeat_blocks: 4
-  encoder: ResNet152
+  encoder: AlbuNet

   # U-Net loss weights (multi-output)
   bce_mask: 1.0
@@ -91,8 +91,8 @@ parameters:
   threshold: 0.5
   min_nuclei_size: 20
   erosion_percentages: '[10,20,30]'
-  erode_selem_size: 3
-  dilate_selem_size: 0
+  erode_selem_size: 2
+  dilate_selem_size: 3

   # Inference padding
   crop_image_h: 300
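The erode_selem_size and dilate_selem_size parameters above size the structuring elements applied to the overlaid masks (eroding separates touching instances; dilating restores area). A minimal sketch of the effect, assuming disk-shaped elements from skimage; the repo's actual element shape and call sites may differ:

import numpy as np
from skimage.morphology import binary_dilation, binary_erosion, disk

def erode_then_dilate(mask, erode_selem_size=2, dilate_selem_size=3):
    # Illustrative only: disk() is an assumed structuring-element shape.
    if erode_selem_size > 0:
        mask = binary_erosion(mask, disk(erode_selem_size))
    if dilate_selem_size > 0:
        mask = binary_dilation(mask, disk(dilate_selem_size))
    return mask

mask = np.zeros((16, 16), dtype=bool)
mask[4:12, 4:12] = True
print(erode_then_dilate(mask).sum())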

pipelines.py

Lines changed: 1 addition & 3 deletions
@@ -78,8 +78,7 @@ def unet_padded(config, train_mode):
     output = Step(name='output',
                   transformer=Dummy(),
                   input_steps=[mask_postprocessed],
-                  adapter={'y_pred': ([(mask_postprocessed.name, 'images')]),
-                           'y_scores': ([(mask_postprocessed.name, 'scores')])
+                  adapter={'y_pred': ([(mask_postprocessed.name, 'images_with_scores')]),
                            },
                   cache_dirpath=config.env.cache_dirpath,
                   save_output=save_output)
@@ -246,7 +245,6 @@ def _preprocessing_multitask_generator(config, is_train, use_patching):


 def mask_postprocessing(loader, model, config, save_output=False):
-
     if config.postprocessor.crf.apply_crf:
         dense_crf = Step(name='dense_crf',
                          transformer=post.DenseCRFStream(**config.postprocessor.crf) if config.execution.stream_mode \
postprocessing.py

Lines changed: 12 additions & 4 deletions
@@ -123,11 +123,10 @@ def transform(self, images):

 class ScoreBuilder(BaseTransformer):
     def transform(self, images, probabilities):
-        scores = []
+        images_with_scores = []
         for image, image_probabilities in tqdm(zip(images, probabilities)):
-            scores.append(build_score(image, image_probabilities))
-        return {'images': images,
-                'scores': scores}
+            images_with_scores.append((image, build_score(image, image_probabilities)))
+        return {'images_with_scores': images_with_scores}


 class MulticlassLabelerStream(BaseTransformer):
@@ -246,6 +245,15 @@ def _transform(self, images):
         yield crop_image_center_per_class(image, (self.h_crop, self.w_crop))


+class ScoreBuilderStream(BaseTransformer):
+    def transform(self, images, probabilities):
+        return {'images_with_scores': self._transform(images, probabilities)}
+
+    def _transform(self, images, probabilities):
+        for image, image_probabilities in tqdm(zip(images, probabilities)):
+            yield (image, build_score(image, image_probabilities))
+
+
 def label_multiclass_image(mask):
     labeled_channels = []
     for label_nr in range(0, mask.max() + 1):
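A stripped-down contrast of the two transformer styles above; build_score_stub stands in for the repo's real build_score, which is defined elsewhere in postprocessing:

from tqdm import tqdm

def build_score_stub(image, probabilities):
    # Stand-in for the real build_score(image, probabilities).
    return 1.0

def batch_scores(images, probabilities):
    # ScoreBuilder style: builds the full list of (image, score) pairs up front.
    return [(img, build_score_stub(img, p)) for img, p in tqdm(zip(images, probabilities))]

def stream_scores(images, probabilities):
    # ScoreBuilderStream style: yields pairs lazily, so nothing is
    # materialized until a downstream consumer iterates.
    for img, p in tqdm(zip(images, probabilities)):
        yield img, build_score_stub(img, p)

print(list(stream_scores(['img1', 'img2'], [0.9, 0.8])))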

utils.py

Lines changed: 4 additions & 4 deletions
@@ -82,7 +82,7 @@ def decompose(labeled):
     return masks


-def create_annotations(meta, predictions, scores, logger, category_ids, save=False, experiment_dir='./'):
+def create_annotations(meta, predictions, logger, category_ids, save=False, experiment_dir='./'):
     '''
     :param meta: pd.DataFrame with metadata
     :param predictions: list of labeled masks or numpy array of size [n_images, im_height, im_width]
@@ -93,7 +93,7 @@ def create_annotations(meta, predictions, logger, category_ids, save=False,
     '''
     annotations = []
     logger.info('Creating annotations')
-    for image_id, prediction, image_scores in zip(meta["ImageId"].values, predictions, scores):
+    for image_id, (prediction, image_scores) in zip(meta["ImageId"].values, predictions):
         for category_nr, (category_instances, category_scores) in enumerate(zip(prediction, image_scores)):
             if category_ids[category_nr] != None:
                 masks = decompose(category_instances)
@@ -159,7 +159,7 @@ def _generate_metadata(dataset):
     if dataset != "test_images":
         images_path = os.path.join(images_path, "images")

-    if public_paths: # TODO: implement public generating public path
+    if public_paths:  # TODO: implement public generating public path
         raise NotImplementedError
     else:
         images_path_to_write = images_path
@@ -366,7 +366,7 @@ def coco_evaluation(gt_filepath, prediction_filepath, image_ids, category_ids, s
     cocoEval.accumulate()
     cocoEval.summarize()

-    return cocoEval.stats[0], cocoEval.stats[4]
+    return cocoEval.stats[0], cocoEval.stats[3]


 def label(mask):
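To make the new calling convention concrete, here is a toy version of the packed predictions that create_annotations now iterates over (shapes and scores are invented for illustration):

import numpy as np

# Each element pairs per-category labeled masks with per-category score lists.
predictions = [
    ([np.array([[0, 1], [1, 2]])], [[0.9, 0.7]]),  # one image: one category, two instances
]
image_ids = ['img_0']

for image_id, (prediction, image_scores) in zip(image_ids, predictions):
    for category_instances, category_scores in zip(prediction, image_scores):
        print(image_id, int(category_instances.max()), category_scores)

Note also the stats[4] -> stats[3] change in coco_evaluation: with pycocotools' default summarize() layout those slots hold AP for medium and small objects respectively, so the "fix recall in evaluation" item in the commit message suggests this project configures COCOeval with a custom summary layout in which stats[3] is a recall measure.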
