|
7 | 7 | import json |
8 | 8 | import os |
9 | 9 |
|
| 10 | +import torch |
10 | 11 |
|
def evaluate_coco(dataset, model, threshold=0.05):
    """Run COCO-style detection evaluation for `model` over `dataset`.

    Puts the model in eval mode, collects per-image detections above
    `threshold`, writes them to '<set_name>_bbox_results.json', and runs
    pycocotools' COCOeval (bbox) over the processed image ids. Training
    mode is always restored before returning, even on early exit.

    Args:
        dataset: COCO-style dataset; assumed to provide __len__,
            __getitem__ returning {'img', 'scale'}, plus `image_ids`,
            `label_to_coco_label`, `set_name`, and `coco` — TODO confirm
            against the project's CocoDataset class.
        model: detector whose forward returns (scores, labels, boxes);
            scores are assumed sorted in descending order (the inner
            loop relies on this to break early).
        threshold: minimum score for a detection to be kept.

    Returns:
        None. Side effects: writes the results JSON and prints the
        COCOeval summary.
    """
    model.eval()

    try:
        with torch.no_grad():

            # start collecting results
            results = []
            image_ids = []

            for index in range(len(dataset)):
                data = dataset[index]
                scale = data['scale']

                # run network; input is HWC -> CHW with a batch dim of 1
                scores, labels, boxes = model(data['img'].permute(2, 0, 1).cuda().float().unsqueeze(dim=0))
                scores = scores.cpu()
                labels = labels.cpu()
                boxes = boxes.cpu()

                # correct boxes for image scale (back to original resolution)
                boxes /= scale

                # change to (x, y, w, h) (MS COCO standard)
                boxes[:, 2] -= boxes[:, 0]
                boxes[:, 3] -= boxes[:, 1]

                # compute predicted labels and scores
                for box_id in range(boxes.shape[0]):
                    score = float(scores[box_id])
                    label = int(labels[box_id])
                    box = boxes[box_id, :]

                    # scores are sorted, so we can break
                    if score < threshold:
                        break

                    # append detection for each positively labeled class
                    image_result = {
                        'image_id'    : dataset.image_ids[index],
                        'category_id' : dataset.label_to_coco_label(label),
                        'score'       : float(score),
                        'bbox'        : box.tolist(),
                    }

                    # append detection to results
                    results.append(image_result)

                # append image to list of processed images
                image_ids.append(dataset.image_ids[index])

                # print progress
                print('{}/{}'.format(index, len(dataset)), end='\r')

            if not results:
                # nothing detected above threshold; nothing to evaluate
                return

            # write output — context manager guarantees the file is flushed
            # and closed before loadRes re-reads it below
            with open('{}_bbox_results.json'.format(dataset.set_name), 'w') as f:
                json.dump(results, f, indent=4)

            # load results in COCO evaluation tool
            coco_true = dataset.coco
            coco_pred = coco_true.loadRes('{}_bbox_results.json'.format(dataset.set_name))

            # run COCO evaluation
            coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
            coco_eval.params.imgIds = image_ids
            coco_eval.evaluate()
            coco_eval.accumulate()
            coco_eval.summarize()
    finally:
        # restore training mode even when returning early (empty results)
        # or when evaluation raises — the original skipped this on the
        # empty-results path, leaving the model stuck in eval mode
        model.train()

    return
0 commit comments