Skip to content

Commit 249c7da

Browse files
committed
Clean up of code.
1 parent 9e023e3 commit 249c7da

File tree

4 files changed

+34
-17
lines changed

4 files changed

+34
-17
lines changed

README.md

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4,16 +4,22 @@ A real-time object recognition application using [Google's TensorFlow Object Det
44

55
## Getting Started
66
1. `conda env create -f environment.yml`
7-
2. `python object_detection_app.py`
7+
2. `python object_detection_app.py`
8+
Optional arguments (default value):
9+
* Device index of the camera `--source=0`
10+
* Width of the frames in the video stream `--width=480`
11+
* Height of the frames in the video stream `--height=360`
12+
* Number of workers `--num-workers=2`
13+
* Size of the queue `--queue-size=5`
814

915
## Requirements
1016
- [Anaconda / Python 3.5](https://www.continuum.io/downloads)
1117
- [TensorFlow 1.2](https://www.tensorflow.org/)
12-
- [OpenCV 3.1](http://opencv.org/)
18+
- [OpenCV 3.0](http://opencv.org/)
1319

1420
## Notes
15-
- OpenCV might crash on OSX after a while. See open issue and solution [here](https://github.com/opencv/opencv/issues/5874)
16-
- Moving the `.read()` part of the video stream in a subprocess did not work but only to a separate thread
21+
- OpenCV 3.1 might crash on OSX after a while, which is why I had to switch to version 3.0. See the open issue and solution [here](https://github.com/opencv/opencv/issues/5874).
22+
- Moving the `.read()` part of the video stream into multiple child processes did not work. However, it was possible to move it to a separate thread.
1723

1824
## Copyright
1925

environment.yml

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,12 +4,11 @@ channels: !!python/tuple
44
- defaults
55
dependencies:
66
- freetype=2.5.5=2
7-
- hdf5=1.8.17=1
87
- jbig=2.1=0
8+
- jlaura::opencv3=3.0.0=py35_0
99
- jpeg=9b=0
1010
- libpng=1.6.27=0
1111
- libtiff=4.0.6=3
12-
- menpo::opencv3=3.1.0=py35_0
1312
- menpo::tbb=4.3_20141023=0
1413
- mkl=2017.0.1=0
1514
- numpy=1.13.0=py35_0

object_detection_app.py

Lines changed: 20 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import os
22
import cv2
3+
import time
34
import argparse
45
import multiprocessing
56
import numpy as np
@@ -21,9 +22,6 @@
2122

2223
NUM_CLASSES = 90
2324

24-
NUM_WORKERS = 2 # cv2.getNumberOfCPUs() - 1
25-
QUEUE_SIZE = 5
26-
2725
# Loading label map
2826
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
2927
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
@@ -88,33 +86,47 @@ def worker(input_q, output_q):
8886
parser = argparse.ArgumentParser()
8987
parser.add_argument('-src', '--source', dest='video_source', type=int,
9088
default=0, help='Device index of the camera.')
89+
parser.add_argument('-wd', '--width', dest='width', type=int,
90+
default=480, help='Width of the frames in the video stream.')
91+
parser.add_argument('-ht', '--height', dest='height', type=int,
92+
default=360, help='Height of the frames in the video stream.')
93+
parser.add_argument('-num-w', '--num-workers', dest='num_workers', type=int,
94+
default=2, help='Number of workers.')
95+
parser.add_argument('-q-size', '--queue-size', dest='queue_size', type=int,
96+
default=5, help='Size of the queue.')
9197
args = parser.parse_args()
9298

9399
logger = multiprocessing.log_to_stderr()
94100
logger.setLevel(multiprocessing.SUBDEBUG)
95101

96-
input_q = Queue(maxsize=QUEUE_SIZE)
97-
output_q = Queue(maxsize=QUEUE_SIZE)
102+
input_q = Queue(maxsize=args.queue_size)
103+
output_q = Queue(maxsize=args.queue_size)
98104

99105
process = Process(target=worker, args=((input_q, output_q)))
100106
process.daemon = True
101-
pool = Pool(NUM_WORKERS, worker, (input_q, output_q))
107+
pool = Pool(args.num_workers, worker, (input_q, output_q))
102108

103-
video_capture = WebcamVideoStream(src=args.video_source).start()
109+
video_capture = WebcamVideoStream(src=args.video_source,
110+
width=args.width,
111+
height=args.height).start()
104112
fps = FPS().start()
105113

106114
while True: # fps._numFrames < 120
107115
frame = video_capture.read()
108116
input_q.put(frame)
109117

118+
t = time.time()
119+
110120
cv2.imshow('Video', output_q.get())
111121
fps.update()
112122

123+
print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))
124+
113125
if cv2.waitKey(1) & 0xFF == ord('q'):
114126
break
115127

116128
fps.stop()
117-
print('[INFO] elasped time: {:.2f}'.format(fps.elapsed()))
129+
print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
118130
print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
119131

120132
video_capture.stop()

utils.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -38,12 +38,12 @@ def fps(self):
3838

3939

4040
class WebcamVideoStream:
41-
def __init__(self, src=0):
41+
def __init__(self, src, width, height):
4242
# initialize the video camera stream and read the first frame
4343
# from the stream
4444
self.stream = cv2.VideoCapture(src)
45-
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, 480)
46-
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)
45+
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
46+
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
4747
(self.grabbed, self.frame) = self.stream.read()
4848

4949
# initialize the variable used to indicate if the thread should

0 commit comments

Comments
 (0)