|
1 | | -import numpy as np |
2 | 1 | import os |
| 2 | +import cv2 |
| 3 | +import numpy as np |
3 | 4 | import tensorflow as tf |
4 | 5 |
|
5 | 6 | from PIL import Image |
@@ -40,44 +41,56 @@ def load_image_into_numpy_array(image): |
40 | 41 | (im_height, im_width, 3)).astype(np.uint8) |
41 | 42 |
|
42 | 43 |
|
43 | | -with detection_graph.as_default(): |
44 | | - with tf.Session(graph=detection_graph) as sess: |
45 | | - image = Image.open('image.jpg') |
46 | | - |
47 | | - # the array based representation of the image will be used later in order to prepare the |
48 | | - # result image with boxes and labels on it. |
49 | | - image_np = load_image_into_numpy_array(image) |
50 | | - |
51 | | - # Expand dimensions since the model expects images to have shape: [1, None, None, 3] |
52 | | - image_np_expanded = np.expand_dims(image_np, axis=0) |
53 | | - image_tensor = detection_graph.get_tensor_by_name('image_tensor:0') |
54 | | - |
55 | | - # Each box represents a part of the image where a particular object was detected. |
56 | | - boxes = detection_graph.get_tensor_by_name('detection_boxes:0') |
57 | | - |
58 | | - # Each score represent how level of confidence for each of the objects. |
59 | | - # Score is shown on the result image, together with the class label. |
60 | | - scores = detection_graph.get_tensor_by_name('detection_scores:0') |
61 | | - classes = detection_graph.get_tensor_by_name('detection_classes:0') |
62 | | - num_detections = detection_graph.get_tensor_by_name('num_detections:0') |
63 | | - |
64 | | - # Actual detection. |
65 | | - (boxes, scores, classes, num_detections) = sess.run( |
66 | | - [boxes, scores, classes, num_detections], |
67 | | - feed_dict={image_tensor: image_np_expanded}) |
68 | | - |
69 | | - # Visualization of the results of a detection. |
70 | | - vis_util.visualize_boxes_and_labels_on_image_array( |
71 | | - image_np, |
72 | | - np.squeeze(boxes), |
73 | | - np.squeeze(classes).astype(np.int32), |
74 | | - np.squeeze(scores), |
75 | | - category_index, |
76 | | - use_normalized_coordinates=True, |
77 | | - line_thickness=8) |
78 | | - |
79 | | - img = Image.fromarray(image_np) |
80 | | - img.save("image.jpg") |
def detect_objects(image_stream):
    """Run the object-detection graph on one frame and return it annotated.

    Args:
        image_stream: HxWx3 uint8 numpy array (a frame from
            cv2.VideoCapture, so presumably BGR channel order -- TODO
            confirm this matches the channel order the model was trained
            on; detection quality suffers if it expects RGB).

    Returns:
        The same-shaped uint8 numpy array with detection boxes and labels
        drawn on it.
    """
    # Reuse a single Session across calls: the original built a new
    # tf.Session for every frame, which is extremely expensive inside a
    # per-frame video loop. The cached session is bound to detection_graph
    # and lives for the life of the process (never explicitly closed).
    if not hasattr(detect_objects, '_session'):
        detect_objects._session = tf.Session(graph=detection_graph)
    sess = detect_objects._session

    # The frame is already a numpy array; the original's round-trip through
    # PIL (Image.fromarray -> load_image_into_numpy_array) copied the data
    # twice for no benefit.
    image_np = np.ascontiguousarray(image_stream, dtype=np.uint8)

    # Expand dimensions since the model expects images to have shape:
    # [1, None, None, 3].
    image_np_expanded = np.expand_dims(image_np, axis=0)
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

    # Each box represents a part of the image where a particular object
    # was detected.
    boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

    # Each score is the confidence for the corresponding detection; it is
    # shown on the result image together with the class label.
    scores = detection_graph.get_tensor_by_name('detection_scores:0')
    classes = detection_graph.get_tensor_by_name('detection_classes:0')
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')

    # Actual detection.
    (boxes, scores, classes, num_detections) = sess.run(
        [boxes, scores, classes, num_detections],
        feed_dict={image_tensor: image_np_expanded})

    # Visualization of the results of a detection (draws in place on
    # image_np).
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        use_normalized_coordinates=True,
        line_thickness=8)

    return image_np
81 | 83 |
|
if __name__ == '__main__':
    # NOTE(review): camera index 1 skips the default device; confirm this
    # is the intended capture source (index 0 is the usual built-in camera).
    video_capture = cv2.VideoCapture(1)

    try:
        while True:
            ret, frame = video_capture.read()
            # Guard against a failed read (camera unavailable, end of
            # stream): the original ignored `ret` and passed frame=None
            # straight into detect_objects, which crashes.
            if not ret or frame is None:
                break

            cv2.imshow('Video', detect_objects(frame))

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # When everything is done, release the capture and close the
        # display window, even if an exception was raised mid-loop.
        video_capture.release()
        cv2.destroyAllWindows()
0 commit comments