|
| 1 | +import argparse |
| 2 | + |
| 3 | +import cv2 |
| 4 | +import numpy as np |
| 5 | + |
def build_arg_parser():
    """Create the command-line parser for the stereo epipolar-line demo.

    The script needs the paths of the two stereo views and the name of the
    feature extractor to use ('sift' or 'surf').

    Returns:
        argparse.ArgumentParser: parser with --img-left, --img-right and
        --feature-type arguments, all required.
    """
    parser = argparse.ArgumentParser(
        description='Find fundamental matrix using the two input '
                    'stereo images and draw epipolar lines')
    parser.add_argument('--img-left', dest='img_left', required=True,
                        help='Image captured from the left view')
    parser.add_argument('--img-right', dest='img_right', required=True,
                        help='Image captured from the right view')
    parser.add_argument('--feature-type', dest='feature_type', required=True,
                        help="Feature extractor that will be used; can be "
                             "either 'sift' or 'surf'")
    return parser
| 16 | + |
def draw_lines(img_left, img_right, lines, pts_left, pts_right):
    """Draw epipolar lines on the left image and matched points on both.

    Args:
        img_left: grayscale (single-channel) image the lines are drawn on.
        img_right: grayscale image; only the feature points are drawn here.
        lines: iterable of (a, b, c) coefficients of lines a*x + b*y + c = 0.
        pts_left: matched point coordinates in the left view.
        pts_right: matched point coordinates in the right view.

    Returns:
        Tuple (img_left, img_right) converted to BGR with drawings applied.
    """
    h, w = img_left.shape
    img_left = cv2.cvtColor(img_left, cv2.COLOR_GRAY2BGR)
    img_right = cv2.cvtColor(img_right, cv2.COLOR_GRAY2BGR)

    for line, pt_left, pt_right in zip(lines, pts_left, pts_right):
        a, b, c = line
        if b == 0:
            # Perfectly vertical epipolar line: the endpoint formula below
            # would divide by zero, so skip it instead of crashing.
            continue
        # Intersections of the line with the left (x=0) and right (x=w) edges.
        x_start, y_start = 0, int(-c / b)
        x_end, y_end = w, int(-(c + a * w) / b)
        # BUG FIX: a BGR image needs a 3-component color; the original code
        # sampled only 2 random values. 256 makes 255 reachable (randint's
        # upper bound is exclusive).
        color = tuple(np.random.randint(0, 256, 3).tolist())
        cv2.line(img_left, (x_start, y_start), (x_end, y_end), color, 1)
        # Keypoint coordinates are floats; OpenCV drawing functions require
        # integer pixel coordinates.
        cv2.circle(img_left, tuple(map(int, pt_left)), 5, color, -1)
        cv2.circle(img_right, tuple(map(int, pt_right)), 5, color, -1)

    return img_left, img_right
| 31 | + |
def get_descriptors(gray_image, feature_type):
    """Detect keypoints and compute descriptors for a grayscale image.

    Args:
        gray_image: single-channel input image.
        feature_type: name of the extractor, either 'sift' or 'surf'.

    Returns:
        (keypoints, descriptors) as produced by detectAndCompute.

    Raises:
        TypeError: if feature_type is neither 'sift' nor 'surf'.
    """
    if feature_type == 'surf':
        # cv2.SURF() is the removed OpenCV 2.x API; modern builds expose
        # SURF through the xfeatures2d contrib module.
        if hasattr(cv2, 'xfeatures2d'):
            feature_extractor = cv2.xfeatures2d.SURF_create()
        else:
            feature_extractor = cv2.SURF()

    elif feature_type == 'sift':
        # cv2.SIFT() was removed in OpenCV 3; SIFT_create() is the current
        # factory (back in the main module since OpenCV 4.4).
        if hasattr(cv2, 'SIFT_create'):
            feature_extractor = cv2.SIFT_create()
        else:
            feature_extractor = cv2.SIFT()

    else:
        raise TypeError("Invalid feature type; should be either 'surf' or 'sift'")

    keypoints, descriptors = feature_extractor.detectAndCompute(gray_image, None)
    return keypoints, descriptors
| 44 | + |
if __name__ == '__main__':
    args = build_arg_parser().parse_args()
    feature_type = args.feature_type

    if feature_type not in ['sift', 'surf']:
        raise TypeError("Invalid feature type; has to be either 'sift' or 'surf'")

    # Load both views as grayscale (flag 0).
    img_left = cv2.imread(args.img_left, 0)
    img_right = cv2.imread(args.img_right, 0)
    # cv2.imread returns None instead of raising on a bad path; fail early
    # with a clear message rather than crashing later inside resize().
    if img_left is None or img_right is None:
        raise IOError('Could not read one of the input images')

    # Scale factor kept as a tunable knob; 1.0 leaves the images unchanged.
    scaling_factor = 1.0
    img_left = cv2.resize(img_left, None, fx=scaling_factor,
                          fy=scaling_factor, interpolation=cv2.INTER_AREA)
    img_right = cv2.resize(img_right, None, fx=scaling_factor,
                           fy=scaling_factor, interpolation=cv2.INTER_AREA)

    kps_left, des_left = get_descriptors(img_left, feature_type)
    kps_right, des_right = get_descriptors(img_right, feature_type)

    # FLANN (KD-tree) matcher parameters.
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    # Match descriptors, keeping the 2 nearest neighbors for the ratio test.
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des_left, des_right, k=2)

    # Lowe's ratio test: keep a match only when the best neighbor is
    # clearly better than the second best.
    pts_left_image = []
    pts_right_image = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            pts_left_image.append(kps_left[m.queryIdx].pt)
            pts_right_image.append(kps_right[m.trainIdx].pt)

    pts_left_image = np.float32(pts_left_image)
    pts_right_image = np.float32(pts_right_image)
    F, mask = cv2.findFundamentalMat(pts_left_image, pts_right_image, cv2.FM_LMEDS)
    # findFundamentalMat returns None when estimation fails (too few points).
    if F is None or mask is None:
        raise RuntimeError('Could not estimate the fundamental matrix; '
                           'not enough good matches between the images')

    # Keep only the inlier correspondences.
    pts_left_image = pts_left_image[mask.ravel() == 1]
    pts_right_image = pts_right_image[mask.ravel() == 1]

    # Epipolar lines in the left image (image index 1) induced by the
    # right-image points (whichImage=2), plus the matching right points.
    lines1 = cv2.computeCorrespondEpilines(pts_right_image.reshape(-1, 1, 2), 2, F)
    lines1 = lines1.reshape(-1, 3)
    img_left_lines, img_right_pts = draw_lines(
        img_left, img_right, lines1, pts_left_image, pts_right_image)

    # Epipolar lines in the right image induced by the left-image points.
    lines2 = cv2.computeCorrespondEpilines(pts_left_image.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    img_right_lines, img_left_pts = draw_lines(
        img_right, img_left, lines2, pts_right_image, pts_left_image)

    cv2.imshow('Epi lines on left image', img_left_lines)
    cv2.imshow('Feature points on right image', img_right_pts)
    cv2.imshow('Epi lines on right image', img_right_lines)
    cv2.imshow('Feature points on left image', img_left_pts)
    cv2.waitKey()
    cv2.destroyAllWindows()
0 commit comments