# Importing required modules
import cv2
import matplotlib.pyplot as plt
# Reading the two input images to be matched.
# cv2.imread returns None (instead of raising) when the file is missing
# or unreadable, which would otherwise cause a confusing failure later
# inside detectAndCompute — fail fast with a clear message here.
img1 = cv2.imread("image1.jpg")
img2 = cv2.imread("image2.jpg")
if img1 is None:
    raise FileNotFoundError("could not read image1.jpg")
if img2 is None:
    raise FileNotFoundError("could not read image2.jpg")
# function for feature matching
def BFMatching(img1, img2, nfeatures=5, ratio=0.98):
    """Match ORB features between two images using brute-force matching.

    Detects up to ``nfeatures`` ORB keypoints in each image, matches their
    descriptors with a brute-force Hamming matcher, applies Lowe's ratio
    test with threshold ``ratio``, shows the good matches in a window,
    and prints the keypoints / matches for inspection.

    Parameters
    ----------
    img1, img2 : numpy.ndarray
        Input images as loaded by ``cv2.imread``.
    nfeatures : int, optional
        Maximum number of ORB features to detect (default 5, as before).
    ratio : float, optional
        Lowe's ratio-test threshold (default 0.98, as before).

    Returns
    -------
    tuple
        ``("good features", good)`` where ``good`` is a list of
        one-element lists of cv2.DMatch that passed the ratio test.
    """
    # Initiate ORB detector (note: ORB, not SIFT, despite old comments)
    feat = cv2.ORB_create(nfeatures)
    # find the keypoints and descriptors with ORB
    kpnt1, des1 = feat.detectAndCompute(img1, None)
    kpnt2, des2 = feat.detectAndCompute(img2, None)
    # detectAndCompute returns None descriptors when no keypoints are
    # found; knnMatch would crash on None, so bail out gracefully.
    if des1 is None or des2 is None:
        return ("good features", [])
    # ORB produces binary descriptors, so Hamming distance is the correct
    # metric — the default BFMatcher() uses NORM_L2, which is meant for
    # float descriptors (SIFT/SURF) and gives poor results with ORB.
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    # finding the 2 nearest neighbors for each descriptor
    matches = bf.knnMatch(des1, des2, k=2)
    # Apply Lowe's ratio test, appending good matches to good[].
    good = []
    for pair in matches:
        # knnMatch may return fewer than 2 neighbors for a descriptor
        # (e.g. when only one feature exists in the other image);
        # unpacking such a pair would raise ValueError, so skip it.
        if len(pair) < 2:
            continue
        m, n = pair
        if m.distance < ratio * n.distance:
            good.append([m])
    # Draw only the matches that passed the ratio test (the original
    # code drew ALL knn matches, so the filtering never affected the
    # displayed image).
    matched_image = cv2.drawMatchesKnn(img1,
        kpnt1, img2, kpnt2, good, None,
        matchColor=(0, 255, 0), matchesMask=None,
        singlePointColor=(255, 0, 0), flags=0)
    # for jupyter notebook use this function
    # to see output image
    # plt.imshow(matched_image)
    # if you are using python then run this-
    cv2.imshow("matches", matched_image)
    cv2.waitKey(0)
    # release the display window once a key has been pressed
    cv2.destroyAllWindows()
    # key points and matches that are being used by the above program
    print("key points of first image- ")
    print(kpnt1)
    print("\nkey points of second image-")
    print(kpnt2)
    print("\noverall features that matched by BFMatcher()-")
    print(matches)
    return ("good features", good)  # returning good features
BFMatching(img1, img2)