import cv2
import numpy as np

def sift_keypoints_detect(image):
    # SIFT works on single-channel images, so convert to grayscale first.
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # OpenCV >= 4.4 ships SIFT in the main module; older builds need
    # cv2.xfeatures2d.SIFT_create() from opencv-contrib-python.
    sift = cv2.SIFT_create()
    # Detect keypoints and compute their 128-dimensional descriptors.
    keypoints, features = sift.detectAndCompute(gray_image, None)
    # Visualize the detected keypoints on the grayscale image.
    keypoints_image = cv2.drawKeypoints(
        gray_image, keypoints, None,
        flags=cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)
    return keypoints_image, keypoints, features
def get_feature_point_ensemble(features_right, features_left):
    # Brute-force matcher: for each right-image descriptor, find its two
    # nearest neighbours among the left-image descriptors.
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(features_right, features_left, k=2)
    # Sort by the nearest/second-nearest distance ratio (most distinctive first).
    matches = sorted(matches, key=lambda x: x[0].distance / x[1].distance)
    # Lowe's ratio test: keep a match only if it is clearly better
    # than the second-best candidate.
    ratio = 0.6
    good = []
    for m, n in matches:
        if m.distance < ratio * n.distance:
            good.append(m)
    return good
def Panorama_stitching(image_right, image_left):
    _, keypoints_right, features_right = sift_keypoints_detect(image_right)
    _, keypoints_left, features_left = sift_keypoints_detect(image_left)
    goodMatch = get_feature_point_ensemble(features_right, features_left)

    # At least 4 point correspondences are required to estimate a homography.
    if len(goodMatch) > 4:
        ptsR = np.float32(
            [keypoints_right[m.queryIdx].pt for m in goodMatch]).reshape(-1, 1, 2)
        ptsL = np.float32(
            [keypoints_left[m.trainIdx].pt for m in goodMatch]).reshape(-1, 1, 2)
        # RANSAC rejects outlier matches; correspondences reprojecting
        # farther than 4 pixels are treated as outliers.
        ransacReprojThreshold = 4
        Homography, status = cv2.findHomography(
            ptsR, ptsL, cv2.RANSAC, ransacReprojThreshold)
        # Warp the right image into the left image's coordinate frame,
        # onto a canvas wide enough to hold both images.
        Panorama = cv2.warpPerspective(
            image_right, Homography,
            (image_right.shape[1] + image_left.shape[1], image_right.shape[0]))
        cv2.imshow("warped right image", Panorama)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        # Paste the left image onto the left part of the canvas.
        Panorama[0:image_left.shape[0], 0:image_left.shape[1]] = image_left
        return Panorama
if __name__ == '__main__':
    image_left = cv2.imread("C:/Users/xpp/Desktop/Lena.png")
    image_right = cv2.imread("C:/Users/xpp/Desktop/Lena.png")
    # Shrink the right image so the two inputs start at different sizes,
    # then resize the left image to match the right one.
    image_right = cv2.resize(image_right, None, fx=0.3, fy=0.3)
    image_left = cv2.resize(image_left, (image_right.shape[1], image_right.shape[0]))
    keypoints_image_right, keypoints_right, features_right = sift_keypoints_detect(image_right)
    keypoints_image_left, keypoints_left, features_left = sift_keypoints_detect(image_left)

    cv2.imshow("feature point detection_01", np.hstack((image_left, keypoints_image_left)))
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    cv2.imshow("feature point detection_02", np.hstack((image_right, keypoints_image_right)))
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    goodMatch = get_feature_point_ensemble(features_right, features_left)
    # Draw the matches that survived the ratio test.
    all_goodmatch_image = cv2.drawMatches(
        image_right, keypoints_right, image_left, keypoints_left,
        goodMatch, None, flags=2)
    cv2.imshow("matches", all_goodmatch_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    Panorama = Panorama_stitching(image_right, image_left)
    cv2.namedWindow("Panorama", cv2.WINDOW_AUTOSIZE)
    cv2.imshow("Panorama", Panorama)
    cv2.imwrite("C:/Users/xpp/Desktop/Panorama.png", Panorama)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Algorithm: panoramic image stitching "sews" two images that share an overlapping region together to create a single panoramic image.
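The demo above loads the same Lena.png twice, so the two inputs overlap completely and the estimated homography is close to the identity. To exercise the pipeline on a more realistic pair without shooting two photos, one can carve two overlapping crops out of a single image. The snippet below is a minimal sketch of that idea, reusing the functions defined above; the crop fractions (65% of the width per side, about 30% shared) and the source path are arbitrary assumptions.

# Minimal sketch: build an overlapping pair from one image and stitch it.
import cv2

source = cv2.imread("C:/Users/xpp/Desktop/Lena.png")  # assumed path
h, w = source.shape[:2]
# Left crop keeps the left 65% of the width, right crop the right 65%,
# so roughly 30% of the width is visible in both crops.
image_left = source[:, :int(w * 0.65)].copy()
image_right = source[:, int(w * 0.35):].copy()

panorama = Panorama_stitching(image_right, image_left)
if panorama is not None:  # Panorama_stitching returns None when too few matches survive
    cv2.imshow("Panorama from overlapping crops", panorama)
    cv2.waitKey(0)
    cv2.destroyAllWindows()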