A few weeks ago I put together a tracking implementation based on template matching: given a template image, the program tracks that target through a video.
Without further ado, here is the program:
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 30 09:13:24 2016
@author: liu
"""
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Template image to track (read as grayscale)
template = cv2.imread('ppp.png', 0)
w, h = template.shape[::-1]

# TM_SQDIFF_NORMED: squared-difference matching; the best match is 0,
# larger values mean a worse match.
methods = ['cv2.TM_SQDIFF_NORMED']
#methods = ['cv2.TM_CCOEFF_NORMED']

cap = cv2.VideoCapture('double.mov')
ret, frame = cap.read()
c = 0
time = 10          # only process every 10th frame

while ret:
    ret, frame = cap.read()
    c = c + 1
    if ret == True:
        if c % time == 0:
            for meth in methods:
                method = eval(meth)
                # Save the frame and reload it as a grayscale image
                cv2.imwrite('xuphoto' + str(c) + '.png', frame)
                s2 = cv2.imread('xuphoto' + str(c) + '.png', 0)

                res = cv2.matchTemplate(s2, template, method)
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

                # For the SQDIFF methods the best match is the minimum;
                # for the other methods it is the maximum.
                if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
                    top_left = min_loc
                else:
                    top_left = max_loc
                # Enlarge the box by 100 px beyond the template size
                bottom_right = (top_left[0] + w + 100, top_left[1] + h + 100)
                cv2.rectangle(s2, top_left, bottom_right, 255, 2)
                cv2.imwrite('iphone' + str(c) + '.png', s2)

                plt.subplot(121), plt.imshow(res, cmap='gray')
                plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
                plt.subplot(122), plt.imshow(s2, cmap='gray')
                plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
                plt.suptitle('cv2.TM_SQDIFF_NORMED')
                plt.show()
        if c > 200:
            c = 0

cv2.destroyAllWindows()
cap.release()
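One design note: the loop above writes each sampled frame to disk and immediately reads it back just to obtain a grayscale image. The same step can be done in memory with cv2.cvtColor. Below is a minimal sketch of that variant, assuming the same frame, template, w and h variables as in the program (it is not part of the original code):

# Convert the captured BGR frame to grayscale in memory
# instead of writing it to disk and reading it back.
s2 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

res = cv2.matchTemplate(s2, template, cv2.TM_SQDIFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

# With TM_SQDIFF_NORMED the best match is the minimum location.
top_left = min_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
cv2.rectangle(s2, top_left, bottom_right, 255, 2)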
That is the program so far. If anything is unclear, feel free to contact me: liushengkai008@163.com
The result is that the program can track the template target we select. However, we need to choose the template carefully, otherwise the track gets lost; you will discover this little quirk once you try it yourself.
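One way to notice a lost track automatically is to check the match score before drawing the box. This is a rough sketch, not part of the original program, and the 0.2 threshold is an assumed value you would have to tune for your own video and template:

# With TM_SQDIFF_NORMED, 0 is a perfect match and values near 1 are very poor.
# If even the best score is still large, the template is probably not in the frame.
LOST_THRESHOLD = 0.2   # assumed value; tune per video/template

res = cv2.matchTemplate(s2, template, cv2.TM_SQDIFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

if min_val > LOST_THRESHOLD:
    print('frame %d: match score %.3f - target probably lost' % (c, min_val))
else:
    top_left = min_loc
    cv2.rectangle(s2, top_left, (top_left[0] + w, top_left[1] + h), 255, 2)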
Later on I will use a Kinect sensor for the vision processing, so please stay tuned.