path: root/test.py
blob: 4a5b87ef384cc4c9bdd6a9aa74c1776383197e4a
import cv2
import math
import numpy
from math import sin,cos

cap = cv2.VideoCapture("../vid.mp4")

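# Parameters for Shi-Tomasi corner detection (cv2.goodFeaturesToTrack)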
feature_params = dict( maxCorners = 100,
                       qualityLevel = 0.3,
                       minDistance = 20,
                       blockSize = 7 )

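# Downscale factor applied to each frame and the size of the canvas the frames are stitched onto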
scale_factor = 0.5
scr_width = 1600
scr_height = 900


# Parameters for lucas kanade optical flow
lk_params = dict( winSize  = (15,15),
                  maxLevel = 2,
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))


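# Read the first frame to serve as the initial tracking reference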
ret, oldframe = cap.read()
if not ret:
    raise IOError("could not read the first frame from ../vid.mp4")
oldgray = cv2.cvtColor(oldframe, cv2.COLOR_BGR2GRAY)

height, width, bpp = oldframe.shape

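# All-white single-channel mask; warping it with each frame shows which canvas pixels that frame covers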
mask = numpy.ones((height,width, 1), numpy.uint8) * 255
screencontent = numpy.zeros((scr_height, scr_width,3), numpy.uint8)

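# Accumulated camera rotation (radians) and translation, integrated over all frame-to-frame transforms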
total_angle = 0.
total_x = 1500
total_y = 0

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        # end of the video (or a read error): stop the loop
        break

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    
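    # Detect corners in the previous frame to seed the optical-flow tracker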
    p0 = cv2.goodFeaturesToTrack(oldgray, mask = None, **feature_params)

    # calculate optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(oldgray, gray, p0, None, **lk_params)

    # Select good points
    good_new = p1[st==1]
    good_old = p0[st==1]

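    # Visualise tracking: red dots for detected corners, green lines for the flow vectors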
    for i in p0:
        x, y = i.ravel()
        cv2.line(frame, (int(x), int(y)), (int(x), int(y)), (0, 0, 255), 5)

    one = []
    two = []
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        x1, y1 = new.ravel()
        x2, y2 = old.ravel()
        cv2.line(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 5)
        one.append((x1, y1))
        two.append((x2, y2))

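    # Estimate the frame-to-frame rigid transform (rotation, uniform scale, translation)
    # directly from the two grayscale images. Note: cv2.estimateRigidTransform was removed
    # in OpenCV 4.x; cv2.estimateAffinePartial2D(good_new, good_old) on the tracked point
    # sets is the usual replacement there.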
    mat = cv2.estimateRigidTransform(gray, oldgray, False)
    if mat is None:
        # estimation can fail on low-texture or blurred frames; skip this frame
        oldframe, oldgray = frame, gray
        continue
    angle = math.atan2(mat[0, 1], mat[0, 0])
    stretch = int((math.sqrt(mat[0, 1]**2 + mat[0, 0]**2) - 1) * 100)

    # shift_x and shift_y: the translation as if the rotate-and-stretch were applied
    # around the center of the image instead of the top-left corner
    shift_x = mat[0, 2] - width/2 + (mat[0, 0]*width/2 + mat[0, 1]*height/2)
    shift_y = mat[1, 2] - height/2 + (mat[1, 0]*width/2 + mat[1, 1]*height/2)

    total_x = total_x + shift_x
    total_y = total_y + shift_y
    total_angle = total_angle + angle

    print(math.degrees(angle), 'deg\t', stretch, "%\t", shift_x, '\t', shift_y)

    mat2 = cv2.getRotationMatrix2D((width/2, height/2), math.degrees(total_angle), scale_factor)
    mat2[0, 2] = mat2[0, 2] + total_x*scale_factor
    mat2[1, 2] = mat2[1, 2] + total_y*scale_factor
    print(repr(mat2))


#    mat2=numpy.array([[cos(total_angle), sin(total_angle), total_x], [-sin(total_angle),cos(total_angle),total_y]])
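    # Warp the current frame and its validity mask into canvas coordinates, then composite:
    # clear the canvas where the new frame lands and add the warped frame there.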
    frame2 = cv2.warpAffine(frame, mat2, (scr_width, scr_height))
    mask2 = cv2.warpAffine(mask, mat2, (scr_width, scr_height))
    ret, mask2 = cv2.threshold(mask2, 254, 255, cv2.THRESH_BINARY)
    #mask2 = cv2.dilate(mask2, numpy.ones((3,3),numpy.uint8))
    #screencontent = frame2
    screencontent = cv2.bitwise_and(screencontent,screencontent, mask=cv2.bitwise_not(mask2))
    screencontent = cv2.add(screencontent, cv2.bitwise_and(frame2,frame2,mask=mask2))
    #screencontent = frame2
    #screencontent = mask2

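    # Show the raw frame with the tracking overlay and the growing stitched canvas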
    cv2.imshow('frame', frame)
    cv2.imshow('screencontent', screencontent)
    
    oldframe = frame
    oldgray = gray

    if cv2.waitKey(20) & 0xFF == ord("q"):
        break
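
# Release the capture and close the display windows once the loop ends.
cap.release()
cv2.destroyAllWindows()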