webcam.py
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 15 09:55:40 2018
@author: Robin Amsters
@email: [email protected]
Aruco marker video analysis functions
"""
import cv2
import pickle
import time
import cv2.aruco as aruco
import numpy as np
#==============================================================================
# WEBCAM VIDEO FILE FUNCTIONS
#==============================================================================
def count_frames_manual(video_file):
    """
    Function that counts the total number of frames in a video file.
    This is a very unoptimized implementation: every frame is decoded and counted.
    """
    # initialize the total number of frames read
    total = 0
    video = cv2.VideoCapture(video_file)
    # loop over the frames of the video
    while True:
        # grab the current frame
        (grabbed, frame) = video.read()
        # check to see if we have reached the end of the video
        if not grabbed:
            break
        # increment the total number of frames read
        total += 1
    # release the capture and return the total number of frames in the video file
    video.release()
    return total
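
# A minimal sketch of a faster alternative, assuming the container reports a
# usable frame count through cv2.CAP_PROP_FRAME_COUNT (this metadata is not
# reliable for every codec, which is why the manual count above exists).
# The name count_frames_fast is introduced here purely for illustration.
def count_frames_fast(video_file):
    """Return the frame count reported by the video container metadata."""
    video = cv2.VideoCapture(video_file)
    total = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    video.release()
    return total
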
def get_webcam_reference(video_file, cam_params_file, dictionary, marker_size, board, show_video=False, save_output=False, output_file_name='output.avi'):
    """
    Function that returns the position and orientation of a marker from its
    initial position.

    INPUTS:
        - video_file: path to the video file to be processed
        - cam_params_file: pickle file containing the parameters from camera calibration
        - dictionary: aruco predefined dictionary used to generate the markers
        - board: aruco marker board
        - marker_size: size of the marker to detect, in meters
        - show_video (default=False): play the video with detection results; playback can be stopped by pressing q
        - save_output (default=False): save the detection output to a video file
        - output_file_name (default='output.avi'): name of the output video file

    OUTPUTS:
        - all_tvec: marker coordinates [x, y, z] for each frame in which a marker was detected
        - all_rvec: marker orientation [x, y, z] for each frame in which a marker was detected
    """
    # Open video file and get number of frames
    print('Preprocessing: counting number of frames')
    n_frames = count_frames_manual(video_file)
    cap = cv2.VideoCapture(video_file)

    # Parameters from camera calibration
    cal = pickle.load(open(cam_params_file, "rb"))
    cMat = cal[0]     # 3x3 intrinsic camera matrix
    dist = cal[1][0]  # distortion coefficients

    # Initialize collections
    all_tvec = list()
    all_rvec = list()

    print('Tracking marker')
    if save_output:
        # Define the codec and create VideoWriter object
        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        fourcc = cv2.VideoWriter_fourcc(*'DIVX')
        out = cv2.VideoWriter(output_file_name, fourcc, 29.0, size, True)  # last argument True: 3-channel color output (False for 1-channel)

    parameters = aruco.DetectorParameters_create()  # Obtain detection parameters
    # Capture frame-by-frame
    for i in range(n_frames):
        ret, frame = cap.read()
        if not ret:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Grayscale copy used for detection; results are drawn on the color frame

        # lists of ids and the corners belonging to each id
        corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, dictionary, parameters=parameters)
        corners, ids, rejectedImgPoints, recoveredIdxs = aruco.refineDetectedMarkers(gray, board, corners, ids, rejectedCorners=rejectedImgPoints, cameraMatrix=cMat, distCoeffs=dist)

        if ids is not None:
            # Obtain rotation and translation vectors
            rObject, tObject, _objPoints = aruco.estimatePoseSingleMarkers(corners, marker_size, cameraMatrix=cMat, distCoeffs=dist)  # corners, marker size in meters, 3x3 intrinsic camera matrix, distortion coefficients
            rvec = [rObject.item(0), rObject.item(1), rObject.item(2)]
            tvec = [tObject.item(0), tObject.item(1), tObject.item(2)]
            all_rvec.append(rvec)
            all_tvec.append(tvec)

            # Show detection results on the frame
            frameWithMarkers = aruco.drawDetectedMarkers(frame, corners, ids=ids)  # Draw marker borders
            cv2.putText(frameWithMarkers, "ID: " + str(ids), (0, 64), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)  # Show detected IDs
            aruco.drawAxis(frameWithMarkers, cMat, dist, np.array(rvec), np.array(tvec), 0.1)  # Draw marker axes

            # Display the resulting frame
            if show_video:
                cv2.imshow('frame', frameWithMarkers)
        else:
            # Display: no IDs detected
            cv2.putText(frame, "No IDs", (0, 64), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
            if show_video:
                cv2.imshow('frame', frame)

        if save_output:
            out.write(frame)

        # Stop when q is pressed
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    cap.release()
    if save_output:
        out.release()
    cv2.destroyAllWindows()

    return np.array(all_tvec), np.array(all_rvec)
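
#==============================================================================
# EXAMPLE USAGE
#==============================================================================
# A minimal usage sketch, not a definitive recipe. The file names
# ('test_video.avi', 'cam_params.pkl'), the dictionary choice and the marker
# size are assumptions made for illustration only; the one requirement imposed
# by this module is that the calibration pickle unpacks as cal[0] = 3x3 camera
# matrix and cal[1][0] = distortion coefficients, matching how
# get_webcam_reference() reads it above.
if __name__ == "__main__":
    # Assumed marker setup: one 7 cm marker from the predefined 6x6 dictionary
    dictionary = aruco.getPredefinedDictionary(aruco.DICT_6X6_250)
    board = aruco.GridBoard_create(1, 1, 0.07, 0.01, dictionary)  # 1x1 board = a single marker, sizes in meters

    all_tvec, all_rvec = get_webcam_reference('test_video.avi', 'cam_params.pkl',
                                              dictionary, 0.07, board,
                                              show_video=True)
    print('Frames with a detected marker: ' + str(len(all_tvec)))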