video_gen.py
# import the necessary packages
from __future__ import print_function
from imutils.video import VideoStream
import numpy as np
import argparse
import imutils
import time
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-o", "--output", required=True,
    help="path to output video file")
ap.add_argument("-p", "--picamera", type=int, default=-1,
    help="whether or not the Raspberry Pi camera should be used")
ap.add_argument("-f", "--fps", type=int, default=20,
    help="FPS of output video")
ap.add_argument("-c", "--codec", type=str, default="MJPG",
    help="codec of output video")
args = vars(ap.parse_args())
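# example invocation (output filename is illustrative; MJPG frames in an
# .avi container is commonly a safe combination for cv2.VideoWriter):
#   python video_gen.py --output example.avi --fps 20 --codec MJPG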
# initialize the video stream and allow the camera
# sensor to warm up
print("[INFO] warming up camera...")
# the original imutils VideoStream capture is left commented out; the
# Jetson CSI camera is opened through the GStreamer pipeline below instead
# vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
# time.sleep(2.0)
# display resolution and flip method for the Jetson CSI camera
dispW = 800
dispH = 600
flip = 2
# check gst-inspect-1.0 nvarguscamerasrc for property options
camSet='nvarguscamerasrc sensor-id=0 wbmode=2 tnr-mode=2 tnr-strength=1'
camSet+=' ! video/x-raw(memory:NVMM), width=3264, height=2464, framerate=21/1, format=NV12'
camSet+=' ! nvvidconv flip-method='+str(flip)
camSet+=' ! video/x-raw, width='+str(dispW)+', height='+str(dispH)+', format=BGRx'
camSet+=' ! videoconvert ! video/x-raw, format=BGR ! appsink'
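# the pipeline captures full-resolution NV12 frames from the CSI sensor into
# NVMM memory, flips and scales them to dispW x dispH BGRx on the GPU via
# nvvidconv, then converts to BGR on the CPU so OpenCV receives plain BGR frames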
cam = cv2.VideoCapture(camSet)
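# optional sanity check (assumes OpenCV was built with GStreamer support;
# without it, VideoCapture can fail silently and read() returns no frames)
if not cam.isOpened():
    raise RuntimeError("[ERROR] could not open nvarguscamerasrc pipeline")
# quick standalone preview loop, left commented out for debugging: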
# while True:
#     ret, frame = cam.read()
#     cv2.imshow('piCam', frame)
#     if cv2.waitKey(1) == ord('q'):
#         break
# cam.release()
# initialize the FourCC, video writer, dimensions of the frame, and
# zeros array
fourcc = cv2.VideoWriter_fourcc(*args["codec"])
writer = None
(h, w) = (None, None)
zeros = None
#"""
# loop over frames from the video stream
while True:
    # grab the frame from the video stream (the original 300-pixel-wide
    # resize is left commented out so the full dispW x dispH frame is kept)
    # frame = vs.read()
    ret, frame = cam.read()
    # frame = imutils.resize(frame, width=300)
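    # optional guard (assumption: a failed read returns ret == False or a
    # None frame, e.g. while the sensor is still warming up); skipping such
    # frames avoids crashing on frame.shape below
    if not ret or frame is None:
        continue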
    # check if the writer is None
    if writer is None:
        # store the image dimensions, initialize the video writer,
        # and construct the zeros array
        (h, w) = frame.shape[:2]
        writer = cv2.VideoWriter(args["output"], fourcc, args["fps"],
            (w * 2, h * 2), True)
        zeros = np.zeros((h, w), dtype="uint8")
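        # note: cv2.VideoWriter expects every frame to match the size it was
        # constructed with, so the (w * 2, h * 2) value here must stay in
        # sync with the 2x2 montage assembled below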
    # split the frame into its B, G, and R channels, then build a
    # three-channel visualization of each channel individually
    (B, G, R) = cv2.split(frame)
    R = cv2.merge([zeros, zeros, R])
    G = cv2.merge([zeros, G, zeros])
    B = cv2.merge([B, zeros, zeros])
    # construct the final output frame, storing the original frame
    # at the top-left, the red channel in the top-right, the green
    # channel in the bottom-right, and the blue channel in the
    # bottom-left
    output = np.zeros((h * 2, w * 2, 3), dtype="uint8")
    output[0:h, 0:w] = frame
    output[0:h, w:w * 2] = R
    output[h:h * 2, w:w * 2] = G
    output[h:h * 2, 0:w] = B
    # write the output frame to file
    writer.write(output)
    # show the frames
    cv2.imshow("Frame", frame)
    # cv2.imshow("Output", output)
    key = cv2.waitKey(1) & 0xFF
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
# do a bit of cleanup
print("[INFO] cleaning up...")
cv2.destroyAllWindows()
#vs.stop()
cam.release()
writer.release()
#"""