Merge branch '1-transform-perspective-image-sakurai'
misakurai committed Aug 15, 2024
2 parents 2e0d52c + dce017d commit 557cce4
Showing 2 changed files with 95 additions and 37 deletions.
src/testVideo1.py (130 changes: 93 additions & 37 deletions)
@@ -1,6 +1,6 @@
import cv2
import numpy as np
import math
import argparse

# Set the camera height and the target coordinates
camera_height = 100
@@ -21,55 +21,111 @@
# Get the frame size
height, width, channels = frame.shape

# Set the corresponding points before and after the transform
a = width / 2
b = a * X / math.sqrt(X**2 + Y**2)

A = [width/2, height - Y]
B = [width, height - Y]
C = [width, height]
D = [width/2, height]
def order_points(pts):
    # order the four points as: top-left, top-right, bottom-right, bottom-left
    rect = np.zeros((4, 2), dtype = "float32")
    s = pts.sum(axis = 1)
    rect[0] = pts[np.argmin(s)]
    rect[2] = pts[np.argmax(s)]
    diff = np.diff(pts, axis = 1)
    rect[1] = pts[np.argmin(diff)]
    rect[3] = pts[np.argmax(diff)]
    # return the ordered coordinates
    return rect


# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", help = "path to the image file")
ap.add_argument("-c", "--coords",
    help = "comma separated list of source points")
args = vars(ap.parse_args())
pts = np.array(eval(args["coords"]), dtype = "float32")

rect = order_points(pts)
(tl, tr, br, bl) = rect
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB))

heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxHeight = max(int(heightA), int(heightB))

dst = np.array([
    [maxWidth/4, maxHeight/4],
    [maxWidth*3/4, maxHeight/4],
    [maxWidth*3/4, maxHeight*3/4],
    [maxWidth/4, maxHeight*3/4]], dtype = "float32")

def onMouse(event, x, y, flags, param):
    # Standard OpenCV mouse-callback signature; clicked points accumulate in param.
    if event == cv2.EVENT_LBUTTONDOWN:
        print(x, y)
        param.append([x, y])

# driver function
if __name__ == "__main__":

    # reading the image
    ret, frame = capture.read()

    # A_trans = [width/2, height - Y]
    # B_trans = [width, height - Y]
    # C_trans = [width, height]
    # D_trans = [width/2, height]
    # Perspective transform
    # M = cv2.getPerspectiveTransform(p_original, p_trans)
    M = cv2.getPerspectiveTransform(rect, dst)
    frame_trans = cv2.warpPerspective(frame, M, (width, height))

    # displaying the image
    cv2.imshow('変換後の映像', frame_trans)


    corners = []
    # attach the callback to the window created above; callbacks need an existing window
    cv2.setMouseCallback('変換後の映像', onMouse, corners)

    Y = a
    print(f'Y:{Y}')
    # wait until four corners have been clicked
    while True:
        if len(corners) == 4:
            break
        cv2.waitKey(1)  # let OpenCV deliver pending mouse events

    while True:
        # Grab a frame from the camera
        ret, frame = capture.read()
        if not ret:
            break

        # Perspective transform
        # M = cv2.getPerspectiveTransform(p_original, p_trans)
        M = cv2.getPerspectiveTransform(rect, dst)
        frame_trans = cv2.warpPerspective(frame, M, (width, height))


        A_trans = [width/2, 0]
        B_trans = [width/2 + a, 0]
        C_trans = [width/2 + a, Y]
        D_trans = [width/2, Y]
        print(A_trans)

        # Display
        cv2.imshow('変換後の映像', frame_trans)
        cv2.setMouseCallback('変換後の映像', onMouse, corners)

        # Corresponding points before and after the transform
        p_original = np.float32([A, B, C, D])
        p_trans = np.float32([A_trans, B_trans, C_trans, D_trans])
        # cv2.imshow('変換前の映像', frame)

    while True:
        # Grab a frame from the camera
        ret, frame = capture.read()
        if not ret:
            break
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        # Perspective transform
        M = cv2.getPerspectiveTransform(p_original, p_trans)
        frame_trans = cv2.warpPerspective(frame, M, (width, height))

        # Draw a circle at each of the A_trans and B_trans points
        cv2.circle(frame_trans, (int(A_trans[0]), int(A_trans[1])), 5, (255, 0, 0), -1)
        cv2.circle(frame_trans, (int(B_trans[0]), int(B_trans[1])), 5, (255, 0, 0), -1)
        # while True:
        #     # Grab a frame from the camera
        #     ret, frame = capture.read()
        #     if not ret:
        #         break

        # Display
        cv2.imshow('変換後の映像', frame_trans)
        #     # Perspective transform
        #     # M = cv2.getPerspectiveTransform(p_original, p_trans)
        #     M = cv2.getPerspectiveTransform(rect, dst)
        #     frame_trans = cv2.warpPerspective(frame, M, (width, height))


        #     # Display
        #     cv2.imshow('変換後の映像', frame_trans)
        #     # cv2.imshow('変換前の映像', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        # if cv2.waitKey(1) & 0xFF == ord('q'):
        #     break

    capture.release()
    cv2.destroyAllWindows()
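Note: the merged file above still interleaves the old fixed-quad code path (A/B/C/D, p_original, p_trans) with the new click-to-select path (order_points, rect, dst), so the pieces do not line up cleanly. Purely as a reference, here is a minimal, self-contained sketch of the flow the new code appears to be aiming for: click four source points on a preview window, order them with order_points, and warp every subsequent frame. The window names, camera index, and helper names are assumptions for illustration, not part of this commit.

# Hypothetical consolidation of testVideo1.py; window names and camera index are assumed.
import cv2
import numpy as np

def order_points(pts):
    # Order the four points as top-left, top-right, bottom-right, bottom-left.
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]      # top-left has the smallest x + y
    rect[2] = pts[np.argmax(s)]      # bottom-right has the largest x + y
    d = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(d)]      # top-right has the smallest y - x
    rect[3] = pts[np.argmax(d)]      # bottom-left has the largest y - x
    return rect

corners = []

def on_mouse(event, x, y, flags, param):
    # Standard OpenCV callback signature; clicked points accumulate in param.
    if event == cv2.EVENT_LBUTTONDOWN:
        param.append([x, y])

capture = cv2.VideoCapture(0)        # assumed camera index
ret, frame = capture.read()
height, width = frame.shape[:2]

cv2.namedWindow('preview')
cv2.setMouseCallback('preview', on_mouse, corners)

# Collect four clicks on the live preview.
while len(corners) < 4:
    ret, frame = capture.read()
    if not ret:
        break
    cv2.imshow('preview', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

if len(corners) != 4:
    raise SystemExit("need exactly four clicked corners")

rect = order_points(np.array(corners, dtype="float32"))
(tl, tr, br, bl) = rect
maxWidth = int(max(np.linalg.norm(br - bl), np.linalg.norm(tr - tl)))
maxHeight = int(max(np.linalg.norm(tr - br), np.linalg.norm(tl - bl)))

# As in the commit's dst, map the clicked quad onto the central half of the destination.
dst = np.array([
    [maxWidth / 4, maxHeight / 4],
    [maxWidth * 3 / 4, maxHeight / 4],
    [maxWidth * 3 / 4, maxHeight * 3 / 4],
    [maxWidth / 4, maxHeight * 3 / 4]], dtype="float32")
M = cv2.getPerspectiveTransform(rect, dst)

while True:
    ret, frame = capture.read()
    if not ret:
        break
    frame_trans = cv2.warpPerspective(frame, M, (width, height))
    cv2.imshow('transformed', frame_trans)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

capture.release()
cv2.destroyAllWindows()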
src/transform_example.py (2 changes: 2 additions & 0 deletions)
@@ -5,6 +5,8 @@

#[(275, 329), (785,321), (914,584), (141,592)]

# python src/transform_example.py --image public/sample-desktop-1.jpg --coords "[(275, 329), (785,321), (914,584), (141,592)]"

from transform import four_point_transform
import numpy as np
import argparse
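The rest of transform_example.py is collapsed in this diff view. Based on the visible imports and the usage line added above, its body presumably resembles the following sketch; the cv2 calls and ast.literal_eval parsing are assumptions for illustration, not code from this commit.

# Hypothetical body for transform_example.py, inferred from the imports and usage line above.
from transform import four_point_transform
import numpy as np
import argparse
import ast
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path to the image file")
ap.add_argument("-c", "--coords", required=True,
                help="comma separated list of source points")
args = vars(ap.parse_args())

image = cv2.imread(args["image"])
# ast.literal_eval parses the "[(x, y), ...]" string without executing arbitrary code.
pts = np.array(ast.literal_eval(args["coords"]), dtype="float32")

# Apply the four-point perspective transform and show both images.
warped = four_point_transform(image, pts)
cv2.imshow("Original", image)
cv2.imshow("Warped", warped)
cv2.waitKey(0)
cv2.destroyAllWindows()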
