Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Added aligner.py and YearbookRevampLibrary #119

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
134 changes: 134 additions & 0 deletions app/YearbookRevampLibrary/AutoAlignerModule.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,134 @@
import os
import cv2 as cv
import mediapipe as mp
from YearbookRevampLibrary.utils import output_image_files, collect_image_files


class AutoAligner():
    """Rotate a portrait image upright using MediaPipe pose landmarks,
    falling back to face detection when no pose is found."""

    def __init__(self):
        # MediaPipe solution namespaces used by align_image.
        self.mp_drawing = mp.solutions.drawing_utils
        self.mp_pose = mp.solutions.pose
        self.mp_holistic = mp.solutions.holistic
        self.mp_face_detection = mp.solutions.face_detection

    def align_image(self, img, min_face_detection_confidence=0.5, min_pose_detection_confidence=0.5):
        """
        :param img: image to align (BGR, as loaded by cv2)
        :param min_face_detection_confidence: confidence for face detection in the image
        :param min_pose_detection_confidence: confidence for pose detection in the image
        :return: aligned image
        """
        with self.mp_pose.Pose(static_image_mode=True, model_complexity=2,
                               min_detection_confidence=min_pose_detection_confidence) as pose:
            image = img
            results = pose.process(cv.cvtColor(image, cv.COLOR_BGR2RGB))

            if not results.pose_landmarks:
                # No pose found: fall back to face detection, rotating 90 degrees
                # clockwise up to 4 times until a face appears. After 4 failed
                # rotations the image is back at its original orientation and is
                # returned unchanged. The detector is created ONCE (the original
                # code rebuilt it on every loop iteration).
                with self.mp_face_detection.FaceDetection(
                        model_selection=1,
                        min_detection_confidence=min_face_detection_confidence) as face_detection:
                    for _ in range(4):
                        face_results = face_detection.process(cv.cvtColor(image, cv.COLOR_BGR2RGB))
                        if face_results.detections:
                            break
                        image = cv.rotate(image, cv.ROTATE_90_CLOCKWISE)
                    return image

            landmarks = results.pose_landmarks.landmark
            nose_y = landmarks[self.mp_holistic.PoseLandmark.NOSE].y
            right_y = landmarks[self.mp_holistic.PoseLandmark.RIGHT_SHOULDER].y
            left_y = landmarks[self.mp_holistic.PoseLandmark.LEFT_SHOULDER].y

            # Image y grows downward, so a nose above both shoulders means the
            # image is already upright. Named cv.ROTATE_* constants replace the
            # original magic rotateCode ints (0, 1, 2).
            if nose_y < right_y and nose_y < left_y:
                pass  # already upright
            elif nose_y > right_y and nose_y > left_y:
                image = cv.rotate(image, cv.ROTATE_180)
            elif nose_y < right_y and nose_y > left_y:
                image = cv.rotate(image, cv.ROTATE_90_CLOCKWISE)
            else:
                image = cv.rotate(image, cv.ROTATE_90_COUNTERCLOCKWISE)

            return image


def auto_align(cv2_list=None, input_path=None, output_path=None, min_face_detection_confidence=0.5, min_pose_detection_confidence=0.5):
    """
    :param cv2_list: list of cv2 objects to be aligned
    :param input_path: path of the folder containing images
    :param output_path: path of the folder to save aligned images
    :param min_face_detection_confidence: confidence for face detection in the image
    :param min_pose_detection_confidence: confidence for pose detection in the image
    :return: list of aligned cv2 objects
    """
    images, filenames = collect_image_files(cv2_list, input_path)

    # Delegate per-image alignment to AutoAligner.align_image instead of
    # duplicating the whole pose/face-detection pipeline inline (the original
    # body was a copy of the class logic and carried unused locals:
    # `path`, `mp_drawing`, `image_height`, `image_width`, `file`).
    aligner = AutoAligner()
    refined_images = [
        aligner.align_image(
            image,
            min_face_detection_confidence=min_face_detection_confidence,
            min_pose_detection_confidence=min_pose_detection_confidence,
        )
        for image in images
    ]

    return output_image_files(refined_images, output_path, filenames)

66 changes: 66 additions & 0 deletions app/YearbookRevampLibrary/BackgroundModule.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
import os
import cv2 as cv
import mediapipe as mp
import numpy as np
from YearbookRevampLibrary.utils import output_image_files, collect_image_files



class SelfiSegmentation():
    """Thin wrapper around MediaPipe selfie segmentation that swaps the
    background of a portrait for a solid colour or a replacement image."""

    def __init__(self, model=1):
        """
        :param model: model type 0 or 1. 0 is general 1 is landscape(faster)
        """
        self.model = model
        self.mpDraw = mp.solutions.drawing_utils
        self.mpSelfieSegmentation = mp.solutions.selfie_segmentation
        self.selfieSegmentation = self.mpSelfieSegmentation.SelfieSegmentation(self.model)

    def removeBG(self, img, imgBg=(255, 255, 255), threshold=0.1):
        """
        :param img: image to remove background from
        :param imgBg: BackGround Image (or a BGR colour tuple)
        :param threshold: higher = more cut, lower = less cut
        :return: background removed image
        """
        rgb = cv.cvtColor(img, cv.COLOR_BGR2RGB)
        mask = self.selfieSegmentation.process(rgb).segmentation_mask
        # Replicate the single-channel mask across the 3 colour channels and
        # threshold it into a boolean "keep this pixel" map.
        keep_pixel = np.stack((mask, mask, mask), axis=-1) > threshold
        if isinstance(imgBg, tuple):
            # Solid-colour background: build a full-frame fill of that colour.
            background = np.full(img.shape, imgBg, dtype=np.uint8)
        else:
            background = imgBg
        return np.where(keep_pixel, img, background)


def remove_background(cv2_list=None, input_path=None, output_path=None, background_img=(255, 255, 255), model=0, threshold=0.1):
    """
    :param cv2_list: list of cv2 objects
    :param input_path: path of the folder containing images
    :param output_path: path of the folder to save background removed images
    :param background_img: image (or BGR colour tuple) to set for the background
    :param model: model type 0 or 1. 0 is general 1 is landscape(faster)
    :param threshold: higher = more cut, lower = less cut
    :return: list of cv2 objects with background removed
    """
    images, filenames = collect_image_files(cv2_list, input_path)

    # One segmentor reused across all images.
    segmentor = SelfiSegmentation(model)

    # The original looped with enumerate() but ignored the loop variable and
    # re-indexed the list; iterate the images directly instead.
    refined_images = [segmentor.removeBG(img, background_img, threshold) for img in images]

    return output_image_files(refined_images, output_path, filenames)

95 changes: 95 additions & 0 deletions app/YearbookRevampLibrary/Cartoon.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
import cv2
import os
import numpy as np
from YearbookRevampLibrary.utils import output_image_files, collect_image_files


def edge_mask(img, line_size, blur_value):
    """Return a binary edge mask of *img*: grayscale, median-blur, then
    adaptive mean thresholding (line_size = block size, blur_value doubles
    as both the blur kernel and the threshold constant C)."""
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    smoothed = cv2.medianBlur(grayscale, blur_value)
    return cv2.adaptiveThreshold(smoothed, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                 cv2.THRESH_BINARY, line_size, blur_value)

def Countours(image):
    """Draw external Canny contours onto a copy of *image* and return it.

    Bug fix: the original did ``contoured_image = image``, which only aliases
    the array, so ``cv2.drawContours`` silently mutated the caller's input.
    Work on an explicit copy instead.
    """
    contoured_image = image.copy()
    gray = cv2.cvtColor(contoured_image, cv2.COLOR_BGR2GRAY)
    edged = cv2.Canny(gray, 200, 200)
    # [-2:] keeps this compatible with both the 2-tuple and 3-tuple
    # return signatures of findContours across OpenCV versions.
    contours, hierarchy = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
    cv2.drawContours(contoured_image, contours, contourIdx=-1, color=6, thickness=1)
    return contoured_image

def ColourQuantization(image, K=9):
    """Reduce *image* to at most K colours using k-means clustering on the
    pixel values, returning an image of the same shape."""
    pixels = np.float32(image.reshape((-1, 3)))
    stop_rule = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
    _compactness, labels, palette = cv2.kmeans(pixels, K, None, stop_rule, 1,
                                               cv2.KMEANS_RANDOM_CENTERS)
    # Map every pixel to its cluster centre colour, then restore the shape.
    palette = np.uint8(palette)
    quantized = palette[labels.flatten()]
    return quantized.reshape(image.shape)

def BlurredCartoonFilter(cv2_list=None, input_path=None, output_path=None):
    """Apply a smoothed cartoon effect (colour quantization + bilateral blur
    + dark edge lines) to a batch of images.

    :param cv2_list: list of cv2 objects
    :param input_path: image folder path
    :param output_path: output folder path
    :return: list of cv2 objects
    """
    images, filenames = collect_image_files(cv2_list, input_path)

    # Tuning constants hoisted out of the loop (the original re-assigned them
    # on every iteration, and aliased total_color into a redundant `k`).
    line_size = 7
    blur_value = 7
    total_color = 8
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 0.001)

    final_output = []
    for img in images:
        edges = edge_mask(img, line_size, blur_value)

        # Quantize to `total_color` colours with k-means on the raw pixels.
        data = np.float32(img).reshape((-1, 3))
        ret, label, center = cv2.kmeans(data, total_color, None, criteria, 10,
                                        cv2.KMEANS_RANDOM_CENTERS)
        center = np.uint8(center)
        result = center[label.flatten()].reshape(img.shape)

        # Smooth the flat colour regions, then re-apply the dark edge lines.
        blurred = cv2.bilateralFilter(result, d=10, sigmaColor=250, sigmaSpace=250)
        final_output.append(cv2.bitwise_and(blurred, blurred, mask=edges))

    return output_image_files(final_output, output_path, filenames)

def CartoonFilter(cv2_list=None, input_path=None, output_path=None):
    """Apply a flat cartoon effect (colour quantization followed by contour
    drawing) to a batch of images.

    :param cv2_list: list of cv2 objects
    :param input_path: image folder path
    :param output_path: output folder path
    :return: list of cv2 objects
    """
    images, filenames = collect_image_files(cv2_list, input_path)

    # Quantize each image's palette, then trace its contours on top.
    final_output = [Countours(ColourQuantization(frame)) for frame in images]

    return output_image_files(final_output, output_path, filenames)

if __name__ == "__main__":
    # Resolve paths relative to the project root (parent of this file's folder).
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.chdir(BASE_DIR)

    # Bug fix: the original passed a single pre-concatenated string with a
    # hard-coded Windows "\\" separator to os.path.join; join the path
    # components portably instead.
    input_path = os.path.join(BASE_DIR, "Input")
    output_path = os.path.join(BASE_DIR, "Output")
    CartoonFilter(cv2_list=None, input_path=input_path, output_path=output_path)


Loading