-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathZoomCamera.py
More file actions
73 lines (59 loc) · 2.59 KB
/
ZoomCamera.py
File metadata and controls
73 lines (59 loc) · 2.59 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
import cv2
import mediapipe as mp
import platform
# Current zoom scale shared across frames; zoom_frame() mutates this to
# animate a gradual zoom (0.1 per call).
gb_zoom = 0
# MediaPipe Hands setup: detector plus drawing helpers for the landmark overlay.
mpHands = mp.solutions.hands
hands = mpHands.Hands()
mpDraw = mp.solutions.drawing_utils
# Index of the hand to track when multiple hands are detected.
handNo = 0
# NOTE(review): never read or written anywhere in this file — presumably a
# placeholder for camera-device selection; confirm before removing.
device_val = None
def zoom_frame(image, same_image, coord, zoom_type="transition"):
    """Return ``same_image`` zoomed around a point, animating the zoom level.

    Args:
        image: BGR frame used only for its shape (output geometry).
        same_image: the frame that is actually warped and returned
            (the un-annotated copy of ``image``).
        coord: ``[x, y]`` pixel point to zoom around, or ``None`` to use
            the frame centre.
        zoom_type: ``"transition"`` to zoom in by 0.1 per call (capped at
            3.0); ``None`` to ease back out by 0.1 per call toward 1.4.
            Defaults to ``"transition"``, matching the original behaviour.

    Returns:
        The warped (zoomed) version of ``same_image``.
    """
    global gb_zoom
    # Animate: zoom in while a transition is active, otherwise ease back out.
    # BUG FIX: the original compared against the misspelled literal
    # "transitioin", so gb_zoom never changed.
    if zoom_type == "transition" and gb_zoom < 3.0:
        gb_zoom = gb_zoom + 0.1
    if gb_zoom != 1.4 and zoom_type is None:
        gb_zoom = gb_zoom - 0.1
    # Never scale below 1.0: gb_zoom starts at 0, which would shrink the
    # frame toward nothing during the first few calls.
    zoom = max(gb_zoom, 1.0)
    # Default to the frame centre when no anchor is given.  image.shape[:-1]
    # is (height, width), hence (cy, cx); coord is (x, y), hence the reversal.
    cy, cx = [i / 2 for i in image.shape[:-1]] if coord is None else coord[::-1]
    # A rotation matrix with angle 0 and scale `zoom` is a pure scaling about
    # (cx, cy).  BUG FIX: the original hard-coded scale 1.5, ignoring the
    # animated zoom level it had just computed.
    rot_mat = cv2.getRotationMatrix2D((cx, cy), 0, zoom)
    # warpAffine keeps straight lines straight; output size is (width, height).
    # The original also warped `image` into an unused `result1` — removed.
    return cv2.warpAffine(
        same_image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR
    )
def zoomat(img):
    """Detect a hand in a BGR frame and zoom in around two fingertips.

    Draws the MediaPipe hand skeleton onto a working copy of the frame,
    then returns the original (un-annotated) frame zoomed around a point
    derived from the pinky-tip and thumb-tip landmarks.  When no hand is
    detected, the frame is zoomed around its centre (``coord=None``).

    Args:
        img: BGR frame (e.g. from ``cv2.VideoCapture.read``).

    Returns:
        The (possibly zoomed) BGR frame.
    """
    unl_image = img  # keep an un-annotated reference for the final output
    lmList = []
    # MediaPipe expects RGB input.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    results = hands.process(img)
    if results.multi_hand_landmarks:
        for hand_landmarks in results.multi_hand_landmarks:
            mpDraw.draw_landmarks(img, hand_landmarks, mpHands.HAND_CONNECTIONS)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    coordinates = None
    if results.multi_hand_landmarks:
        myHand = results.multi_hand_landmarks[handNo]
        # Hoisted out of the loop: the frame shape is invariant.
        height, width, _ = img.shape
        # Convert normalized landmark positions to pixel coordinates.
        for id, lm in enumerate(myHand.landmark):
            cx, cy = int(lm.x * width), int(lm.y * height)
            lmList.append([id, cx, cy])
        # Landmark 20 = pinky tip, landmark 4 = thumb tip.  (The original
        # comments called these "ears"/"nose"; they are hand landmarks.)
        pinky_tip = lmList[20][1:]
        thumb_tip = lmList[4][1:]
        # Zoom anchor: pinky-tip x, thumb-tip y.
        # BUG FIX: the original also computed right_ear_x/left_ear_x by
        # re-multiplying already-pixel coordinates by the frame width; those
        # values were never used and have been removed, along with the
        # unused `nose` and `zoom_transition` locals.
        coordinates = [pinky_tip[0], thumb_tip[1]]
    # Zoom the clean frame around the chosen anchor point.
    img = zoom_frame(img, unl_image, coordinates)
    return img