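"""HandSync: control the mouse with hand gestures.

Captures webcam frames with streamlit-webrtc, detects one hand with
MediaPipe Hands, and maps the index fingertip to the cursor via PyAutoGUI.
Run locally with `streamlit run track.py`. Assumed pip dependencies,
inferred from the imports below: opencv-python, mediapipe, pyautogui,
streamlit, streamlit-webrtc, av.
"""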
import cv2
import mediapipe as mp
import pyautogui
import streamlit as st
from streamlit_webrtc import VideoProcessorBase, webrtc_streamer
import av
# Set up CSS for styling
with open('track.css') as css:
    st.markdown(f'<style>{css.read()}</style>', unsafe_allow_html=True)
def landmarks_close(lm1, lm2, threshold=0.09):
    # True when two MediaPipe landmarks are within `threshold` of each
    # other on every axis (used to detect a thumb/index pinch).
    return (abs(lm1.x - lm2.x) < threshold and
            abs(lm1.y - lm2.y) < threshold and
            abs(lm1.z - lm2.z) < threshold)
# Disable PyAutoGUI's fail-safe (which aborts when the cursor hits a screen
# corner), since hand tracking can legitimately move the cursor there.
pyautogui.FAILSAFE = False

mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
screen_width, screen_height = pyautogui.size()

st.title("HandSync 🙌")
st.subheader("Control your mouse with hand gestures.")
class VideoProcessor(VideoProcessorBase):
    def __init__(self):
        self.hands = mp_hands.Hands(
            model_complexity=0,
            min_detection_confidence=0.9,
            min_tracking_confidence=0.9
        )

    def recv(self, frame):
        # Mirror the frame and convert to RGB for MediaPipe.
        image = frame.to_ndarray(format="bgr24")
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = cv2.flip(image, 1)

        # Keep a small BGR copy to stream back to the browser, and upscale
        # the working frame slightly past screen size so the mapped cursor
        # can reach the screen edges.
        copy_image = cv2.cvtColor(image.copy(), cv2.COLOR_RGB2BGR)
        copy_image = cv2.resize(copy_image, (screen_width // 5, screen_height // 5), interpolation=cv2.INTER_AREA)
        image = cv2.resize(image, (int(screen_width * 1.2), int(screen_height * 1.2)), interpolation=cv2.INTER_AREA)

        results = self.hands.process(image)
        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                # Landmarks are drawn on the full-size working frame for
                # debugging; only the small copy is streamed back.
                mp_drawing.draw_landmarks(
                    image,
                    hand_landmarks,
                    mp_hands.HAND_CONNECTIONS,
                    mp_drawing.DrawingSpec(color=(0, 0, 255), thickness=2, circle_radius=2),
                    mp_drawing.DrawingSpec(color=(0, 255, 0), thickness=2, circle_radius=2)
                )
                height, width, _ = image.shape
                for idx, landmark in enumerate(hand_landmarks.landmark):
                    cx, cy = int(landmark.x * width), int(landmark.y * height)
                    # Landmark 4 is the thumb tip and landmark 8 the index
                    # fingertip: a pinch (tips close together) drags;
                    # otherwise the index fingertip moves the cursor.
                    if not landmarks_close(hand_landmarks.landmark[4], hand_landmarks.landmark[8]):
                        if idx == 8:
                            cv2.circle(copy_image, (cx // 5, cy // 5), 1, (255, 0, 0), 3)
                            pyautogui.moveTo(cx, cy)
                            print('Moving Mouse!')
                    elif idx == 8:
                        pyautogui.dragTo(cx, cy)
                        print('Dragging!')
        return av.VideoFrame.from_ndarray(copy_image, format="bgr24")
# Start the WebRTC streamer
webrtc_streamer(key="hand-tracking", video_processor_factory=VideoProcessor)
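# Note (assumption about deployment): PyAutoGUI moves the mouse on the
# machine running this script, so gesture control only works when the app
# is run locally, not for remote viewers of a hosted instance.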
# Instructions for users
st.write("Move the mouse with your index finger and click/drag by pinching your thumb and index finger together.")
st.markdown('---')
st.write('Developed by Herbert Sekpey with ❤️')