Rumor has it that more than a few of us, stuck at home through the pandemic, have quietly put on some weight.
Now Liu Genghong's (刘畊宏) "shuttlecock-kick workout" has swept the whole internet, Jay Chou's "Ben Cao Gang Mu" (《本草纲目》) is a hit all over again, and everyone is turning into a "Liu Genghong girl" or "Liu Genghong boy".
AI can make our workouts more fun, so today I am building a "shuttlecock-kick workout AI training companion". Let's burn some calories together!
Step 1: Hardware
Step 2: MediaPipe
MediaPipe Pose uses a two-step detector-tracker ML pipeline, an approach already proven in the MediaPipe Hands and MediaPipe Face Mesh solutions. The detector first locates the person/pose region of interest (ROI) within the frame; the tracker then predicts the pose landmarks and segmentation mask inside that ROI, using the ROI-cropped frame as input. Note that for video, the detector is only invoked when needed, i.e. for the first frame and whenever the tracker can no longer find a body pose in the previous frame; for all other frames the pipeline simply derives the ROI from the previous frame's pose landmarks.
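To make the pipeline concrete, here is a minimal smoke test (my own sketch, not part of the project code below) that opens a webcam, runs MediaPipe Pose in video mode, and draws the 33 landmarks; with static_image_mode=False the person detector only fires on the first frame or when tracking is lost, exactly as described above.

import cv2
import mediapipe as mp

mp_pose = mp.solutions.pose
mp_draw = mp.solutions.drawing_utils

# Video mode: the detector runs only when needed, otherwise the ROI is
# derived from the previous frame's landmarks.
with mp_pose.Pose(static_image_mode=False,
                  min_detection_confidence=0.5,
                  min_tracking_confidence=0.5) as pose:
    cap = cv2.VideoCapture(0)
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        # MediaPipe expects RGB input; OpenCV frames are BGR.
        results = pose.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        if results.pose_landmarks:
            mp_draw.draw_landmarks(frame, results.pose_landmarks,
                                   mp_pose.POSE_CONNECTIONS)
        cv2.imshow("pose check", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()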
Step 3: Installing the MediaPipe library in Mind+
In Mind+ Python mode, open "Library Management" and install "mediapipe" using "PIP mode".
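If you are not using Mind+, the equivalent from an ordinary terminal is pip install mediapipe (plus opencv-python, and pygame for the sound effect in Step 6).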
Step 4: Testing the hand coordinate points
"""
Pose Module
By: Computer Vision Zone
Website: https://www.computervision.zone/
"""
import cv2
import mediapipe as mp
import math
class PoseDetector:
"""
Estimates Pose points of a human body using the mediapipe library.
"""
def __init__(self, mode=False, smooth=True,
detectionCon=0.5, trackCon=0.5):
"""
:param mode: In static mode, detection is done on each image: slower
:param smooth: Smoothness Flag
:param detectionCon: Minimum Detection Confidence Threshold
:param trackCon: Minimum Tracking Confidence Threshold
"""
self.mode = mode
self.smooth = smooth
self.detectionCon = detectionCon
self.trackCon = trackCon
self.mpDraw = mp.solutions.drawing_utils
self.mpPose = mp.solutions.pose
self.pose = self.mpPose.Pose(static_image_mode=self.mode,
smooth_landmarks=self.smooth,
min_detection_confidence=self.detectionCon,
min_tracking_confidence=self.trackCon)
def findPose(self, img, draw=True):
"""
Find the pose landmarks in an Image of BGR color space.
:param img: Image to find the pose in.
:param draw: Flag to draw the output on the image.
:return: Image with or without drawings
"""
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
self.results = self.pose.process(imgRGB)
if self.results.pose_landmarks:
if draw:
self.mpDraw.draw_landmarks(img, self.results.pose_landmarks,
self.mpPose.POSE_CONNECTIONS)
return img
def findPosition(self, img, draw=True, bboxWithHands=False):
self.lmList = []
self.bboxInfo = {}
if self.results.pose_landmarks:
for id, lm in enumerate(self.results.pose_landmarks.landmark):
h, w, c = img.shape
cx, cy, cz = int(lm.x * w), int(lm.y * h), int(lm.z * w)
self.lmList.append([id, cx, cy, cz])
# Bounding Box
ad = abs(self.lmList[12][1] - self.lmList[11][1]) // 2
if bboxWithHands:
x1 = self.lmList[16][1] - ad
x2 = self.lmList[15][1] + ad
else:
x1 = self.lmList[12][1] - ad
x2 = self.lmList[11][1] + ad
y2 = self.lmList[29][2] + ad
y1 = self.lmList[1][2] - ad
bbox = (x1, y1, x2 - x1, y2 - y1)
cx, cy = bbox[0] + (bbox[2] // 2), \
bbox[1] + bbox[3] // 2
self.bboxInfo = {"bbox": bbox, "center": (cx, cy)}
if draw:
cv2.rectangle(img, bbox, (255, 0, 255), 3)
cv2.circle(img, (cx, cy), 5, (255, 0, 0), cv2.FILLED)
return self.lmList, self.bboxInfo
def findAngle(self, img, p1, p2, p3, draw=True):
"""
Finds angle between three points. Inputs index values of landmarks
instead of the actual points.
:param img: Image to draw output on.
:param p1: Point1 - Index of Landmark 1.
:param p2: Point2 - Index of Landmark 2.
:param p3: Point3 - Index of Landmark 3.
:param draw: Flag to draw the output on the image.
:return:
"""
# Get the landmarks
        x1, y1 = self.lmList[p1][1:3]
        x2, y2 = self.lmList[p2][1:3]
        x3, y3 = self.lmList[p3][1:3]
# Calculate the Angle
angle = math.degrees(math.atan2(y3 - y2, x3 - x2) -
math.atan2(y1 - y2, x1 - x2))
if angle < 0:
angle += 360
# Draw
if draw:
cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 3)
cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 3)
cv2.circle(img, (x1, y1), 10, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x1, y1), 15, (0, 0, 255), 2)
cv2.circle(img, (x2, y2), 10, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x2, y2), 15, (0, 0, 255), 2)
cv2.circle(img, (x3, y3), 10, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x3, y3), 15, (0, 0, 255), 2)
cv2.putText(img, str(int(angle)), (x2 - 50, y2 + 50),
cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 2)
return angle
def findDistance(self, p1, p2,img):
x1, y1 = self.lmList[p1][1:3]
x2, y2 = self.lmList[p2][1:3]
cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
length = math.hypot(x2 - x1, y2 - y1)
if length<50:
cv2.circle(img, ((x1+x2)//2,(y1+y2)//2),int(length), (255, 255, 255), 10)
return length,img
def angleCheck(self, myAngle, targetAngle, addOn=20):
return targetAngle - addOn < myAngle < targetAngle + addOn
def main():
cap = cv2.VideoCapture(0)
detector = PoseDetector()
while True:
success, img = cap.read()
img = detector.findPose(img)
lmList, bboxInfo = detector.findPosition(img, bboxWithHands=False)
if bboxInfo:
center = bboxInfo["center"]
cv2.circle(img, center, 5, (255, 0, 255), cv2.FILLED)
length,img=detector.findDistance(16,15,img)
print(lmList[16][1:3])
cv2.imshow("Image", img)
cv2.waitKey(1)
if __name__ == "__main__":
main()
Step 5: Expressing the distance between two points as a ratio
Because a person looks larger when close to the camera and smaller when far away, a fixed pixel value is not a reliable way to express the distance between two pose landmarks. Instead, I take the ratio of the landmark-to-landmark distance to the distance between the shoulder midpoint and the hip midpoint, and use that ratio to judge whether two points are close.
For example: divide the distance from the right hand (16) to the right knee (26) by the distance from the shoulder midpoint (C1) to the hip midpoint (C2).
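Before folding it into the module, here is a minimal sketch of the ratio itself (touch_ratio is a hypothetical helper of mine; it assumes lmList entries of the form [id, x, y, z] as produced by findPosition(), with landmarks 11/12 the shoulders and 23/24 the hips):

import math

def touch_ratio(lmList, p1, p2):
    # C1: midpoint of the two shoulders; C2: midpoint of the two hips.
    c1x = (lmList[11][1] + lmList[12][1]) / 2
    c1y = (lmList[11][2] + lmList[12][2]) / 2
    c2x = (lmList[23][1] + lmList[24][1]) / 2
    c2y = (lmList[23][2] + lmList[24][2]) / 2
    torso = math.hypot(c2x - c1x, c2y - c1y)  # reference length C1-C2
    # Distance between the two landmarks of interest,
    # e.g. right wrist (16) and right knee (26).
    d = math.hypot(lmList[p2][1] - lmList[p1][1],
                   lmList[p2][2] - lmList[p1][2])
    return d / torso  # scale-invariant: standing closer or farther does not change it

The full revised module, with this ratio built into findDistance(), follows.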
"""
Pose Module
By: Computer Vision Zone
Website: https://www.computervision.zone/
"""
import cv2
import mediapipe as mp
import math
class PoseDetector:
"""
Estimates Pose points of a human body using the mediapipe library.
"""
def __init__(self, mode=False, smooth=True,
detectionCon=0.5, trackCon=0.5):
"""
:param mode: In static mode, detection is done on each image: slower
:param smooth: Smoothness Flag
:param detectionCon: Minimum Detection Confidence Threshold
:param trackCon: Minimum Tracking Confidence Threshold
"""
self.mode = mode
self.smooth = smooth
self.detectionCon = detectionCon
self.trackCon = trackCon
self.mpDraw = mp.solutions.drawing_utils
self.mpPose = mp.solutions.pose
self.pose = self.mpPose.Pose(static_image_mode=self.mode,
smooth_landmarks=self.smooth,
min_detection_confidence=self.detectionCon,
min_tracking_confidence=self.trackCon)
def findPose(self, img, draw=True):
"""
Find the pose landmarks in an Image of BGR color space.
:param img: Image to find the pose in.
:param draw: Flag to draw the output on the image.
:return: Image with or without drawings
"""
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
self.results = self.pose.process(imgRGB)
if self.results.pose_landmarks:
if draw:
self.mpDraw.draw_landmarks(img, self.results.pose_landmarks,
self.mpPose.POSE_CONNECTIONS)
return img
def findPosition(self, img, draw=True, bboxWithHands=False):
self.lmList = []
self.bboxInfo = {}
if self.results.pose_landmarks:
for id, lm in enumerate(self.results.pose_landmarks.landmark):
h, w, c = img.shape
cx, cy, cz = int(lm.x * w), int(lm.y * h), int(lm.z * w)
self.lmList.append([id, cx, cy, cz])
# Bounding Box
ad = abs(self.lmList[12][1] - self.lmList[11][1]) // 2
if bboxWithHands:
x1 = self.lmList[16][1] - ad
x2 = self.lmList[15][1] + ad
else:
x1 = self.lmList[12][1] - ad
x2 = self.lmList[11][1] + ad
y2 = self.lmList[29][2] + ad
y1 = self.lmList[1][2] - ad
bbox = (x1, y1, x2 - x1, y2 - y1)
cx, cy = bbox[0] + (bbox[2] // 2), \
bbox[1] + bbox[3] // 2
self.bboxInfo = {"bbox": bbox, "center": (cx, cy)}
if draw:
cv2.rectangle(img, bbox, (255, 0, 255), 3)
cv2.circle(img, (cx, cy), 5, (255, 0, 0), cv2.FILLED)
return self.lmList, self.bboxInfo
def findAngle(self, img, p1, p2, p3, draw=True):
"""
Finds angle between three points. Inputs index values of landmarks
instead of the actual points.
:param img: Image to draw output on.
:param p1: Point1 - Index of Landmark 1.
:param p2: Point2 - Index of Landmark 2.
:param p3: Point3 - Index of Landmark 3.
:param draw: Flag to draw the output on the image.
:return:
"""
# Get the landmarks
        x1, y1 = self.lmList[p1][1:3]
        x2, y2 = self.lmList[p2][1:3]
        x3, y3 = self.lmList[p3][1:3]
# Calculate the Angle
angle = math.degrees(math.atan2(y3 - y2, x3 - x2) -
math.atan2(y1 - y2, x1 - x2))
if angle < 0:
angle += 360
# Draw
if draw:
cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 3)
cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 3)
cv2.circle(img, (x1, y1), 10, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x1, y1), 15, (0, 0, 255), 2)
cv2.circle(img, (x2, y2), 10, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x2, y2), 15, (0, 0, 255), 2)
cv2.circle(img, (x3, y3), 10, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x3, y3), 15, (0, 0, 255), 2)
cv2.putText(img, str(int(angle)), (x2 - 50, y2 + 50),
cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 2)
return angle
def findDistance(self, p1, p2,img):
x1, y1 = self.lmList[12][1:3]
x2, y2 = self.lmList[11][1:3]
cx1, cy1 = (x1 + x2) // 2, (y1 + y2) // 2
x1, y1 = self.lmList[23][1:3]
x2, y2 = self.lmList[24][1:3]
cx2, cy2 = (x1 + x2) // 2, (y1 + y2) // 2
length1 = math.hypot(cx2 - cx1, cy2 - cy1)
print(length1)
x1, y1 = self.lmList[p1][1:3]
x2, y2 = self.lmList[p2][1:3]
cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
length2 = math.hypot(x2 - x1, y2 - y1)
if length2/length1<0.3:
cv2.circle(img, ((x1+x2)//2,(y1+y2)//2),int(length2), (255, 255, 255), 10)
return length2,img
def angleCheck(self, myAngle, targetAngle, addOn=20):
return targetAngle - addOn < myAngle < targetAngle + addOn
def main():
cap = cv2.VideoCapture(0)
detector = PoseDetector()
while True:
success, img = cap.read()
img = detector.findPose(img)
lmList, bboxInfo = detector.findPosition(img, bboxWithHands=False)
if bboxInfo:
center = bboxInfo["center"]
cv2.circle(img, center, 5, (255, 0, 255), cv2.FILLED)
length,img=detector.findDistance(16,15,img)
print(lmList[16][1:3])
cv2.imshow("Image", img)
cv2.waitKey(1)
if __name__ == "__main__":
main()
When length2/length1 falls below the threshold (the code above uses 0.3), the hand is judged to have touched the knee or ankle.
Step 6: Adding a sound effect and finishing the program
import cv2
import mediapipe as mp
import math
import time
import pygame
pygame.init()
pygame.mixer.init()
sound1 = pygame.mixer.Sound("chimes.wav")
class PoseDetector:
"""
Estimates Pose points of a human body using the mediapipe library.
"""
def __init__(self, mode=False, smooth=True,
detectionCon=0.5, trackCon=0.8):
"""
:param mode: In static mode, detection is done on each image: slower
:param smooth: Smoothness Flag
:param detectionCon: Minimum Detection Confidence Threshold
:param trackCon: Minimum Tracking Confidence Threshold
"""
self.ptime=0
self.mode = mode
self.smooth = smooth
self.detectionCon = detectionCon
self.trackCon = trackCon
self.i=0
self.mpDraw = mp.solutions.drawing_utils
self.mpPose = mp.solutions.pose
self.pose = self.mpPose.Pose(static_image_mode=self.mode,
smooth_landmarks=self.smooth,
min_detection_confidence=self.detectionCon,
min_tracking_confidence=self.trackCon)
def findPose(self, img, draw=True):
"""
Find the pose landmarks in an Image of BGR color space.
:param img: Image to find the pose in.
:param draw: Flag to draw the output on the image.
:return: Image with or without drawings
"""
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
self.results = self.pose.process(imgRGB)
if self.results.pose_landmarks:
if draw:
self.mpDraw.draw_landmarks(img, self.results.pose_landmarks,
self.mpPose.POSE_CONNECTIONS)
return img
def findPosition(self, img, draw=True, bboxWithHands=False):
self.lmList = []
self.bboxInfo = {}
if self.results.pose_landmarks:
for id, lm in enumerate(self.results.pose_landmarks.landmark):
h, w, c = img.shape
cx, cy, cz = int(lm.x * w), int(lm.y * h), int(lm.z * w)
self.lmList.append([id, cx, cy, cz])
# Bounding Box
ad = abs(self.lmList[12][1] - self.lmList[11][1]) // 2
if bboxWithHands:
x1 = self.lmList[16][1] - ad
x2 = self.lmList[15][1] + ad
else:
x1 = self.lmList[12][1] - ad
x2 = self.lmList[11][1] + ad
y2 = self.lmList[29][2] + ad
y1 = self.lmList[1][2] - ad
bbox = (x1, y1, x2 - x1, y2 - y1)
cx, cy = bbox[0] + (bbox[2] // 2), \
bbox[1] + bbox[3] // 2
self.bboxInfo = {"bbox": bbox, "center": (cx, cy)}
if draw:
cv2.rectangle(img, bbox, (255, 0, 255), 3)
cv2.circle(img, (cx, cy), 5, (255, 0, 0), cv2.FILLED)
return self.lmList, self.bboxInfo
def findAngle(self, img, p1, p2, p3, draw=True):
# Get the landmarks
        x1, y1 = self.lmList[p1][1:3]
        x2, y2 = self.lmList[p2][1:3]
        x3, y3 = self.lmList[p3][1:3]
# Calculate the Angle
angle = math.degrees(math.atan2(y3 - y2, x3 - x2) -
math.atan2(y1 - y2, x1 - x2))
if angle < 0:
angle += 360
# Draw
if draw:
cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 3)
cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 3)
cv2.circle(img, (x1, y1), 10, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x1, y1), 15, (0, 0, 255), 2)
cv2.circle(img, (x2, y2), 10, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x2, y2), 15, (0, 0, 255), 2)
cv2.circle(img, (x3, y3), 10, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x3, y3), 15, (0, 0, 255), 2)
cv2.putText(img, str(int(angle)), (x2 - 50, y2 + 50),
cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 2)
return angle
def findDistance(self, p1, p2,img):
x1, y1 = self.lmList[12][1:3]
x2, y2 = self.lmList[11][1:3]
cx1, cy1 = (x1 + x2) // 2, (y1 + y2) // 2
x1, y1 = self.lmList[23][1:3]
x2, y2 = self.lmList[24][1:3]
cx2, cy2 = (x1 + x2) // 2, (y1 + y2) // 2
length1 = math.hypot(cx2 - cx1, cy2 - cy1)
x1, y1 = self.lmList[p1][1:3]
x2, y2 = self.lmList[p2][1:3]
cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
length2 = math.hypot(x2 - x1, y2 - y1)
if length2/length1<0.3:
cv2.circle(img, ((x1+x2)//2,(y1+y2)//2),int(length2), (255, 255, 255), 10)
if time.time()-self.ptime>0.3:
self.ptime=time.time()
sound1.play()
print(length2/length1)
self.i+=1
return img
def angleCheck(self, myAngle, targetAngle, addOn=20):
return targetAngle - addOn < myAngle < targetAngle + addOn
def main():
cap = cv2.VideoCapture("jianzicao1.mp4")
detector = PoseDetector()
post=[25,26,27,28]
while True:
        success, img = cap.read()
        if not success:
            break
        img = cv2.resize(img, (640, 480))
img = detector.findPose(img)
lmList, bboxInfo = detector.findPosition(img, bboxWithHands=False)
if bboxInfo:
center = bboxInfo["center"]
cv2.circle(img, center, 5, (255, 0, 255), cv2.FILLED)
for i in post:
img=detector.findDistance(16,i,img)
img=detector.findDistance(15,i,img)
cv2.putText(img, str(detector.i), (50, 200),cv2.FONT_HERSHEY_PLAIN, 8, (0, 0, 255), 5)
cv2.imshow("Image", img)
cv2.waitKey(1)
if __name__ == "__main__":
main()
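One note on running it: the final program reads the demo clip jianzicao1.mp4 from the working directory; to count kicks from a live camera instead, swap cv2.VideoCapture("jianzicao1.mp4") for cv2.VideoCapture(0).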
The sound file (chimes.wav) is provided in the attachment below.
Step 7: Testing with an online video
Step 8: Live testing
Step 9: Casting to a TV (big-screen test)
The picture is sent to the TV over an HDMI cable.
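If the OpenCV window shows up small on the TV, an optional tweak (not in the original code) is to switch the window used by cv2.imshow() to fullscreen before the display loop:

import cv2

# Use the same window name ("Image") that cv2.imshow() uses in the program above.
cv2.namedWindow("Image", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("Image", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)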