Python OpenCV in Practice - Gesture Volume Control
2023-12-21 00:37:25
This post builds a gesture-based volume control on top of the hand-tracking feature from the earlier post; the code reuses the HandDetector class wrapped up there. You can find that article here:
Volume control is done with pycaw; to install it, simply run pip install pycaw.
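As a quick sanity check that pycaw works on your machine, you can query and set the master volume directly. This is only a minimal sketch, independent of the hand-tracking code below; the exact volume range it prints depends on your audio device:

from ctypes import cast, POINTER
from comtypes import CLSCTX_ALL
from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume

# Get the default speaker endpoint and its IAudioEndpointVolume interface
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))

print(volume.GetVolumeRange())        # (min dB, max dB, step), e.g. (-63.5, 0.0, 0.5) on my machine
print(volume.GetMasterVolumeLevel())  # current master level in dB
volume.SetMasterVolumeLevel(-20.0, None)  # set an absolute level in dB (must lie inside the range)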
The full code is as follows:
import cv2 as cv
import math
import mediapipe as mp
import time
from ctypes import cast, POINTER
from comtypes import CLSCTX_ALL
# Use pycaw for volume control: pip install pycaw
from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume
class HandDetector():
    def __init__(self, mode=False,
                 maxNumHands=2,
                 modelComplexity=1,
                 minDetectionConfidence=0.5,
                 minTrackingConfidence=0.5):
        self.mode = mode
        self.maxNumHands = maxNumHands
        self.modelComplexity = modelComplexity
        self.minDetectionConfidence = minDetectionConfidence
        self.minTrackingConfidence = minTrackingConfidence
        # Create the mediapipe solutions.hands objects
        self.mpHands = mp.solutions.hands
        self.handsDetector = self.mpHands.Hands(self.mode, self.maxNumHands, self.modelComplexity,
                                                self.minDetectionConfidence, self.minTrackingConfidence)
        # Create the mediapipe drawing utilities
        self.mpDrawUtils = mp.solutions.drawing_utils
    def findHands(self, img, drawOnImage=True):
        # The mediapipe hand detector expects RGB input;
        # OpenCV's default format is BGR, so convert first
        imgRGB = cv.cvtColor(img, cv.COLOR_BGR2RGB)
        # Run detection with the hand detector's process method
        self.results = self.handsDetector.process(imgRGB)
        #print(results.multi_hand_landmarks)
        # A non-empty multi_hand_landmarks means at least one hand was detected
        if self.results.multi_hand_landmarks:
            # Iterate over the landmarks of every detected hand
            for handLandmarks in self.results.multi_hand_landmarks:
                if drawOnImage:
                    self.mpDrawUtils.draw_landmarks(img, handLandmarks, self.mpHands.HAND_CONNECTIONS)
        return img
    # Look up the landmark list of a given hand from the detection results
    def findHandPositions(self, img, handID=0, drawOnImage=True):
        landmarkList = []
        if self.results.multi_hand_landmarks:
            handLandmarks = self.results.multi_hand_landmarks[handID]
            for id, landmark in enumerate(handLandmarks.landmark):
                # Convert each landmark's normalized x/y into pixel coordinates of the frame
                h, w, c = img.shape
                centerX, centerY = int(landmark.x * w), int(landmark.y * h)
                landmarkList.append([id, centerX, centerY])
                if drawOnImage:
                    # Draw the landmark as a circle
                    cv.circle(img, (centerX, centerY), 8, (0, 255, 0))
        return landmarkList
def DisplayFPS(img, preTime):
    curTime = time.time()
    if curTime - preTime == 0:
        return curTime
    fps = 1 / (curTime - preTime)
    cv.putText(img, "FPS:" + str(int(fps)), (10, 70), cv.FONT_HERSHEY_PLAIN,
               3, (0, 255, 0), 3)
    return curTime
def AudioEndpointGet():
    devices = AudioUtilities.GetSpeakers()
    interface = devices.Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
    volume = cast(interface, POINTER(IAudioEndpointVolume))
    volumeRange = volume.GetVolumeRange()
    return volume, volumeRange
def AudioVolumeLevelSet(volume, volumeRange, value):
    if volume:
        if value < volumeRange[0] or value > volumeRange[1]:
            return
        volume.SetMasterVolumeLevel(value, None)
def main():
    video = cv.VideoCapture('../../SampleVideos/handVolumeControl.mp4')
    # For the FPS display
    preTime = 0
    handDetector = HandDetector(minDetectionConfidence=0.7)
    volume, volumeRange = AudioEndpointGet()
    print(volumeRange)
    #AudioVolumeLevelSet(volume, volumeRange, volumeRange[0])
    minFingerDistance = 1000
    maxFingerDistance = 0
    while True:
        ret, frame = video.read()
        if not ret:
            break
        frame = handDetector.findHands(frame)
        hand0Landmarks = handDetector.findHandPositions(frame)
        if len(hand0Landmarks) != 0:
            #print(hand0Landmarks[4], hand0Landmarks[8])
            # Get the coordinates of the thumb tip (4) and the index fingertip (8)
            thumbX, thumbY = hand0Landmarks[4][1], hand0Landmarks[4][2]
            indexFingerX, indexFingerY = hand0Landmarks[8][1], hand0Landmarks[8][2]
            # Midpoint between the two fingertips
            cx, cy = (thumbX + indexFingerX) / 2, (thumbY + indexFingerY) / 2
            # Highlight the two fingertips with filled circles
            cv.circle(frame, (thumbX, thumbY), 18, (90, 220, 180), cv.FILLED)
            cv.circle(frame, (indexFingerX, indexFingerY), 18, (0, 120, 255), cv.FILLED)
            # Draw the line connecting the two points
            cv.line(frame, (thumbX, thumbY), (indexFingerX, indexFingerY), (255, 60, 60), 3)
            # Distance between the thumb tip and the index fingertip
            distance = math.hypot(thumbX - indexFingerX, thumbY - indexFingerY)
            # Track the min/max fingertip distance; a better approach would be a live
            # calibration step with the camera before starting to control the volume.
            # Here I simply use the min/max observed in the video (about 30 - 425 in my recording).
            if distance < minFingerDistance:
                minFingerDistance = distance
            if distance > maxFingerDistance:
                maxFingerDistance = distance
            #print(distance)
            if distance < 40:
                # Show the midpoint in green and set the volume to the minimum
                cv.circle(frame, (int(cx), int(cy)), 18, (0, 255, 0), cv.FILLED)
                AudioVolumeLevelSet(volume, volumeRange, volumeRange[0])
            else:
                cv.circle(frame, (int(cx), int(cy)), 18, (0, 0, 255), cv.FILLED)
                # For simplicity, scale against 425 (the maximum distance in this video).
                # On my machine volumeRange is -63.5 to 0 with a 0.5 step.
                value = volumeRange[0] * (1 - (distance / 425))
                print(value)
                AudioVolumeLevelSet(volume, volumeRange, value)
        preTime = DisplayFPS(frame, preTime)
        cv.imshow('Real Time Hand Detection', frame)
        if cv.waitKey(30) & 0xFF == ord('q'):
            break
    print("Min & Max distance between thumb and index finger tips: ", minFingerDistance, maxFingerDistance)
    video.release()
    cv.destroyAllWindows()

if __name__ == "__main__":
    main()
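One possible improvement, not in the original code: instead of hard-coding the 425-pixel maximum from my test video, the fingertip distance can be mapped into the device's dB range with numpy.interp once calibrated min/max distances are known. A minimal sketch, assuming bounds of roughly 30 and 425 pixels and the -63.5 dB to 0 dB range from my machine:

import numpy as np

def distance_to_volume(distance, dist_range=(30, 425), vol_range=(-63.5, 0.0)):
    # Linearly map the fingertip distance (pixels) into the endpoint's dB range;
    # np.interp also clamps values that fall outside dist_range.
    # The default bounds are just what I observed in my video and on my machine;
    # a real application should calibrate them per camera and per audio device.
    return float(np.interp(distance, dist_range, vol_range))

print(distance_to_volume(30))    # -63.5 (minimum volume)
print(distance_to_volume(425))   # 0.0 (maximum volume)

Note that dB is a logarithmic scale, so a linear mapping in dB does not feel linear in loudness; pycaw also exposes SetMasterVolumeLevelScalar(value, None), which takes a 0.0 to 1.0 scalar and may feel more natural for this kind of control.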
For a demo of the result, see my Bilibili video:
Source: https://blog.csdn.net/vivo01/article/details/135118979