from XEdu.hub import Workflow as wf
import cv2
import numpy as np
import paho.mqtt.client as mqtt
# MQTT broker address and port
broker_address = "nbzch.cn"
broker_port = 1883
# MQTT topic
mqtt_topic = "led"
# Initialize the MQTT client
mqtt_client = mqtt.Client()
# Connect to the MQTT broker
mqtt_client.connect(broker_address, broker_port)
cap = cv2.VideoCapture(0)
body = wf(task='body17')  # instantiate the pose-estimation model
det = wf(task='bodydetect')  # instantiate the body-detection model
hands_up_prev = False  # tracks the previous hands-up state
# Set the initial window size
cv2.namedWindow('video', cv2.WINDOW_NORMAL)
cv2.resizeWindow('video', 1280, 1024)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    bboxs = det.inference(data=frame, thr=0.3)
    img = frame
    hands_up = False
    for i, bbox in enumerate(bboxs):
        keypoints, img = body.inference(data=img, img_type='cv2', bbox=bbox)
        # Nose position
        nose_x, nose_y = keypoints[0][0], keypoints[0][1]
        # Heights (y-coordinates) of both wrists
        left_hand_y = keypoints[9][1]
        right_hand_y = keypoints[10][1]
        # Both hands are raised if both wrists are above the nose (smaller y means higher in the image)
        if left_hand_y < nose_y and right_hand_y < nose_y:
            hands_up = True
    for [x1, y1, x2, y2] in bboxs:  # draw the detection boxes
        cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
    if hands_up != hands_up_prev:  # publish an MQTT message only when the state changes
        if hands_up:
            mqtt_client.publish(mqtt_topic, "on")
            cv2.putText(img, "Hands Up", (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        else:
            mqtt_client.publish(mqtt_topic, "off")
        hands_up_prev = hands_up  # update the stored state
    cv2.imshow('video', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
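
For reference, the receiving side only needs to subscribe to the same "led" topic and react to the "on"/"off" payloads published above. The sketch below is a minimal, illustrative example and not part of the original program; the broker address is reused from above, and the print statements standing in for real LED/GPIO control are assumptions.

# --- Illustrative subscriber sketch (assumptions: same broker/topic, print as LED stand-in) ---
import paho.mqtt.client as mqtt

def on_connect(client, userdata, flags, rc):
    # Subscribe to the "led" topic once the connection is established
    client.subscribe("led")

def on_message(client, userdata, msg):
    command = msg.payload.decode()
    if command == "on":
        print("LED on")    # replace with real GPIO control on your device
    elif command == "off":
        print("LED off")

client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("nbzch.cn", 1883)
client.loop_forever()  # block and process incoming messages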