YOLOv11 Pose Inference and Deployment in Practice (2025)
Contents
Video inference code:
Image inference (full body) code:
Image inference (upper body) code:
YOLOv11 pose shows a clear improvement over YOLOv8 pose on overhead (top-down camera) viewpoints, and inference speed is also noticeably better.
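To sanity-check the speed difference on your own hardware, a minimal timing sketch like the one below can be used (the weight filenames follow the official Ultralytics release naming; the test image path is a placeholder):

```python
import time
import cv2
from ultralytics import YOLO

# Placeholder test image; replace with one of your own frames.
frame = cv2.imread(r"D:\data\test_frame.png")

for name in ["yolov8s-pose.pt", "yolo11s-pose.pt"]:
    model = YOLO(name)           # weights are downloaded on first use
    model(frame, verbose=False)  # warm-up run (first call includes setup cost)
    start = time.time()
    for _ in range(20):
        model(frame, verbose=False)
    print(name, "average latency:", round((time.time() - start) / 20 * 1000, 1), "ms")
```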
https://github.com/alanli1997/slim-neck-by-gsconv/tree/56813dd43394e608ee49f00d4dfdff5eb666ad18/gsconv-yolov8_9_10_11
| Model | Size<br>(pixels) | mAP<sup>pose</sup><br>50-95 | mAP<sup>pose</sup><br>50 | Speed<br>CPU ONNX<br>(ms) | Speed<br>Tesla T4 TensorRT<br>(ms) | Params<br>(M) | FLOPs<br>(B) |
| ---------------------------------------------------------------------------------------------- | ---- | ---- | ---- | ------------- | ----------- | ---- | ----- |
| [YOLO11n-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-pose.pt) | 640 | 50.0 | 81.0 | 52.40 ± 0.51 | 1.72 ± 0.01 | 2.9 | 7.6 |
| [YOLO11s-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-pose.pt) | 640 | 58.9 | 86.3 | 90.54 ± 0.59 | 2.57 ± 0.00 | 9.9 | 23.2 |
| [YOLO11m-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-pose.pt) | 640 | 64.9 | 89.4 | 187.28 ± 0.77 | 4.94 ± 0.05 | 20.9 | 71.7 |
| [YOLO11l-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-pose.pt) | 640 | 66.1 | 89.9 | 247.69 ± 1.10 | 6.42 ± 0.13 | 26.2 | 90.7 |
| [YOLO11x-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-pose.pt) | 640 | 69.5 | | | | | |
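The CPU ONNX and Tesla T4 TensorRT columns are measured on exported models. A minimal export sketch with the Ultralytics API is shown below (export formats are as documented by Ultralytics; the sample image path is a placeholder):

```python
from ultralytics import YOLO

model = YOLO("yolo11s-pose.pt")

# Export to ONNX for CPU inference; this writes yolo11s-pose.onnx next to the weights.
model.export(format="onnx")

# Export to a TensorRT engine for GPU inference (requires a local TensorRT install); half=True selects FP16.
# model.export(format="engine", half=True)

# The exported model can be loaded back through the same API for prediction.
onnx_model = YOLO("yolo11s-pose.onnx")
results = onnx_model(r"D:\data\sample.png", verbose=False)  # placeholder image path
```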
Video inference code:
```python
import time
from ultralytics import YOLO
import cv2

# Load the model
model = YOLO(r"D:\project\huichui\huichui_code\weights\yolo11s-pose.pt")

video_path = r"D:\data\Preview_10\chantu\20251021-190007.mp4"
cap = cv2.VideoCapture(video_path)

# Upper-body keypoint indices
upper_body_ids = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

# Upper-body skeleton (keypoint index pairs)
upper_body_pairs = [
    (5, 6),           # shoulder line
    (5, 7), (7, 9),   # left arm
    (6, 8), (8, 10),  # right arm
    (0, 5), (0, 6),   # nose to shoulders
]

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    frame = cv2.resize(frame, (960, 540))

    start = time.time()
    results = model(frame, verbose=False)
    print('inference time:', round(time.time() - start, 3), 's')

    annotated_frame = frame.copy()

    # Iterate over detected people
    for r in results:
        keypoints = r.keypoints
        if keypoints is None:
            continue
        # keypoints.xy: [num_person, num_keypoints, 2]
        for person in keypoints.xy:
            # Draw upper-body keypoints
            for idx in upper_body_ids:
                if idx >= len(person):
                    continue
                x, y = person[idx]
                if x > 0 and y > 0:
                    cv2.circle(annotated_frame, (int(x), int(y)), 4, (0, 255, 0), -1)
            # Draw upper-body skeleton lines
            for (p1, p2) in upper_body_pairs:
                if p1 < len(person) and p2 < len(person):
                    x1, y1 = person[p1]
                    x2, y2 = person[p2]
                    if x1 > 0 and y1 > 0 and x2 > 0 and y2 > 0:
                        cv2.line(annotated_frame, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), 2)

    cv2.imshow('YOLOv11 Upper Body Pose', annotated_frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
```
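The loop above treats any keypoint with positive coordinates as valid, but occluded joints can still be predicted inside the frame. The sketch below filters by the per-keypoint confidence that Ultralytics exposes as `keypoints.conf` (the 0.5 threshold is an assumption to tune per scene); it can replace the two drawing loops via `draw_confident_keypoints(annotated_frame, r, upper_body_ids, upper_body_pairs)`:

```python
import cv2

def draw_confident_keypoints(frame, result, ids, pairs, thresh=0.5):
    """Draw only keypoints and skeleton lines whose confidence exceeds `thresh` (assumed default 0.5)."""
    kpts = result.keypoints
    if kpts is None or kpts.conf is None:
        return frame
    xy = kpts.xy.cpu().numpy()      # [num_person, num_keypoints, 2]
    conf = kpts.conf.cpu().numpy()  # [num_person, num_keypoints]
    for person_xy, person_conf in zip(xy, conf):
        for idx in ids:
            if idx < len(person_conf) and person_conf[idx] >= thresh:
                x, y = person_xy[idx]
                cv2.circle(frame, (int(x), int(y)), 4, (0, 255, 0), -1)
        for p1, p2 in pairs:
            if max(p1, p2) < len(person_conf) and min(person_conf[p1], person_conf[p2]) >= thresh:
                x1, y1 = person_xy[p1]
                x2, y2 = person_xy[p2]
                cv2.line(frame, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), 2)
    return frame
```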
Image inference (full body) code:
```python
import glob
import time
from ultralytics import YOLO
import cv2

# Load the model
model = YOLO(r"D:\project\huichui\huichui_code\weights\yolo11s-pose.pt")

# Image directory
dir_a = r"D:\data\Preview_10\chantu\imgs/"
files = glob.glob(dir_a + "*.png")

# Full-body keypoint indices (COCO format, 17 keypoints)
# 0:nose, 1:left_eye, 2:right_eye, 3:left_ear, 4:right_ear,
# 5:left_shoulder, 6:right_shoulder, 7:left_elbow, 8:right_elbow,
# 9:left_wrist, 10:right_wrist, 11:left_hip, 12:right_hip,
# 13:left_knee, 14:right_knee, 15:left_ankle, 16:right_ankle
full_body_ids = list(range(17))

# Full-body skeleton (COCO skeleton connection rules)
full_body_pairs = [
    (5, 6),              # shoulders
    (5, 7), (7, 9),      # left arm
    (6, 8), (8, 10),     # right arm
    (5, 11), (6, 12),    # torso to hips
    (11, 12),            # hip line
    (11, 13), (13, 15),  # left leg
    (12, 14), (14, 16),  # right leg
    (0, 5), (0, 6),      # nose to shoulders
    (0, 1), (0, 2),      # nose to eyes
    (1, 3), (2, 4),      # eyes to ears
]

for img_path in files:
    frame = cv2.imread(img_path)

    start = time.time()
    results = model(frame, verbose=False)
    print('inference time:', round(time.time() - start, 3), 's')

    annotated_frame = frame.copy()

    # Iterate over detected people
    for r in results:
        keypoints = r.keypoints
        if keypoints is None:
            continue
        # keypoints.xy: [num_person, num_keypoints, 2]
        for person in keypoints.xy:
            # Draw full-body keypoints
            for idx in full_body_ids:
                if idx >= len(person):
                    continue
                x, y = person[idx]
                if x > 0 and y > 0:
                    cv2.circle(annotated_frame, (int(x), int(y)), 4, (0, 255, 0), -1)
            # Draw full-body skeleton lines
            for (p1, p2) in full_body_pairs:
                if p1 < len(person) and p2 < len(person):
                    x1, y1 = person[p1]
                    x2, y2 = person[p2]
                    if x1 > 0 and y1 > 0 and x2 > 0 and y2 > 0:
                        cv2.line(annotated_frame, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), 2)

    annotated_frame = cv2.resize(annotated_frame, (960, 540))
    cv2.imshow('YOLOv11 Full Body Pose', annotated_frame)

    # Press any key for the next image, q to quit
    key = cv2.waitKey(0)
    if key & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()
```
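If the standard COCO skeleton is all that is needed, the manual drawing loops can be replaced by the built-in `Results.plot()` call; a minimal sketch reusing the same paths as above:

```python
import glob
import cv2
from ultralytics import YOLO

model = YOLO(r"D:\project\huichui\huichui_code\weights\yolo11s-pose.pt")

for img_path in glob.glob(r"D:\data\Preview_10\chantu\imgs/*.png"):
    results = model(img_path, verbose=False)
    annotated = results[0].plot()  # BGR image with boxes and the COCO skeleton drawn
    annotated = cv2.resize(annotated, (960, 540))
    cv2.imshow("YOLOv11 Pose (built-in plot)", annotated)
    if cv2.waitKey(0) & 0xFF == ord("q"):
        break

cv2.destroyAllWindows()
```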
Image inference (upper body) code:
```python
import glob
import time
from ultralytics import YOLO
import cv2

# Load the model
model = YOLO(r"D:\project\huichui\huichui_code\weights\yolo11s-pose.pt")

# Upper-body keypoint indices
upper_body_ids = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

# Upper-body skeleton (keypoint index pairs)
upper_body_pairs = [
    (5, 6),           # shoulder line
    (5, 7), (7, 9),   # left arm
    (6, 8), (8, 10),  # right arm
    (0, 5), (0, 6),   # nose to shoulders
]

# Image directory
dir_a = r"D:\data\Preview_10\chantu\imgs/"
files = glob.glob(dir_a + "*.png")

for img_path in files:
    frame = cv2.imread(img_path)

    start = time.time()
    results = model(frame, verbose=False)
    print('inference time:', round(time.time() - start, 3), 's')

    annotated_frame = frame.copy()

    # Iterate over detected people
    for r in results:
        keypoints = r.keypoints
        if keypoints is None:
            continue
        # keypoints.xy: [num_person, num_keypoints, 2]
        for person in keypoints.xy:
            # Draw upper-body keypoints
            for idx in upper_body_ids:
                if idx >= len(person):
                    continue
                x, y = person[idx]
                if x > 0 and y > 0:
                    cv2.circle(annotated_frame, (int(x), int(y)), 4, (0, 255, 0), -1)
            # Draw upper-body skeleton lines
            for (p1, p2) in upper_body_pairs:
                if p1 < len(person) and p2 < len(person):
                    x1, y1 = person[p1]
                    x2, y2 = person[p2]
                    if x1 > 0 and y1 > 0 and x2 > 0 and y2 > 0:
                        cv2.line(annotated_frame, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), 2)

    annotated_frame = cv2.resize(annotated_frame, (960, 540))
    cv2.imshow('YOLOv11 Upper Body Pose', annotated_frame)

    # Press any key for the next image, q to quit
    if cv2.waitKey(0) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()
```

(The leftover `cv2.VideoCapture` / `cap.release()` calls from the video script are removed here, since this version only reads still images.)
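For downstream logic (for example, rules on arm positions) the keypoints are usually more useful as data than as drawings. The sketch below collects per-image upper-body keypoints into a JSON file; the output filename and record layout are assumptions:

```python
import glob
import json
from ultralytics import YOLO

model = YOLO(r"D:\project\huichui\huichui_code\weights\yolo11s-pose.pt")
upper_body_ids = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

records = {}
for img_path in glob.glob(r"D:\data\Preview_10\chantu\imgs/*.png"):
    result = model(img_path, verbose=False)[0]
    people = []
    if result.keypoints is not None:
        for person in result.keypoints.xy.cpu().numpy():  # [num_keypoints, 2]
            people.append({str(i): [float(person[i][0]), float(person[i][1])]
                           for i in upper_body_ids if i < len(person)})
    records[img_path] = people

# Assumed output location; adjust as needed.
with open("upper_body_keypoints.json", "w", encoding="utf-8") as f:
    json.dump(records, f, ensure_ascii=False, indent=2)
```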