YOLO Series: Real-Time Screen Detection
PIL's ImageGrab.grab captures the screen. After converting the capture from RGB to BGR, the frame can be passed to YOLO for detection; as soon as the specified content appears on screen, it is marked in the output.
import cv2
from ultralytics import YOLO
from PIL import ImageGrab
import numpy as np
model = YOLO("../yolov8n.pt")

# Capture region: None = full screen, otherwise a box such as (100, 200, 800, 1000)
a = (100, 200, 800, 1000)
a = None

while True:
    scrn = ImageGrab.grab(bbox=a)
    # Convert RGB (PIL) -> BGR (OpenCV) so the frame can be fed to YOLO
    scrn = np.array(scrn)
    scrn = cv2.cvtColor(scrn, cv2.COLOR_RGB2BGR)

    results = model.predict(scrn)
    # results[0] holds the detections for this frame: box.xyxy are the
    # (x1, y1, x2, y2) coordinates, box.conf the confidence, box.cls the class ID.
    # for box in results[0].boxes:
    #     print("coords:", box.xyxy[0].tolist())
    #     print("confidence:", box.conf[0].item())
    #     print("class ID:", box.cls[0].item())

    annotated_frame = results[0].plot()
    cv2.imshow('jian', annotated_frame)  # window named 'jian' shows the annotated frame
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()
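If you need the raw detections rather than just the annotated window, the commented-out loop above can be turned into a standalone check. The snippet below is a minimal sketch, not part of the original code, assuming the same ../yolov8n.pt weights and a full-screen grab; model.names maps each class ID back to its label.

# Minimal sketch: print the raw detections for a single screen capture.
import cv2
import numpy as np
from PIL import ImageGrab
from ultralytics import YOLO

model = YOLO("../yolov8n.pt")
frame = cv2.cvtColor(np.array(ImageGrab.grab()), cv2.COLOR_RGB2BGR)

results = model.predict(frame)
for box in results[0].boxes:
    x1, y1, x2, y2 = box.xyxy[0].tolist()  # bounding-box corners in pixels
    conf = box.conf[0].item()              # confidence score, 0..1
    cls_id = int(box.cls[0].item())        # class index into model.names
    print(f"{model.names[cls_id]}: conf={conf:.2f}, box=({x1:.0f}, {y1:.0f}, {x2:.0f}, {y2:.0f})")

Running this once on a static capture is a quick way to see which class IDs the model actually reports before filtering on them inside a loop.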
As soon as the target of interest appears on screen, save the screenshot, for example to catch cheating:
import cv2
from ultralytics import YOLO
from PIL import ImageGrab
import numpy as np
import winsound  # needed for the beep alert below

model = YOLO(r"D:\PyCharm\LearningYOLO\da_fa_yolo\runs\detect\train4\weights\best.pt")

# Capture region: None = full screen, otherwise a box such as (100, 200, 800, 1000)
a = (100, 200, 800, 1000)
a = None

c = 0    # screenshot counter; kept outside the loop so saved files are not overwritten
t = 500  # beep duration in ms

while True:
    scrn = ImageGrab.grab(bbox=a)
    # Convert RGB (PIL) -> BGR (OpenCV)
    scrn1 = np.array(scrn)
    scrn1 = cv2.cvtColor(scrn1, cv2.COLOR_RGB2BGR)

    results = model.predict(scrn1)
    # results[0] holds this frame's detections: box.xyxy (x1, y1, x2, y2),
    # box.conf (confidence), box.cls (class ID).
    # for box in results[0].boxes:
    #     print("coords:", box.xyxy[0].tolist())
    #     print("confidence:", box.conf[0].item())
    #     print("class ID:", box.cls[0].item())

    for box in results[0].boxes:
        if box.cls[0] == 0:
            print("target found")
            # Save the original PIL screenshot
            scrn.save(fr"./t/{c}.png")
            # Beep at 1000 Hz for t ms
            winsound.Beep(1000, t)
            c += 1

    annotated_frame = results[0].plot()
    cv2.imshow('jian', annotated_frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()
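One thing worth double-checking: box.cls[0] == 0 assumes the target class of the custom best.pt has ID 0. A small hedged sketch (reusing the weights path from the code above) that prints the model's ID-to-name mapping before you decide which ID to filter on:

# Sketch: confirm which numeric class ID corresponds to the target class.
from ultralytics import YOLO

model = YOLO(r"D:\PyCharm\LearningYOLO\da_fa_yolo\runs\detect\train4\weights\best.pt")
print(model.names)  # dict mapping class IDs to the names used during training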