YOLOv5-入门篇笔记
1.创建环境
conda create -n yolov5 python=3.8
去pytorch.org下载1.8.2的版本。
pip --default-timeout=1688 install torch==1.8.2 torchvision==0.9.2 torchaudio==0.8.2 --extra-index-url https://download.pytorch.org/whl/lts/1.8/cu111
github上下载yolov5的zip
pip --default-timeout=1688 install -r requirements.txt
通过网盘分享的文件:yolov5
链接: https://pan.baidu.com/s/1oigw2mPo9uVoX4hWXmiGWA?pwd=npue 提取码: npue
安装jupyter
pip install pywinpty==2.0.5
pip --default-timeout=1688 install jupyterlab
安装标注工具
pip install labelimg
pip install pyqt5 sip
2.模型检测
weights
训练好的模型文件
python detect.py --weights yolov5x.pt
source
检测的目标,可以是单张图片,文件夹,屏幕或者摄像头等。
conf-thres:置信度阈值,低于该值的检测框会被过滤掉。
iou-thres:NMS 使用的 IOU 阈值,重叠度高于该值的框会被合并去重。
基于torch.hub的检测方法
import torch

# Load YOLOv5s from the local repo clone ("./") instead of downloading
# it from GitHub (source="local" skips the hub download step).
model = torch.hub.load("./", "yolov5s", source="local")

img = "./data/images/zidane.jpg"

results = model(img)  # run inference on a single image path
results.show()        # display the annotated detection result
3.数据集构建
用labelimg
存储格式改成YOLO
a上一张图
d下一张图
w创建框
4.模型训练
创建 images 和 labels 两个文件夹
分别存放未标注的图片和标注好的标签文件
把labels里的classes移到和images、labels同一目录
看detect.py文件中的weight和data参数
相关文件的路径
把.yaml中的内容修改好后,训练时要把train.py中的data参数
修改成该.yaml文件的路径。
修改虚拟内存
设置 → 系统 → 系统信息 → 高级系统设置
更改所在盘的虚拟内存就行了。
训练好后,可以用
tensorboard --logdir runs
看参数
没数据的话,安装一下这个
pip install protobuf==3.20.0
检测视频
python detect.py --weights runs/train/exp11/weights/best.pt --source BVN.mp4 --view-img
5.Pyside6可视化界面
pip install PySide6
在可视化界面弄好前端后,在vscode中下载pyside6插件后。将ui文件转换成py
import cv2
import sys
import torch
from PySide6.QtWidgets import QMainWindow, QApplication, QFileDialog
from PySide6.QtGui import QPixmap, QImage
from PySide6.QtCore import QTimer

from main_window_ui import Ui_MainWindow


def convert2QImage(img):
    """Convert an RGB image array of shape (height, width, channel) to a QImage."""
    height, width, channel = img.shape
    # bytesPerLine = width * channel; format assumes 8-bit RGB, no alpha.
    return QImage(img, width, height, width * channel, QImage.Format_RGB888)


class MainWindow(QMainWindow, Ui_MainWindow):
    """Main window: run YOLOv5 detection on a user-chosen image or video.

    The UI (input/output labels, detpic/detvid buttons) comes from the
    generated Ui_MainWindow class.
    """

    def __init__(self):
        super(MainWindow, self).__init__()
        # Load the custom-trained weights from the local yolov5 repo clone.
        self.model = torch.hub.load(
            "./",  # local clone of the yolov5 repository
            "custom",
            path="runs/train/exp11/weights/best.pt",
            source="local",
        )
        self.setupUi(self)
        self.timer = QTimer()
        self.timer.setInterval(100)  # one frame every 100 ms (~10 fps)
        self.video = None            # cv2.VideoCapture, set by open_video()
        self.bind_slots()

    def image_pred(self, file_path):
        """Run the model on an image path and return the rendered QImage."""
        results = self.model(file_path)
        image = results.render()[0]
        return convert2QImage(image)

    def open_image(self):
        """Slot: pick an image file; show the original and the detection result."""
        print("点击了检测文件")
        self.timer.stop()  # stop any running video playback first
        file_path, _ = QFileDialog.getOpenFileName(
            self,
            dir="./demo_images/images/train",
            filter="*.jpg;*.png;*.jpeg",
        )
        if file_path:
            pixmap = QPixmap(file_path)
            qimage = self.image_pred(file_path)
            self.input.setPixmap(pixmap)
            self.output.setPixmap(QPixmap.fromImage(qimage))

    def video_pred(self):
        """Slot (timer tick): read one frame, run detection, display both sides."""
        ret, frame = self.video.read()
        if not ret:
            self.timer.stop()  # end of video reached
        else:
            # OpenCV delivers BGR; both the model call and QImage expect RGB.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.input.setPixmap(QPixmap.fromImage(convert2QImage(frame)))
            results = self.model(frame)
            image = results.render()[0]
            self.output.setPixmap(QPixmap.fromImage(convert2QImage(image)))

    def open_video(self):
        """Slot: pick a video file and start frame-by-frame detection."""
        print("点击了检测文件")
        file_path, _ = QFileDialog.getOpenFileName(self, dir="./", filter="*.mp4")
        if file_path:
            self.video = cv2.VideoCapture(file_path)
            self.timer.start()

    def bind_slots(self):
        """Connect the two buttons and the playback timer to their handlers."""
        self.detpic.clicked.connect(self.open_image)
        self.detvid.clicked.connect(self.open_video)
        self.timer.timeout.connect(self.video_pred)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    app.exec()
6.Gradio搭建Web GUI
pip install gradio
import torch
import gradio as gr

# Load the custom-trained YOLOv5 weights from the local repo clone.
model = torch.hub.load(
    "./", "custom", path="runs/train/exp11/weights/best.pt", source="local"
)

title = "基于Gradio的YOLOv5演示项目"
desc = "这是一个基于Gradio的YOLOv5演示项目,非常简洁,非常方便!"

# Default thresholds shown on the sliders.
base_conf, base_iou = 0.25, 0.45


def det_image(img, conf, iou):
    """Run detection on *img* with the given thresholds.

    conf: confidence threshold; iou: NMS IoU threshold. Both are set on
    the model before inference. Returns the rendered (annotated) image.
    """
    model.conf = conf
    model.iou = iou
    return model(img).render()[0]


gr.Interface(
    inputs=[
        "image",
        gr.Slider(minimum=0, maximum=1, value=base_conf, interactive=True),
        gr.Slider(minimum=0, maximum=1, value=base_iou, interactive=True),
    ],
    outputs=["image"],
    fn=det_image,
    title=title,
    description=desc,
    live=True,  # re-run detection whenever an input changes
    examples=[
        ["./demo_images/images/train/demo_images30.jpg", base_conf, base_iou],
        ["./demo_images/images/train/demo_images120.jpg", base_conf, base_iou],
    ],
).launch()