
Deploying a YOLOv5 Object Detection Model with TensorRT 10 (2)

Drawing on experience from a real project, this article reorganizes the file structure of the earlier code so that the latest deployment code can be embedded into different projects more conveniently; the refactored code is also more robust. The deployment route is pt --> onnx --> engine, implemented on top of TensorRT 10.

TensorRT 10's inference interface uses enqueueV3: compared with the V2 interface, the I/O device addresses are bound to the context once up front instead of being passed through a bindings array on every call, which removes that per-inference step and improves inference time. A CUDA graph can also be added to the inference pipeline to optimize the data flow.
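In practice the enqueueV3 flow is: bind each I/O tensor's device address once by name with setTensorAddress, then launch with only the stream. A minimal sketch of that calling convention (the tensor names "images"/"output0" and the buffer pointers are placeholders, not taken from this project's code):

    // Bind device buffers to the engine's I/O tensors by name (done once after allocation)
    context->setTensorAddress("images",  input_device_ptr);   // input tensor
    context->setTensorAddress("output0", output_device_ptr);  // output tensor
    // Per inference: no bindings array as with enqueueV2, just the CUDA stream
    if (!context->enqueueV3(stream)) {
        std::cerr << "enqueueV3 failed" << std::endl;
    }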

My project folder is organized as follows:

The source of utils.hpp is as follows:

#ifndef UTILS_HPP
#define UTILS_HPP

#include <opencv2/opencv.hpp>
#include <cuda_runtime.h>
#include <cassert>
#include <iostream>
#include <memory>

#ifndef CUDA_CHECK
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err__ = (call);                                           \
        if (err__ != cudaSuccess) {                                           \
            std::cerr << "CUDA error [" << static_cast<int>(err__) << "] "    \
                      << cudaGetErrorString(err__) << " at " << __FILE__      \
                      << ":" << __LINE__ << std::endl;                        \
            assert(false);                                                    \
        }                                                                     \
    } while (0)
#endif

// // Manage TensorRT/NV objects by calling p->destroy()
// template<typename T>
// inline std::shared_ptr<T> make_nvshared(T* ptr){
//     return std::shared_ptr<T>(ptr, [](T* p){ if(p) p->destroy(); });
// }

// Manage TensorRT/NV objects with a plain `delete p;`
template<typename T>
inline std::shared_ptr<T> make_nvshared(T* ptr){
    return std::shared_ptr<T>(ptr, std::default_delete<T>());
}

/*-------------------------- YOLOV5_DETECT --------------------------*/
struct detectRes {
    int label { -1 };
    float confidence { 0.f };
    cv::Rect box {};
    cv::Scalar box_color {};
};
/*-------------------------- END YOLOV5_DETECT ----------------------*/

#endif // UTILS_HPP
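Every CUDA runtime call in the project goes through CUDA_CHECK so that failures surface immediately with file/line context. A trivial usage sketch (the buffer size here is arbitrary):

    float* dev = nullptr;
    CUDA_CHECK(cudaMalloc(&dev, 1024 * sizeof(float)));  // asserts with file/line on failure
    CUDA_CHECK(cudaFree(dev));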

Since the latest TensorRT 10 no longer supports calling destroy() on its objects, std::default_delete is used here as the smart pointers' deleter to release them.

If a later TensorRT release reinstates that interface, the commented-out version above can be re-enabled.
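As a quick usage sketch (the logger variable here stands in for whatever ILogger instance the project uses), any TensorRT object wrapped this way is released with a plain delete once its last owner goes out of scope:

    // delete is invoked automatically when the last shared_ptr owner is destroyed
    auto runtime = make_nvshared(nvinfer1::createInferRuntime(logger));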

TrtModel.hpp is as follows:

#ifndef TRTMODEL_HPP
#define TRTMODEL_HPP

#include <NvInfer.h>
#include <NvOnnxParser.h>
#include "logger.h"
#include "common.h"
#include <fstream>
#include <iostream>
#include <vector>
#include <string>
#include <random>
#include <cuda_runtime_api.h>
#include <unordered_map>

#include "utils.hpp"

class TrtModel
{
public:
    TrtModel(std::string onnxfilepath, bool fp16);
    ~TrtModel();
    std::vector<detectRes> detect_postprocess(cv::Mat& frame);
    void det_drawResult(cv::Mat& image, const std::vector<detectRes>& outputs);

private:
    bool genEngine();                                                   /* convert onnx to engine */
    std::vector<unsigned char> load_engine_file();                      /* load the engine file */
    bool Runtime();                                                     /* create the runtime and execution context from the engine */
    bool trtIOMemory();
    void preprocess(cv::Mat srcimg);                                    /* image preprocessing */
    bool createCudaGraph();

    std::shared_ptr<nvinfer1::IRuntime> m_runtime {nullptr};            /* inference runtime */
    std::shared_ptr<nvinfer1::ICudaEngine> m_engine {nullptr};          /* deserialized engine */
    std::shared_ptr<nvinfer1::IExecutionContext> m_context {nullptr};   /* execution context */
    cudaStream_t m_stream;                                              /* CUDA stream */

    // I/O tensor names/indices (TRT 8.5+ recommends addressing I/O tensors by name)
    std::string m_inputName, m_outputName;
    int m_inputIndex {-1}, m_outputIndex {-1};

    float* m_input_device_memory {nullptr};
    float* m_input_host_memory {nullptr};
    float* m_detect_bindings[2] {nullptr, nullptr};
    float* m_detect_host_memory {nullptr};
    float* m_detect_device_memory {nullptr};

    nvinfer1::Dims m_inputDims{};                                       /* input tensor shape */
    nvinfer1::Dims m_detectDims{};                                      /* output tensor shape */
    std::string m_enginePath {};                                        /* path of the generated engine */
    std::string onnx_file_path {};                                      /* path of the input onnx model */
    bool FP16 {};                                                       /* whether to build the engine with half precision */
    int m_inputSize {};                                                 /* input buffer size in bytes */
    int m_imgArea {};                                                   /* per-channel pixel count, used as the plane offset */
    int m_detectSize {};                                                /* output buffer size in bytes */
    int kInputH {};                                                     /* preprocessed image height, ideally a multiple of 32 */
    int kInputW {};                                                     /* preprocessed image width, ideally a multiple of 32 */
    float i2d[6] {}, d2i[6] {};                                         /* letterbox affine matrix and its inverse */
    float kNmsThresh = 0.2f;                                            /* NMS threshold for postprocessing */
    float kConfThresh = 0.5f;                                           /* detection confidence threshold */

    cudaGraphExec_t m_graphExec {nullptr};
    bool m_useGraph {false};

    const std::vector<std::string> CLASS_NAMES = {                      /* target classes to detect */
        "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train",
        "truck", "boat", "traffic light", "fire hydrant", "stop sign",
        "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep",
        "cow", "elephant", "bear", "zebra", "giraffe", "backpack",
        "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis",
        "snowboard", "sports ball", "kite", "baseball bat", "baseball glove",
        "skateboard", "surfboard", "tennis racket", "bottle", "wine glass",
        "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich",
        "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake",
        "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv",
        "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave",
        "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase",
        "scissors", "teddy bear", "hair drier", "toothbrush"};

    const std::vector<std::vector<unsigned int>> COLORS_HEX = {         /* a distinct color per class */
        {0x00, 0x72, 0xBD}, {0xD9, 0x53, 0x19}, {0xED, 0xB1, 0x20}, {0x7E, 0x2F, 0x8E}, {0x77, 0xAC, 0x30}, {0x4D, 0xBE, 0xEE},
        {0xA2, 0x14, 0x2F}, {0x4C, 0x4C, 0x4C}, {0x99, 0x99, 0x99}, {0xFF, 0x00, 0x00}, {0xFF, 0x80, 0x00}, {0xBF, 0xBF, 0x00},
        {0x00, 0xFF, 0x00}, {0x00, 0x00, 0xFF}, {0xAA, 0x00, 0xFF}, {0x55, 0x55, 0x00}, {0x55, 0xAA, 0x00}, {0x55, 0xFF, 0x00},
        {0xAA, 0x55, 0x00}, {0xAA, 0xAA, 0x00}, {0xAA, 0xFF, 0x00}, {0xFF, 0x55, 0x00}, {0xFF, 0xAA, 0x00}, {0xFF, 0xFF, 0x00},
        {0x00, 0x55, 0x80}, {0x00, 0xAA, 0x80}, {0x00, 0xFF, 0x80}, {0x55, 0x00, 0x80}, {0x55, 0x55, 0x80}, {0x55, 0xAA, 0x80},
        {0x55, 0xFF, 0x80}, {0xAA, 0x00, 0x80}, {0xAA, 0x55, 0x80}, {0xAA, 0xAA, 0x80}, {0xAA, 0xFF, 0x80}, {0xFF, 0x00, 0x80},
        {0xFF, 0x55, 0x80}, {0xFF, 0xAA, 0x80}, {0xFF, 0xFF, 0x80}, {0x00, 0x55, 0xFF}, {0x00, 0xAA, 0xFF}, {0x00, 0xFF, 0xFF},
        {0x55, 0x00, 0xFF}, {0x55, 0x55, 0xFF}, {0x55, 0xAA, 0xFF}, {0x55, 0xFF, 0xFF}, {0xAA, 0x00, 0xFF}, {0xAA, 0x55, 0xFF},
        {0xAA, 0xAA, 0xFF}, {0xAA, 0xFF, 0xFF}, {0xFF, 0x00, 0xFF}, {0xFF, 0x55, 0xFF}, {0xFF, 0xAA, 0xFF}, {0x55, 0x00, 0x00},
        {0x80, 0x00, 0x00}, {0xAA, 0x00, 0x00}, {0xD4, 0x00, 0x00}, {0xFF, 0x00, 0x00}, {0x00, 0x2B, 0x00}, {0x00, 0x55, 0x00},
        {0x00, 0x80, 0x00}, {0x00, 0xAA, 0x00}, {0x00, 0xD4, 0x00}, {0x00, 0xFF, 0x00}, {0x00, 0x00, 0x2B}, {0x00, 0x00, 0x55},
        {0x00, 0x00, 0x80}, {0x00, 0x00, 0xAA}, {0x00, 0x00, 0xD4}, {0x00, 0x00, 0xFF}, {0x00, 0x00, 0x00}, {0x24, 0x24, 0x24},
        {0x49, 0x49, 0x49}, {0x6D, 0x6D, 0x6D}, {0x92, 0x92, 0x92}, {0xB6, 0xB6, 0xB6}, {0xDB, 0xDB, 0xDB}, {0x00, 0x72, 0xBD},
        {0x50, 0xB7, 0xBD}, {0x80, 0x80, 0x00}};
};

#endif // TRTMODEL_HPP

This differs slightly from the earlier TensorRT 8 version; the main change is the added bool createCudaGraph() method, which manages the CUDA graph.

The source of TrtModel.cpp is as follows:

#include "TrtModel.hpp"
#include <cstring>
#include <memory>
#include <sys/stat.h>static inline bool file_exists(const std::string& name) {struct stat buffer{};return (stat(name.c_str(), &buffer) == 0);
}//!初始化推理引擎,如果没有推理引擎,则从onnx模型构建推理引擎
TrtModel::TrtModel(std::string onnxfilepath, bool fp16):onnx_file_path(std::move(onnxfilepath)), FP16(fp16)
{const auto idx = onnx_file_path.find(".onnx");const auto basename = onnx_file_path.substr(0, idx);m_enginePath = basename + ".engine";if (file_exists(m_enginePath)) {std::cout << "[TRT] Deserialize from engine: " << m_enginePath << std::endl;Runtime();} else {std::cout << "[TRT] Build from onnx: " << onnx_file_path << std::endl;if (!genEngine()) throw std::runtime_error("build engine failed");Runtime();}this->trtIOMemory();
bool TrtModel::genEngine(){

    // Log the build process verbosely
    sample::gLogger.setReportableSeverity(nvinfer1::ILogger::Severity::kVERBOSE);

    // Create the builder
    auto builder = make_nvshared(nvinfer1::createInferBuilder(sample::gLogger.getTRTLogger()));
    if(!builder){
        std::cout<<" (T_T)~~~, Failed to create builder."<<std::endl;
        return false;
    }

    // Declare an explicit batch and create the network
    const auto explicitBatch = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
    auto network = make_nvshared(builder->createNetworkV2(explicitBatch));
    if(!network){
        std::cout<<" (T_T)~~~, Failed to create network."<<std::endl;
        return false;
    }

    // Create the config
    auto config = make_nvshared(builder->createBuilderConfig());
    if(!config){
        std::cout<<" (T_T)~~~, Failed to create config."<<std::endl;
        return false;
    }

    // Create the parser to build the network from onnx automatically; otherwise every layer would have to be built by hand
    auto parser = make_nvshared(nvonnxparser::createParser(*network, sample::gLogger.getTRTLogger()));
    if(!parser){
        std::cout<<" (T_T)~~~, Failed to create parser."<<std::endl;
        return false;
    }

    {
        // Read the onnx file and start building the network
        auto parsed = parser->parseFromFile(onnx_file_path.c_str(), static_cast<int>(sample::gLogger.getReportableSeverity()));
        if(!parsed){
            std::cout<<" (T_T)~~~ ,Failed to parse onnx file."<<std::endl;
            return false;
        }

        auto profile = builder->createOptimizationProfile();
        config->addOptimizationProfile(profile);

        // Enable half-precision optimization if requested
        if(FP16)  config->setFlag(nvinfer1::BuilderFlag::kFP16);

        // Enable DLA only when it is available
        const int numDLACores = builder->getNbDLACores();
        if (numDLACores > 0) {
            config->setDefaultDeviceType(nvinfer1::DeviceType::kDLA);
            config->setDLACore(0);
            config->setFlag(nvinfer1::BuilderFlag::kGPU_FALLBACK);
            std::cout << "[TRT] Using DLA core 0 with GPU fallback" << std::endl;
        } else {
            config->setDefaultDeviceType(nvinfer1::DeviceType::kGPU);
        }
    }

    // builder->setMaxBatchSize(1);
    config->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kWORKSPACE, 1 << 30);      /* replaces setMaxWorkspaceSize in newer versions */

    auto profileStream = samplesCommon::makeCudaStream();
    if(!profileStream){
        std::cout<<" (T_T)~~~, Failed to makeCudaStream."<<std::endl;
        return false;
    }
    config->setProfileStream(*profileStream);

    // Build the serialized engine
    auto plan = make_nvshared(builder->buildSerializedNetwork(*network, *config));
    if(!plan){
        std::cout<<" (T_T)~~~, Failed to SerializedNetwork."<<std::endl;
        return false;
    }

    std::cout<<"********************* check model input shape *********************"<<std::endl;
    std::cout<<"input number : "<< network->getNbInputs()<<std::endl;
    for(size_t i=0; i<static_cast<size_t>(network->getNbInputs()); ++i){
        auto mInputDims = network->getInput(i)->getDimensions();
        std::cout<<" ✨~ model input dims: "<<mInputDims.nbDims <<std::endl;
        std::cout<<" ✨^_^ model input dim : "<<mInputDims<<std::endl;
    }
    std::cout<<"********************* check model output shape *********************"<<std::endl;
    std::cout<<"output number : "<< network->getNbOutputs()<<std::endl;
    for(size_t i=0; i<static_cast<size_t>(network->getNbOutputs()); ++i){
        auto mOutputDims = network->getOutput(i)->getDimensions();
        std::cout<<" ✨~ model output dims: "<<mOutputDims.nbDims <<std::endl;
        std::cout<<" ✨^_^ model output dim : "<<mOutputDims<<std::endl;
    }

    // Serialize the engine to disk
    std::ofstream engine_file(m_enginePath, std::ios::binary);
    if(!engine_file.good()){
        std::cout<<" (T_T)~~~, Failed to open engine file"<<std::endl;
        return false;
    }
    engine_file.write((char *)plan->data(), plan->size());
    engine_file.close();

    std::cout << " ~~Congratulations! 🎉🎉🎉~  Engine build success!!! ✨✨✨~~ " << std::endl;
    return true;
}

std::vector<unsigned char> TrtModel::load_engine_file(){
    std::vector<unsigned char> engine_data;
    std::ifstream engine_file(m_enginePath, std::ios::binary);
    if(!engine_file.is_open()) { std::cerr << "[TRT] open engine failed" << std::endl; return engine_data; }
    engine_file.seekg(0, std::ios::end);
    const auto length = static_cast<size_t>(engine_file.tellg());
    engine_data.resize(length);
    engine_file.seekg(0, std::ios::beg);
    engine_file.read(reinterpret_cast<char*>(engine_data.data()), length);
    return engine_data;
}

bool TrtModel::Runtime(){
    initLibNvInferPlugins(&sample::gLogger.getTRTLogger(), "");
    auto plan = load_engine_file();
    sample::setReportableSeverity(sample::Severity::kINFO);

    m_runtime = make_nvshared(nvinfer1::createInferRuntime(sample::gLogger.getTRTLogger()));
    if(!m_runtime){ std::cerr << " (T_T)~~~, create runtime failed" << std::endl; return false; }

    m_engine = make_nvshared(m_runtime->deserializeCudaEngine(plan.data(), plan.size()));
    if(!m_engine){ std::cerr << " (T_T)~~~, deserialize failed" << std::endl; return false; }

    // Record the I/O tensor names and indices (TRT 8.5+)
    const int nbIO = m_engine->getNbIOTensors();
    for (int i = 0; i < nbIO; ++i) {
        const char* name = m_engine->getIOTensorName(i);
        auto mode = m_engine->getTensorIOMode(name);
        if (mode == nvinfer1::TensorIOMode::kINPUT)  { m_inputName = name; m_inputIndex  = i; }
        if (mode == nvinfer1::TensorIOMode::kOUTPUT) { m_outputName = name; m_outputIndex = i; }
        auto dims = m_engine->getTensorShape(name);
        auto dtype = m_engine->getTensorDataType(name);
        std::cout << "[TRT] IO["<<i<<"] "<< name << ", mode=" << (int)mode
                  << ", dims=" << dims << ", dtype=" << (int)dtype << std::endl;
    }

    m_context = make_nvshared(m_engine->createExecutionContext());
    if(!m_context){ std::cerr << " (T_T)~~~, create context failed" << std::endl; return false; }

    CUDA_CHECK(cudaStreamCreateWithFlags(&m_stream, cudaStreamNonBlocking));
    std::cout << " (^_^)~~~, runtime ready" << std::endl;
    return true;
}
bool TrtModel::trtIOMemory(){
    m_inputDims  = m_context->getTensorShape(m_inputName.c_str());
    m_detectDims = m_context->getTensorShape(m_outputName.c_str());

    this->kInputH = m_inputDims.d[2];
    this->kInputW = m_inputDims.d[3];

    m_imgArea    = m_inputDims.d[2] * m_inputDims.d[3];
    m_inputSize  = static_cast<size_t>(m_inputDims.d[0]) * m_inputDims.d[1] * m_imgArea * sizeof(float);
    m_detectSize = static_cast<size_t>(m_detectDims.d[0]) * m_detectDims.d[1] * m_detectDims.d[2] * sizeof(float);

    CUDA_CHECK(cudaMallocHost(&m_input_host_memory,  m_inputSize));
    CUDA_CHECK(cudaMalloc(&m_input_device_memory,    m_inputSize));
    CUDA_CHECK(cudaMallocHost(&m_detect_host_memory, m_detectSize));
    CUDA_CHECK(cudaMalloc(&m_detect_device_memory,   m_detectSize));

    m_detect_bindings[m_inputIndex]  = m_input_device_memory;
    m_detect_bindings[m_outputIndex] = m_detect_device_memory;

    // Set tensor addresses for enqueueV3
    if (!m_context->setTensorAddress(m_inputName.c_str(), m_input_device_memory)) {
        std::cerr << "[TRT] Failed to set input tensor address" << std::endl;
        return false;
    }
    if (!m_context->setTensorAddress(m_outputName.c_str(), m_detect_device_memory)) {
        std::cerr << "[TRT] Failed to set output tensor address" << std::endl;
        return false;
    }

    std::cout << "[TRT] after optimizer input:  " << m_inputDims  << std::endl;
    std::cout << "[TRT] after optimizer output: " << m_detectDims << std::endl;

    m_useGraph = createCudaGraph();
    if (m_useGraph) {
        std::cout << "[TRT] CUDA Graph created successfully" << std::endl;
    } else {
        std::cout << "[TRT] Failed to create CUDA Graph, falling back to enqueueV3" << std::endl;
    }
    return true;
}

bool TrtModel::createCudaGraph() {
    cudaError_t err = cudaStreamSynchronize(m_stream);
    if (err != cudaSuccess) {
        std::cerr << "[TRT] cudaStreamSynchronize failed: " << cudaGetErrorString(err) << std::endl;
        return false;
    }

    err = cudaStreamBeginCapture(m_stream, cudaStreamCaptureModeGlobal);
    if (err != cudaSuccess) {
        std::cerr << "[TRT] cudaStreamBeginCapture failed: " << cudaGetErrorString(err) << std::endl;
        return false;
    }

    bool enq_ok = m_context->enqueueV3(m_stream);
    if (!enq_ok) {
        std::cerr << "[TRT] enqueueV3 failed during capture" << std::endl;
        cudaStreamEndCapture(m_stream, nullptr);
        return false;
    }

    cudaGraph_t graph;
    err = cudaStreamEndCapture(m_stream, &graph);
    if (err != cudaSuccess) {
        std::cerr << "[TRT] cudaStreamEndCapture failed: " << cudaGetErrorString(err) << std::endl;
        return false;
    }

    // CUDA 12 signature: the older five-argument overload of cudaGraphInstantiate was removed
    err = cudaGraphInstantiate(&m_graphExec, graph, 0);
    cudaGraphDestroy(graph);
    if (err != cudaSuccess) {
        std::cerr << "[TRT] cudaGraphInstantiate failed: " << cudaGetErrorString(err) << std::endl;
        return false;
    }
    return true;
}
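// Note (not part of the original source): the TensorRT documentation recommends
// running enqueueV3 once outside of capture before recording the graph, so that
// lazily allocated resources are created ahead of time. A hedged warm-up sketch
// that could be added at the top of createCudaGraph():
//
//     if (!m_context->enqueueV3(m_stream)) return false;   // warm-up run
//     CUDA_CHECK(cudaStreamSynchronize(m_stream));         // let it finish
//     // ...then call cudaStreamBeginCapture() as above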
static inline void affine_project(const float* M, float x, float y, float& ox, float& oy) {
    ox = M[0]*x + M[1]*y + M[2];
    oy = M[3]*x + M[4]*y + M[5];
}

void TrtModel::preprocess(cv::Mat srcimg)
{
    // 1) Compute the letterbox affine matrix
    const float r  = std::min(kInputW / (float)srcimg.cols, kInputH / (float)srcimg.rows);
    const float dx = (kInputW - r * srcimg.cols) * 0.5f;
    const float dy = (kInputH - r * srcimg.rows) * 0.5f;
    i2d[0] = r;    i2d[1] = 0.0f; i2d[2] = dx;
    i2d[3] = 0.0f; i2d[4] = r;    i2d[5] = dy;

    cv::Mat m2x3_i2d(2, 3, CV_32F, i2d);
    cv::Mat m2x3_d2i(2, 3, CV_32F, d2i);
    cv::invertAffineTransform(m2x3_i2d, m2x3_d2i);

    // 2) Reuse a static buffer to avoid reallocating every frame (a one-time class member would also work)
    static thread_local cv::Mat letterbox;      // CV_8UC3
    letterbox.create(kInputH, kInputW, CV_8UC3);
    cv::warpAffine(srcimg, letterbox, m2x3_i2d, letterbox.size(),
                   cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar::all(0));
    if (!letterbox.data) { std::cerr << "ERROR: Image is empty!\n"; return; }

    // 3) BGR->RGB, uint8->float, normalize to [0,1]
    static thread_local cv::Mat rgb8, rgb32f;
    cv::cvtColor(letterbox, rgb8, cv::COLOR_BGR2RGB);
    rgb8.convertTo(rgb32f, CV_32F, 1.0 / 255.0);

    cv::Mat planes[3];
    cv::split(rgb32f, planes);

    // 4) memcpy each plane into the pinned host buffer (NCHW layout)
    const size_t planeBytes = (size_t)m_imgArea * sizeof(float);
    float* dst = m_input_host_memory;
    std::memcpy(dst + 0 * m_imgArea, planes[0].ptr<float>(), planeBytes); // R
    std::memcpy(dst + 1 * m_imgArea, planes[1].ptr<float>(), planeBytes); // G
    std::memcpy(dst + 2 * m_imgArea, planes[2].ptr<float>(), planeBytes); // B

    // 5) H2D copy (still asynchronous)
    CUDA_CHECK(cudaMemcpyAsync(m_input_device_memory, m_input_host_memory,
                               m_inputSize, cudaMemcpyHostToDevice, m_stream));
}

std::vector<detectRes> TrtModel::detect_postprocess(cv::Mat& frame){

    std::vector<detectRes> results;

    // auto start_time = std::chrono::high_resolution_clock::now();
    preprocess(frame);
    // auto end_time = std::chrono::high_resolution_clock::now();
    // std::chrono::duration<double, std::milli> duration = end_time - start_time;
    // std::cout << "preprocess time: " << duration.count() << " ms" << std::endl;

    if (m_useGraph) {
        CUDA_CHECK(cudaGraphLaunch(m_graphExec, m_stream));
    } else {
        bool ok = m_context->enqueueV3(m_stream);
        if (!ok) std::cerr << "[TRT] enqueueV3 failed" << std::endl;
    }

    CUDA_CHECK(cudaMemcpyAsync(m_detect_host_memory, m_detect_device_memory,
                               m_detectSize, cudaMemcpyDeviceToHost, m_stream));
    CUDA_CHECK(cudaStreamSynchronize(m_stream));

    float* pdata = m_detect_host_memory;
    std::vector<cv::Rect>  boxes;  boxes.reserve(m_detectDims.d[1]);
    std::vector<float>     scores; scores.reserve(m_detectDims.d[1]);
    std::vector<int>       labels; labels.reserve(m_detectDims.d[1]);

    for (int i = 0; i < m_detectDims.d[1]; ++i) {
        const int index = i * m_detectDims.d[2];
        const float obj_conf = pdata[index + 4];
        if (obj_conf <= kConfThresh) continue;

        float* max_cls = std::max_element(pdata + index + 5, pdata + index + m_detectDims.d[2]);
        float cls_conf = (*max_cls) * obj_conf;
        if (cls_conf <= kConfThresh) continue;

        const float cx = pdata[index + 0];
        const float cy = pdata[index + 1];
        const float w  = pdata[index + 2];
        const float h  = pdata[index + 3];

        float left  = cx - 0.5f*w, top    = cy - 0.5f*h;
        float right = cx + 0.5f*w, bottom = cy + 0.5f*h;

        float xmin, ymin, xmax, ymax;
        affine_project(d2i, left,  top,    xmin, ymin);
        affine_project(d2i, right, bottom, xmax, ymax);

        xmin = std::clamp(xmin, 0.f, (float)frame.cols - 1);
        xmax = std::clamp(xmax, 0.f, (float)frame.cols - 1);
        ymin = std::clamp(ymin, 0.f, (float)frame.rows - 1);
        ymax = std::clamp(ymax, 0.f, (float)frame.rows - 1);

        const float bw = xmax - xmin, bh = ymax - ymin;
        if (bw > 1.f && bh > 1.f) {
            const int label = static_cast<int>(max_cls - (pdata + index + 5));
            boxes.emplace_back((int)std::round(xmin), (int)std::round(ymin),
                               (int)std::round(bw),   (int)std::round(bh));
            scores.push_back(cls_conf);
            labels.push_back(label);
        }
    }

    std::vector<int> keep;
    cv::dnn::NMSBoxes(boxes, scores, kConfThresh, kNmsThresh, keep);

    std::mt19937 gen{std::random_device{}()};
    std::uniform_int_distribution<int> dis(80, 180);
    for (int idx : keep) {
        detectRes r;
        r.label      = labels[idx];
        r.confidence = scores[idx];
        r.box        = boxes[idx];
        r.box_color  = cv::Scalar(dis(gen), dis(gen), dis(gen));
        results.push_back(r);
    }
    return results;
}
void TrtModel::det_drawResult(cv::Mat& image, const std::vector<detectRes>& outputs)
{
    int detections = outputs.size();
    for (int i = 0; i < detections; ++i)
    {
        detectRes detection = outputs[i];
        cv::Rect box = detection.box;
        cv::Scalar color = detection.box_color;

        // Detection box
        cv::rectangle(image, box, color, 2);

        // Detection box text
        std::string classString = std::to_string(detection.label) + ' ' + std::to_string(detection.confidence).substr(0, 4);
        cv::Size textSize = cv::getTextSize(classString, cv::FONT_HERSHEY_DUPLEX, 1, 2, 0);
        cv::Rect textBox(box.x, box.y - 40, textSize.width + 10, textSize.height + 20);

        cv::rectangle(image, textBox, color, cv::FILLED);
        cv::putText(image, classString, cv::Point(box.x + 5, box.y - 10), cv::FONT_HERSHEY_DUPLEX, 1, cv::Scalar(0, 0, 0), 2, 0);
    }
}

TrtModel::~TrtModel(){
    if (m_graphExec) { cudaGraphExecDestroy(m_graphExec); m_graphExec = nullptr; }
    if (m_stream)    { cudaStreamDestroy(m_stream); m_stream = nullptr; }
    if (m_input_host_memory)   { cudaFreeHost(m_input_host_memory);   m_input_host_memory    = nullptr; }
    if (m_input_device_memory) { cudaFree(m_input_device_memory);     m_input_device_memory  = nullptr; }
    if (m_detect_host_memory)  { cudaFreeHost(m_detect_host_memory);  m_detect_host_memory   = nullptr; }
    if (m_detect_device_memory){ cudaFree(m_detect_device_memory);    m_detect_device_memory = nullptr; }
}
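A note on the coordinate mapping: the letterbox matrix i2d built in preprocess maps a source pixel (x, y) to (r·x + dx, r·y + dy), and d2i, computed by cv::invertAffineTransform, is its exact inverse ((x' − dx) / r, (y' − dy) / r). That is why affine_project(d2i, ...) in detect_postprocess carries box corners decoded in network-input coordinates straight back to source-image coordinates before clamping and NMS.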

The calling code is the same as with the earlier TRT 8 version; the interface is used as follows:

#include <iostream>
#include <chrono>    // frame timing
#include <sstream>   // FPS text formatting
#include <iomanip>   // std::setprecision
#include "TrtModel.hpp"

// int X {1141}, Y {316}, W {306}, H {344};

// void drawResult(cv::Mat& image, const std::vector<detectRes>& outputs)
// {
//     int detections = outputs.size();
//     for (int i = 0; i < detections; ++i)
//     {
//         detectRes detection = outputs[i];
//
//         // cv::Rect box = detection.box;
//         int x = detection.box.x + X;
//         int y = detection.box.y + Y;
//         int width = detection.box.width;
//         int height = detection.box.height;
//         cv::Scalar color = detection.box_color;
//         cv::Rect box = cv::Rect(x, y, width, height);
//
//         // Detection box
//         cv::rectangle(image, box, color, 2);
//
//         // Detection box text
//         std::string classString = std::to_string(detection.label) + ' ' + std::to_string(detection.confidence).substr(0, 4);
//         cv::Size textSize = cv::getTextSize(classString, cv::FONT_HERSHEY_DUPLEX, 1, 2, 0);
//         cv::Rect textBox(box.x, box.y - 40, textSize.width + 10, textSize.height + 20);
//
//         cv::rectangle(image, textBox, color, cv::FILLED);
//         cv::putText(image, classString, cv::Point(box.x + 5, box.y - 10), cv::FONT_HERSHEY_DUPLEX, 1, cv::Scalar(0, 0, 0), 2, 0);
//     }
// }

int main() {

    static TrtModel trtmodel("weights/yolov5s.onnx", true);

    cv::VideoCapture cap("media/234.mp4");

    // Check that the video opened successfully
    if (!cap.isOpened()) {
        std::cerr << "Error: Could not open video file.\n";
        return -1;
    }

    cv::Size frameSize(static_cast<int>(cap.get(cv::CAP_PROP_FRAME_WIDTH)),
                       static_cast<int>(cap.get(cv::CAP_PROP_FRAME_HEIGHT)));

    // Query the frame rate
    double video_fps = cap.get(cv::CAP_PROP_FPS);
    std::cout << "width: " << frameSize.width << " height: " << frameSize.height << " fps: " << video_fps << std::endl;

    cv::Mat frame;
    int frame_nums = 0;

    // Read and display frames until the video ends
    while (cap.read(frame)) {

        auto start_time = std::chrono::high_resolution_clock::now();

        // cv::Rect roi(X, Y, W, H);
        // cv::Mat frame1 = frame(roi).clone();

        auto output = trtmodel.detect_postprocess(frame);
        // drawResult(frame, output);
        trtmodel.det_drawResult(frame, output);

        cv::putText(frame, "duck_nums: " + std::to_string(000), cv::Point(10, 100),
                    cv::FONT_HERSHEY_SIMPLEX, 3, cv::Scalar(0, 0, 255), 5);

        auto end_time = std::chrono::high_resolution_clock::now();
        std::chrono::duration<double, std::milli> duration = end_time - start_time;
        std::cout << "frame time: " << duration.count() << " ms" << std::endl;

        double fps = 1000.0 / duration.count();

        // Format the FPS text
        std::stringstream ss;
        ss << "FPS: " << std::fixed << std::setprecision(2) << fps;
        std::string fps_text = ss.str();

        // Draw the FPS on the frame
        cv::putText(frame, fps_text, cv::Point(200, 200), cv::FONT_HERSHEY_DUPLEX, 3, cv::Scalar(0, 255, 0), 2, 0);

        // Display the processed frame
        cv::imshow("Processed-trans-Video", frame);

        frame_nums += 1;
        std::string filename = "./111/" + std::to_string(frame_nums) + ".jpg";
        // cv::imwrite(filename, frame);

        if (cv::waitKey(25) == 27) {
            break;
        }
    }

    // Release the capture and close all windows
    cap.release();
    cv::destroyAllWindows();
    return 0;
}

My CMakeLists.txt is as follows:


cmake_minimum_required(VERSION 3.11)
project(CountObj LANGUAGES CXX)

set(CMAKE_CXX_STANDARD 17)
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}")

# CUDA configuration
set(CMAKE_CUDA_COMPILER /usr/local/cuda-12.3/bin/nvcc)
enable_language(CUDA)

# Find and include Eigen3
if(NOT EIGEN3_FOUND)
    set(EIGEN3_INCLUDE_DIR "/usr/include/eigen3")
    include_directories(${EIGEN3_INCLUDE_DIR})
endif()

# Sources and headers from the bytetrack folder
file(GLOB BYTE_TRACK_SOURCES "bytetrack/*.cpp")
file(GLOB BYTE_TRACK_HEADERS "bytetrack/*.h")

# Sources and headers from the src folder
file(GLOB SRC_CPP "src/*.cpp")
file(GLOB SRC_HPP "src/*.hpp")

# OpenCV
find_package(OpenCV REQUIRED)

# Include directories
include_directories(
    /usr/local/cuda-12.3/include                       # CUDA
    /opt/TensorRT/TensorRT-10.10.0.31/include/         # TensorRT
    /opt/TensorRT/TensorRT-10.10.0.31/samples/common   # TensorRT sample helpers
    logger
    /opt/TensorRT/TensorRT-10.10.0.31/samples
    ${CMAKE_CURRENT_SOURCE_DIR}/bytetrack              # the bytetrack folder
    ${CMAKE_CURRENT_SOURCE_DIR}/src                    # the src folder
    ${EIGEN3_INCLUDE_DIR}                              # Eigen headers
)

# Link directories
link_directories(
    /usr/local/cuda-12.3/lib64
    /opt/TensorRT/TensorRT-10.10.0.31/lib/
    /usr/local/lib
)

add_executable(build
    main13.cpp
    /opt/TensorRT/TensorRT-10.10.0.31/samples/common/logger.cpp
    ${BYTE_TRACK_SOURCES}
    ${SRC_CPP}
)

# CUDA architectures
set_target_properties(build PROPERTIES
    CUDA_ARCHITECTURES "61;70;75;89"
    CUDA_SEPARABLE_COMPILATION ON
)

# Link libraries
target_link_libraries(build PRIVATE
    # OpenCV
    ${OpenCV_LIBS}
    # TensorRT
    nvinfer
    nvinfer_plugin
    nvonnxparser
    # CUDA
    cudart
)

# Extra compile options (optional)
target_compile_options(build PRIVATE
    -Wall
    -Wno-deprecated-declarations
    -O2
)

The complete code is available via the cloud-drive share below:

File shared via Baidu Netdisk: trt10_yolov5_detect_demo.zip
Link: https://pan.baidu.com/s/1wkSnxDyaQn2Eo2ZKLNs1hg?pwd=xbnq  Extraction code: xbnq

