
Jetson Nano Detailed Usage Notes (Part 2)

    • Setting up swap space on the Jetson Nano
  • Capability-improvement topics
    • Installing jetpack-sdk-461 (TensorRT version: 8.2.1.8)
  • Part 11: Deploying the jetson-inference project on the Jetson Nano
      • Deploying an image classification model on the Jetson Nano
      • Building and deploying jetson-inference locally
      • Deploying jetson-inference in a Docker container
    • Running jetson-inference object detection models
      • Training an object detection dataset in Open Images Dataset format
      • Training an object detection dataset in Pascal VOC format

"""
An example that uses TensorRT's Python API to make inferences.
"""
import ctypes
import os
import shutil
import random
import sys
import threading
import time
import cv2
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt

CONF_THRESH = 0.5
IOU_THRESHOLD = 0.4


def get_img_path_batches(batch_size, img_dir):
    ret = []
    batch = []
    for root, dirs, files in os.walk(img_dir):
        for name in files:
            if len(batch) == batch_size:
                ret.append(batch)
                batch = []
            batch.append(os.path.join(root, name))
    if len(batch) > 0:
        ret.append(batch)
    return ret


def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    """
    description: Plots one bounding box on image img;
                 this function comes from the YOLOv5 project.
    param:
        x:      a box like [x1, y1, x2, y2]
        img:    an OpenCV image object
        color:  color to draw the rectangle with, such as (0, 255, 0)
        label:  str
        line_thickness: int
    return:
        no return
    """
    tl = (
        line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1
    )  # line/font thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled
        cv2.putText(
            img,
            label,
            (c1[0], c1[1] - 2),
            0,
            tl / 3,
            [225, 255, 255],
            thickness=tf,
            lineType=cv2.LINE_AA,
        )


class YoLov5TRT(object):
    """
    description: A YOLOv5 class that wraps TensorRT ops, preprocess and postprocess ops.
    """

    def __init__(self, engine_file_path):
        # Create a Context on this device.
        self.ctx = cuda.Device(0).make_context()
        stream = cuda.Stream()
        TRT_LOGGER = trt.Logger(trt.Logger.INFO)
        runtime = trt.Runtime(TRT_LOGGER)

        # Deserialize the engine from file
        with open(engine_file_path, "rb") as f:
            engine = runtime.deserialize_cuda_engine(f.read())
        context = engine.create_execution_context()

        host_inputs = []
        cuda_inputs = []
        host_outputs = []
        cuda_outputs = []
        bindings = []

        for binding in engine:
            print('binding:', binding, engine.get_binding_shape(binding))
            size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
            dtype = trt.nptype(engine.get_binding_dtype(binding))
            # Allocate host and device buffers
            host_mem = cuda.pagelocked_empty(size, dtype)
            cuda_mem = cuda.mem_alloc(host_mem.nbytes)
            # Append the device buffer to device bindings.
            bindings.append(int(cuda_mem))
            # Append to the appropriate list.
            if engine.binding_is_input(binding):
                self.input_w = engine.get_binding_shape(binding)[-1]
                self.input_h = engine.get_binding_shape(binding)[-2]
                host_inputs.append(host_mem)
                cuda_inputs.append(cuda_mem)
            else:
                host_outputs.append(host_mem)
                cuda_outputs.append(cuda_mem)

        # Store
        self.stream = stream
        self.context = context
        self.engine = engine
        self.host_inputs = host_inputs
        self.cuda_inputs = cuda_inputs
        self.host_outputs = host_outputs
        self.cuda_outputs = cuda_outputs
        self.bindings = bindings
        self.batch_size = engine.max_batch_size

    def infer(self, raw_image_generator):
        threading.Thread.__init__(self)
        # Make self the active context, pushing it on top of the context stack.
        self.ctx.push()
        # Restore
        stream = self.stream
        context = self.context
        engine = self.engine
        host_inputs = self.host_inputs
        cuda_inputs = self.cuda_inputs
        host_outputs = self.host_outputs
        cuda_outputs = self.cuda_outputs
        bindings = self.bindings
        # Do image preprocess
        batch_image_raw = []
        batch_origin_h = []
        batch_origin_w = []
        batch_input_image = np.empty(shape=[self.batch_size, 3, self.input_h, self.input_w])
        for i, image_raw in enumerate(raw_image_generator):
            input_image, image_raw, origin_h, origin_w = self.preprocess_image(image_raw)
            batch_image_raw.append(image_raw)
            batch_origin_h.append(origin_h)
            batch_origin_w.append(origin_w)
            np.copyto(batch_input_image[i], input_image)
        batch_input_image = np.ascontiguousarray(batch_input_image)

        # Copy input image to host buffer
        np.copyto(host_inputs[0], batch_input_image.ravel())
        start = time.time()
        # Transfer input data to the GPU.
        cuda.memcpy_htod_async(cuda_inputs[0], host_inputs[0], stream)
        # Run inference.
        context.execute_async(batch_size=self.batch_size, bindings=bindings, stream_handle=stream.handle)
        # Transfer predictions back from the GPU.
        cuda.memcpy_dtoh_async(host_outputs[0], cuda_outputs[0], stream)
        # Synchronize the stream
        stream.synchronize()
        end = time.time()
        # Remove any context from the top of the context stack, deactivating it.
        self.ctx.pop()
        # Here we use the first row of output in that batch_size = 1
        output = host_outputs[0]
        # Do postprocess
        for i in range(self.batch_size):
            result_boxes, result_scores, result_classid = self.post_process(
                output[i * 6001: (i + 1) * 6001], batch_origin_h[i], batch_origin_w[i]
            )
            # Draw rectangles and labels on the original image
            for j in range(len(result_boxes)):
                box = result_boxes[j]
                plot_one_box(
                    box,
                    batch_image_raw[i],
                    label="{}:{:.2f}".format(
                        categories[int(result_classid[j])], result_scores[j]
                    ),
                )
        return batch_image_raw, end - start

    def destroy(self):
        # Remove any context from the top of the context stack, deactivating it.
        self.ctx.pop()

    def get_raw_image(self, image_path_batch):
        """
        description: Read an image from image path
        """
        for img_path in image_path_batch:
            yield cv2.imread(img_path)

    def get_raw_image_zeros(self, image_path_batch=None):
        """
        description: Ready data for warmup
        """
        for _ in range(self.batch_size):
            yield np.zeros([self.input_h, self.input_w, 3], dtype=np.uint8)

    def preprocess_image(self, raw_bgr_image):
        """
        description: Convert BGR image to RGB,
                     resize and pad it to target size, normalize to [0,1],
                     transform to NCHW format.
        param:
            raw_bgr_image: an OpenCV BGR image
        return:
            image:      the processed image
            image_raw:  the original image
            h:          original height
            w:          original width
        """
        image_raw = raw_bgr_image
        h, w, c = image_raw.shape
        image = cv2.cvtColor(image_raw, cv2.COLOR_BGR2RGB)
        # Calculate width and height and paddings
        r_w = self.input_w / w
        r_h = self.input_h / h
        if r_h > r_w:
            tw = self.input_w
            th = int(r_w * h)
            tx1 = tx2 = 0
            ty1 = int((self.input_h - th) / 2)
            ty2 = self.input_h - th - ty1
        else:
            tw = int(r_h * w)
            th = self.input_h
            tx1 = int((self.input_w - tw) / 2)
            tx2 = self.input_w - tw - tx1
            ty1 = ty2 = 0
        # Resize the long side while maintaining the aspect ratio
        image = cv2.resize(image, (tw, th))
        # Pad the short side with (128, 128, 128)
        image = cv2.copyMakeBorder(
            image, ty1, ty2, tx1, tx2, cv2.BORDER_CONSTANT, value=(128, 128, 128)
        )
        image = image.astype(np.float32)
        # Normalize to [0,1]
        image /= 255.0
        # HWC to CHW format
        image = np.transpose(image, [2, 0, 1])
        # CHW to NCHW format
        image = np.expand_dims(image, axis=0)
        # Convert the image to row-major order, also known as "C order"
        image = np.ascontiguousarray(image)
        return image, image_raw, h, w

    def xywh2xyxy(self, origin_h, origin_w, x):
        """
        description: Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2],
                     where xy1 = top-left, xy2 = bottom-right
        param:
            origin_h:   height of original image
            origin_w:   width of original image
            x:          a boxes numpy array, each row is a box [center_x, center_y, w, h]
        return:
            y:          a boxes numpy array, each row is a box [x1, y1, x2, y2]
        """
        y = np.zeros_like(x)
        r_w = self.input_w / origin_w
        r_h = self.input_h / origin_h
        if r_h > r_w:
            y[:, 0] = x[:, 0] - x[:, 2] / 2
            y[:, 2] = x[:, 0] + x[:, 2] / 2
            y[:, 1] = x[:, 1] - x[:, 3] / 2 - (self.input_h - r_w * origin_h) / 2
            y[:, 3] = x[:, 1] + x[:, 3] / 2 - (self.input_h - r_w * origin_h) / 2
            y /= r_w
        else:
            y[:, 0] = x[:, 0] - x[:, 2] / 2 - (self.input_w - r_h * origin_w) / 2
            y[:, 2] = x[:, 0] + x[:, 2] / 2 - (self.input_w - r_h * origin_w) / 2
            y[:, 1] = x[:, 1] - x[:, 3] / 2
            y[:, 3] = x[:, 1] + x[:, 3] / 2
            y /= r_h
        return y

    def post_process(self, output, origin_h, origin_w):
        """
        description: postprocess the prediction
        param:
            output:     a numpy array like [num_boxes, cx, cy, w, h, conf, cls_id, cx, cy, w, h, conf, cls_id, ...]
            origin_h:   height of original image
            origin_w:   width of original image
        return:
            result_boxes:   final boxes, a numpy array, each row is a box [x1, y1, x2, y2]
            result_scores:  final scores, a numpy array, each element is the score corresponding to a box
            result_classid: final class ids, a numpy array, each element is the class id corresponding to a box
        """
        # Get the number of boxes detected
        num = int(output[0])
        # Reshape to a two-dimensional ndarray
        pred = np.reshape(output[1:], (-1, 6))[:num, :]
        # Do nms
        boxes = self.non_max_suppression(
            pred, origin_h, origin_w, conf_thres=CONF_THRESH, nms_thres=IOU_THRESHOLD
        )
        result_boxes = boxes[:, :4] if len(boxes) else np.array([])
        result_scores = boxes[:, 4] if len(boxes) else np.array([])
        result_classid = boxes[:, 5] if len(boxes) else np.array([])
        return result_boxes, result_scores, result_classid

    def bbox_iou(self, box1, box2, x1y1x2y2=True):
        """
        description: compute the IoU of two bounding boxes
        param:
            box1: a box coordinate (can be (x1, y1, x2, y2) or (x, y, w, h))
            box2: a box coordinate (can be (x1, y1, x2, y2) or (x, y, w, h))
            x1y1x2y2: select the coordinate format
        return:
            iou: computed IoU
        """
        if not x1y1x2y2:
            # Transform from center and width to exact coordinates
            b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
            b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
            b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
            b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
        else:
            # Get the coordinates of bounding boxes
            b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
            b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]

        # Get the coordinates of the intersection rectangle
        inter_rect_x1 = np.maximum(b1_x1, b2_x1)
        inter_rect_y1 = np.maximum(b1_y1, b2_y1)
        inter_rect_x2 = np.minimum(b1_x2, b2_x2)
        inter_rect_y2 = np.minimum(b1_y2, b2_y2)
        # Intersection area
        inter_area = np.clip(inter_rect_x2 - inter_rect_x1 + 1, 0, None) * \
                     np.clip(inter_rect_y2 - inter_rect_y1 + 1, 0, None)
        # Union area
        b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
        b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)

        iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)
        return iou

    def non_max_suppression(self, prediction, origin_h, origin_w, conf_thres=0.5, nms_thres=0.4):
        """
        description: Removes detections with lower object confidence score than 'conf_thres' and performs
                     Non-Maximum Suppression to further filter detections.
        param:
            prediction: detections, (x1, y1, x2, y2, conf, cls_id)
            origin_h:   original image height
            origin_w:   original image width
            conf_thres: a confidence threshold to filter detections
            nms_thres:  an IoU threshold to filter detections
        return:
            boxes: output after nms with the shape (x1, y1, x2, y2, conf, cls_id)
        """
        # Get the boxes with score > conf_thres
        boxes = prediction[prediction[:, 4] >= conf_thres]
        # Transform bbox from [center_x, center_y, w, h] to [x1, y1, x2, y2]
        boxes[:, :4] = self.xywh2xyxy(origin_h, origin_w, boxes[:, :4])
        # Clip the coordinates
        boxes[:, 0] = np.clip(boxes[:, 0], 0, origin_w - 1)
        boxes[:, 2] = np.clip(boxes[:, 2], 0, origin_w - 1)
        boxes[:, 1] = np.clip(boxes[:, 1], 0, origin_h - 1)
        boxes[:, 3] = np.clip(boxes[:, 3], 0, origin_h - 1)
        # Object confidence
        confs = boxes[:, 4]
        # Sort by the confidences
        boxes = boxes[np.argsort(-confs)]
        # Perform non-maximum suppression
        keep_boxes = []
        while boxes.shape[0]:
            large_overlap = self.bbox_iou(np.expand_dims(boxes[0, :4], 0), boxes[:, :4]) > nms_thres
            label_match = boxes[0, -1] == boxes[:, -1]
            # Indices of boxes with lower confidence scores, large IoUs and matching labels
            invalid = large_overlap & label_match
            keep_boxes += [boxes[0]]
            boxes = boxes[~invalid]
        boxes = np.stack(keep_boxes, 0) if len(keep_boxes) else np.array([])
        return boxes


class inferThread(threading.Thread):
    def __init__(self, yolov5_wrapper, image_path_batch):
        threading.Thread.__init__(self)
        self.yolov5_wrapper = yolov5_wrapper
        self.image_path_batch = image_path_batch

    def run(self):
        batch_image_raw, use_time = self.yolov5_wrapper.infer(
            self.yolov5_wrapper.get_raw_image(self.image_path_batch)
        )
        for i, img_path in enumerate(self.image_path_batch):
            parent, filename = os.path.split(img_path)
            save_name = os.path.join('output', filename)
            # Save image
            cv2.imwrite(save_name, batch_image_raw[i])
        print('input->{}, time->{:.2f}ms, saving into output/'.format(
            self.image_path_batch, use_time * 1000))


class warmUpThread(threading.Thread):
    def __init__(self, yolov5_wrapper):
        threading.Thread.__init__(self)
        self.yolov5_wrapper = yolov5_wrapper

    def run(self):
        batch_image_raw, use_time = self.yolov5_wrapper.infer(
            self.yolov5_wrapper.get_raw_image_zeros()
        )
        print('warm_up->{}, time->{:.2f}ms'.format(
            batch_image_raw[0].shape, use_time * 1000))


if __name__ == "__main__":
    # Load custom plugin and engine
    PLUGIN_LIBRARY = "build/libmyplugins.so"
    engine_file_path = "build/yolov5s.engine"

    if len(sys.argv) > 1:
        engine_file_path = sys.argv[1]
    if len(sys.argv) > 2:
        PLUGIN_LIBRARY = sys.argv[2]

    ctypes.CDLL(PLUGIN_LIBRARY)

    # Load COCO labels
    categories = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
                  "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
                  "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
                  "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
                  "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
                  "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
                  "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
                  "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
                  "hair drier", "toothbrush"]

    if os.path.exists('output/'):
        shutil.rmtree('output/')
    os.makedirs('output/')
    # A YoLov5TRT instance
    yolov5_wrapper = YoLov5TRT(engine_file_path)
    try:
        print('batch size is', yolov5_wrapper.batch_size)
        image_dir = "TestImages/"
        image_path_batches = get_img_path_batches(yolov5_wrapper.batch_size, image_dir)
        for i in range(10):
            # Create a new thread to do warm-up
            thread1 = warmUpThread(yolov5_wrapper)
            thread1.start()
            thread1.join()
        for batch in image_path_batches:
            # Create a new thread to do inference
            thread1 = inferThread(yolov5_wrapper, batch)
            thread1.start()
            thread1.join()
    finally:
        # Destroy the instance
        yolov5_wrapper.destroy()
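
The slicing in post_process relies on the engine's fixed output layout: each image yields 6001 floats, i.e. one detection count followed by room for (6001 - 1) / 6 = 1000 candidate boxes of six floats each (cx, cy, w, h, conf, cls_id). A minimal sketch of that decode step on hand-made data follows; the box values are illustrative only, not real engine output.

import numpy as np

# Fake output buffer for one image: 1 count + 1000 * 6 box floats = 6001 values.
output = np.zeros(6001, dtype=np.float32)
output[0] = 2  # two valid detections
output[1:7] = [320, 240, 100, 80, 0.9, 0]   # [cx, cy, w, h, conf, cls_id] in network-input coordinates
output[7:13] = [100, 100, 50, 50, 0.7, 2]

# The same decode that post_process performs before NMS:
num = int(output[0])
pred = np.reshape(output[1:], (-1, 6))[:num, :]
print(pred.shape)  # (2, 6): one row per detection

As the argv handling in __main__ shows, the script runs as python yolov5_trt.py [engine_file] [plugin_library], defaulting to build/yolov5s.engine and build/libmyplugins.so from the engine build directory.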

(f) Camera detection:

"""
TensorRT YOLOv5 Real-time Camera Detection
"""
import ctypes
import os
import time
import cv2
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
import random

# Detection parameters
CONF_THRESH = 0.5
IOU_THRESHOLD = 0.4


class YoLov5TRT(object):
    """YOLOv5 TensorRT inference class"""

    def __init__(self, engine_file_path):
        # Create the CUDA context
        self.ctx = cuda.Device(0).make_context()
        self.stream = cuda.Stream()
        # Load the TensorRT engine
        TRT_LOGGER = trt.Logger(trt.Logger.INFO)
        runtime = trt.Runtime(TRT_LOGGER)
        with open(engine_file_path, "rb") as f:
            engine = runtime.deserialize_cuda_engine(f.read())
        self.context = engine.create_execution_context()
        # Prepare input/output buffers
        self.host_inputs = []
        self.cuda_inputs = []
        self.host_outputs = []
        self.cuda_outputs = []
        self.bindings = []
        for binding in engine:
            size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
            dtype = trt.nptype(engine.get_binding_dtype(binding))
            # Allocate host and device memory
            host_mem = cuda.pagelocked_empty(size, dtype)
            cuda_mem = cuda.mem_alloc(host_mem.nbytes)
            self.bindings.append(int(cuda_mem))
            if engine.binding_is_input(binding):
                self.input_w = engine.get_binding_shape(binding)[-1]
                self.input_h = engine.get_binding_shape(binding)[-2]
                self.host_inputs.append(host_mem)
                self.cuda_inputs.append(cuda_mem)
            else:
                self.host_outputs.append(host_mem)
                self.cuda_outputs.append(cuda_mem)
        # Other parameters
        self.engine = engine
        self.batch_size = engine.max_batch_size

    def infer(self, raw_image):
        """Single-frame inference"""
        self.ctx.push()
        # Preprocess
        input_image, image_raw, origin_h, origin_w = self.preprocess_image(raw_image)
        # Copy input data
        np.copyto(self.host_inputs[0], input_image.ravel())
        # Asynchronous inference
        start = time.time()
        cuda.memcpy_htod_async(self.cuda_inputs[0], self.host_inputs[0], self.stream)
        self.context.execute_async(batch_size=1, bindings=self.bindings, stream_handle=self.stream.handle)
        cuda.memcpy_dtoh_async(self.host_outputs[0], self.cuda_outputs[0], self.stream)
        self.stream.synchronize()
        inference_time = time.time() - start
        # Postprocess
        output = self.host_outputs[0]
        result_boxes, result_scores, result_classid = self.post_process(output, origin_h, origin_w)
        # Draw detection boxes
        for j in range(len(result_boxes)):
            box = result_boxes[j]
            self.plot_one_box(
                box,
                image_raw,
                label="{}:{:.2f}".format(categories[int(result_classid[j])], result_scores[j]),
            )
        self.ctx.pop()
        return image_raw, inference_time

    def preprocess_image(self, raw_bgr_image):
        """Image preprocessing"""
        image_raw = raw_bgr_image
        h, w, c = image_raw.shape
        image = cv2.cvtColor(image_raw, cv2.COLOR_BGR2RGB)
        # Compute the scale ratio and padding
        r_w = self.input_w / w
        r_h = self.input_h / h
        if r_h > r_w:
            tw = self.input_w
            th = int(r_w * h)
            tx1 = tx2 = 0
            ty1 = int((self.input_h - th) / 2)
            ty2 = self.input_h - th - ty1
        else:
            tw = int(r_h * w)
            th = self.input_h
            tx1 = int((self.input_w - tw) / 2)
            tx2 = self.input_w - tw - tx1
            ty1 = ty2 = 0
        # ... (the source article is truncated at this point; see the sketch below)
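
The article is cut off mid-function here: the rest of preprocess_image, the plot_one_box and post_process helpers, and the capture loop are missing from the source. For orientation, here is a minimal sketch of what the main camera loop typically looks like with the class above. The camera index, window name, and file paths are assumptions, and `categories` refers to the COCO label list from the batch script.

# A minimal sketch of the missing camera loop; it assumes the class above is
# complete and that `categories`, the engine, and the plugin paths match the
# batch example. All paths and the camera index are assumptions.
if __name__ == "__main__":
    ctypes.CDLL("build/libmyplugins.so")           # custom YOLO layer plugin
    yolov5_wrapper = YoLov5TRT("build/yolov5s.engine")
    cap = cv2.VideoCapture(0)                      # USB camera; a CSI camera needs a GStreamer pipeline string
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            result_img, t = yolov5_wrapper.infer(frame)
            # Overlay the inference time as an FPS estimate
            cv2.putText(result_img, "FPS: {:.1f}".format(1.0 / t), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            cv2.imshow("YOLOv5 TensorRT", result_img)
            if cv2.waitKey(1) & 0xFF == ord('q'):  # press q to quit
                break
    finally:
        cap.release()
        cv2.destroyAllWindows()
        yolov5_wrapper.destroy()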