当前位置: 首页 > news >正文

Autodl训练Faster-RCNN网络--自己的数据集(二)

在上一篇,Autodl训练Faster-RCNN网络--自己的数据集(一)-CSDN博客

我们已经成功跑通了 Faster R-CNN,但如果要在论文中做对比实验,还需要四个精度指标等数据。博主原有的 test 代码只能输出 mAP@0.5,所以还需要增加计算其余评估指标的代码。

参考文章:

Faster-RCNN: 目标检测Faster-RCNN的Pytorch版本实现。README附详细使用教程。

Faster RCNN训练自己的数据集-CSDN博客

下面就是test_net.py代码,经过修改后能够输出四个指标,但是以下几点需要注意:

  1. 其中测试集路径需要根据实际情况进行修改
  2. 我只有5个类别,运行之前需要检查代码是否符合你的类别数量
# Imports for the Faster R-CNN evaluation script.
# NOTE: the original paste had several statements fused together on one line
# (e.g. "print_functionimport _init_paths"); they are restored here, and the
# duplicated "import numpy as np" is dropped.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import _init_paths

import argparse
import pickle
import pdb
import pprint
import re
import sys
import time
import os
import xml.etree.ElementTree as ET  # XML parsing for ground-truth annotations

import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable

# Make sibling project modules importable when run as a script.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from lib.datasets.voc_eval import voc_eval
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.rpn.bbox_transform import clip_boxes
# from model.nms.nms_wrapper import nms
from model.roi_layers import nms
from model.rpn.bbox_transform import bbox_transform_inv
from model.utils.net_utils import save_net, load_net, vis_detections
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.resnet import resnet

# Python 2/3 compatibility: alias xrange to range on Python 3.
try:
    xrange  # Python 2
except NameError:
    xrange = range  # Python 3
# IoU helper added for the custom precision/recall computation below.
def bbox_iou(boxes1, boxes2):
    """Compute pairwise IoU between two sets of boxes.

    Args:
        boxes1: numpy array of shape [N, 4] as (xmin, ymin, xmax, ymax).
        boxes2: numpy array of shape [M, 4], same layout.

    Returns:
        numpy array of shape [N, M]: IoU of every boxes1/boxes2 pair
        (0 where the union area is 0, thanks to the guarded divide).
    """
    # Per-box areas (width * height; boxes assumed xmax >= xmin etc.).
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    # Intersection rectangle coordinates, broadcast to [N, M].
    xx1 = np.maximum(boxes1[:, 0].reshape(-1, 1), boxes2[:, 0].reshape(1, -1))
    yy1 = np.maximum(boxes1[:, 1].reshape(-1, 1), boxes2[:, 1].reshape(1, -1))
    xx2 = np.minimum(boxes1[:, 2].reshape(-1, 1), boxes2[:, 2].reshape(1, -1))
    yy2 = np.minimum(boxes1[:, 3].reshape(-1, 1), boxes2[:, 3].reshape(1, -1))
    # Clamp to zero so disjoint boxes contribute no intersection.
    w = np.maximum(0, xx2 - xx1)
    h = np.maximum(0, yy2 - yy1)
    inter = w * h
    union = area1.reshape(-1, 1) + area2.reshape(1, -1) - inter
    # Guarded divide avoids 0/0 for degenerate (zero-area) boxes.
    iou = np.divide(inter, union, out=np.zeros_like(inter), where=union != 0)
    return iou


def parse_args():
    """Parse command-line arguments for the evaluation run."""
    parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
    parser.add_argument('--dataset', dest='dataset',
                        help='training dataset',
                        default='pascal_voc', type=str)
    parser.add_argument('--cfg', dest='cfg_file',
                        help='optional config file',
                        default='cfgs/vgg16.yml', type=str)
    parser.add_argument('--net', dest='net',
                        help='vgg16, res50, res101, res152',
                        default='res101', type=str)
    parser.add_argument('--set', dest='set_cfgs',
                        help='set config keys', default=None,
                        nargs=argparse.REMAINDER)
    parser.add_argument('--load_dir', dest='load_dir',
                        help='directory to load models', default="models",
                        type=str)
    parser.add_argument('--cuda', dest='cuda',
                        help='whether use CUDA',
                        action='store_true')
    parser.add_argument('--ls', dest='large_scale',
                        help='whether use large imag scale',
                        action='store_true')
    parser.add_argument('--mGPUs', dest='mGPUs',
                        help='whether use multiple GPUs',
                        action='store_true')
    parser.add_argument('--cag', dest='class_agnostic',
                        help='whether perform class_agnostic bbox regression',
                        action='store_true')
    parser.add_argument('--parallel_type', dest='parallel_type',
                        help='which part of model to parallel, 0: all, 1: model before roi pooling',
                        default=0, type=int)
    parser.add_argument('--checksession', dest='checksession',
                        help='checksession to load model',
                        default=1, type=int)
    parser.add_argument('--checkepoch', dest='checkepoch',
                        help='checkepoch to load network',
                        default=1, type=int)
    parser.add_argument('--checkpoint', dest='checkpoint',
                        help='checkpoint to load network',
                        default=10021, type=int)
    parser.add_argument('--vis', dest='vis',
                        help='visualization mode',
                        action='store_true')
    args = parser.parse_args()
    return args


# Training hyper-parameters mirrored from the config (unused by this test
# script itself, kept for parity with trainval_net.py).
lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
weight_decay = cfg.TRAIN.WEIGHT_DECAY


def save_detections_manually(all_boxes, output_dir, imdb):
    """Write one VOC-style detection file per class (comp4_det_test_<cls>.txt).

    Each line: "<image_id> <score> <xmin> <ymin> <xmax> <ymax>", emitted in
    data-loader order so voc_eval can consume the files directly.
    """
    os.makedirs(output_dir, exist_ok=True)
    for cls_idx in range(1, imdb.num_classes):  # skip the background class
        cls_name = imdb.classes[cls_idx]
        det_file = os.path.join(output_dir, f"comp4_det_test_{cls_name}.txt")
        with open(det_file, "w") as f:
            for i in range(len(all_boxes[cls_idx])):  # index in loader order
                img_id = imdb.image_index[i]  # image id for loader index i
                dets = all_boxes[cls_idx][i]
                if dets.size == 0:
                    continue
                for det in dets:
                    score = det[-1]
                    bbox = det[:4]
                    # Two decimals keeps enough coordinate precision for eval.
                    f.write(f"{img_id} {score:.6f} {bbox[0]:.2f} {bbox[1]:.2f} {bbox[2]:.2f} {bbox[3]:.2f}\n")
    print(f"✅ 检测结果已按图像 ID 顺序保存至 {output_dir}")


if __name__ == '__main__':
    args = parse_args()
    print('Called with args:')
    print(args)

    if torch.cuda.is_available() and not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")

    np.random.seed(cfg.RNG_SEED)

    # Dataset selection: fixes imdb names and anchor settings per dataset.
    if args.dataset == "pascal_voc":
        args.imdb_name = "voc_2007_trainval"
        args.imdbval_name = "voc_2007_test"
        args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
    elif args.dataset == "pascal_voc_0712":
        args.imdb_name = "voc_2007_trainval+voc_2012_trainval"
        args.imdbval_name = "voc_2007_test"
        args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
    elif args.dataset == "coco":
        args.imdb_name = "coco_2014_train+coco_2014_valminusminival"
        args.imdbval_name = "coco_2014_minival"
        args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
    elif args.dataset == "imagenet":
        args.imdb_name = "imagenet_train"
        args.imdbval_name = "imagenet_val"
        args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
    elif args.dataset == "vg":
        args.imdb_name = "vg_150-50-50_minitrain"
        args.imdbval_name = "vg_150-50-50_minival"
        args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']

    args.cfg_file = "cfgs/{}_ls.yml".format(args.net) if args.large_scale else "cfgs/{}.yml".format(args.net)
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    print('Using config:')
    pprint.pprint(cfg)

    cfg.TRAIN.USE_FLIPPED = False
    imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdbval_name, False)
    imdb.config = {'matlab_eval': False}  # force the Python evaluator
    imdb.competition_mode(on=True)
    print('{:d} roidb entries'.format(len(roidb)))

    # NOTE(review): hard-coded checkpoint directory — adjust for your setup.
    input_dir = "/root/autodl-fs/models/vgg16/pascal_voc"
    if not os.path.exists(input_dir):
        raise Exception('There is no input directory for loading network from ' + input_dir)
    load_name = os.path.join(input_dir,
                             'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))

    # Initialize the network.
    if args.net == 'vgg16':
        fasterRCNN = vgg16(imdb.classes, pretrained=False, class_agnostic=args.class_agnostic)
    elif args.net == 'res101':
        fasterRCNN = resnet(imdb.classes, 101, pretrained=False, class_agnostic=args.class_agnostic)
    elif args.net == 'res50':
        fasterRCNN = resnet(imdb.classes, 50, pretrained=False, class_agnostic=args.class_agnostic)
    elif args.net == 'res152':
        fasterRCNN = resnet(imdb.classes, 152, pretrained=False, class_agnostic=args.class_agnostic)
    else:
        print("network is not defined")
        pdb.set_trace()

    fasterRCNN.create_architecture()

    print("load checkpoint %s" % (load_name))
    checkpoint = torch.load(load_name)
    fasterRCNN.load_state_dict(checkpoint['model'])
    if 'pooling_mode' in checkpoint.keys():
        cfg.POOLING_MODE = checkpoint['pooling_mode']
    print('load model successfully!')

    # Reusable input tensors; resized per batch in the loop below.
    im_data = torch.FloatTensor(1)
    im_info = torch.FloatTensor(1)
    num_boxes = torch.LongTensor(1)
    gt_boxes = torch.FloatTensor(1)

    if args.cuda:
        im_data = im_data.cuda()
        im_info = im_info.cuda()
        num_boxes = num_boxes.cuda()
        gt_boxes = gt_boxes.cuda()

    im_data = Variable(im_data)
    im_info = Variable(im_info)
    num_boxes = Variable(num_boxes)
    gt_boxes = Variable(gt_boxes)

    if args.cuda:
        cfg.CUDA = True
    if args.cuda:
        fasterRCNN.cuda()

    start = time.time()
    max_per_image = 100
    vis = args.vis
    if vis:
        thresh = 0.05
    else:
        thresh = 0.01  # lower score threshold keeps more boxes for evaluation

    save_name = 'faster_rcnn_10'
    num_images = len(imdb.image_index)
    # all_boxes[cls][image] = N x 5 array of detections (x1, y1, x2, y2, score).
    all_boxes = [[[] for _ in xrange(num_images)]
                 for _ in xrange(imdb.num_classes)]

    output_dir = get_output_dir(imdb, save_name)
    print("output_dir:", output_dir)
    dataset = roibatchLoader(roidb, ratio_list, ratio_index, 1,
                             imdb.num_classes, training=False, normalize=False)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
                                             shuffle=False, num_workers=0,
                                             pin_memory=True)
    data_iter = iter(dataloader)

    _t = {'im_detect': time.time(), 'misc': time.time()}
    det_file = os.path.join(output_dir, 'detections.pkl')

    fasterRCNN.eval()
    empty_array = np.transpose(np.array([[], [], [], [], []]), (1, 0))

    # ---- Detection loop over the whole test set ----
    for i in range(num_images):
        data = next(data_iter)
        with torch.no_grad():
            im_data.resize_(data[0].size()).copy_(data[0])
            im_info.resize_(data[1].size()).copy_(data[1])
            gt_boxes.resize_(data[2].size()).copy_(data[2])
            num_boxes.resize_(data[3].size()).copy_(data[3])

        det_tic = time.time()
        rois, cls_prob, bbox_pred, \
        rpn_loss_cls, rpn_loss_box, \
        RCNN_loss_cls, RCNN_loss_bbox, \
        rois_label = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)

        scores = cls_prob.data
        boxes = rois.data[:, :, 1:5]

        if cfg.TEST.BBOX_REG:
            # Apply bounding-box regression deltas.
            box_deltas = bbox_pred.data
            if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
                # Optionally denormalize targets by precomputed mean/stdev.
                if args.class_agnostic:
                    box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
                                 + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
                    box_deltas = box_deltas.view(1, -1, 4)
                else:
                    box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
                                 + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
                    box_deltas = box_deltas.view(1, -1, 4 * len(imdb.classes))

            pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
            pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)
        else:
            # Simply repeat the boxes, once for each class.
            pred_boxes = np.tile(boxes, (1, scores.shape[1]))

        # Undo the test-time image scaling (data[1][0][2] is the scale factor).
        pred_boxes /= data[1][0][2].item()

        scores = scores.squeeze()
        pred_boxes = pred_boxes.squeeze()
        det_toc = time.time()
        detect_time = det_toc - det_tic
        misc_tic = time.time()
        if vis:
            im = cv2.imread(imdb.image_path_at(i))
            im2show = np.copy(im)

        # Per-class NMS and collection into all_boxes.
        for j in xrange(1, imdb.num_classes):
            inds = torch.nonzero(scores[:, j] > thresh).view(-1)
            if inds.numel() > 0:
                cls_scores = scores[:, j][inds]
                _, order = torch.sort(cls_scores, 0, True)
                if args.class_agnostic:
                    cls_boxes = pred_boxes[inds, :]
                else:
                    cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4]

                cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)
                # cls_dets = torch.cat((cls_boxes, cls_scores), 1)
                cls_dets = cls_dets[order]
                keep = nms(cls_boxes[order, :], cls_scores[order], cfg.TEST.NMS)
                cls_dets = cls_dets[keep.view(-1).long()]
                if vis:
                    im2show = vis_detections(im2show, imdb.classes[j], cls_dets.cpu().numpy(), 0.3)
                all_boxes[j][i] = cls_dets.cpu().numpy()
            else:
                all_boxes[j][i] = empty_array

        # Limit to max_per_image detections *over all classes*.
        if max_per_image > 0:
            image_scores = np.hstack([all_boxes[j][i][:, -1]
                                      for j in xrange(1, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in xrange(1, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]

        misc_toc = time.time()
        nms_time = misc_toc - misc_tic

        sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s   \r'
                         .format(i + 1, num_images, detect_time, nms_time))
        sys.stdout.flush()

        if vis:
            cv2.imwrite('result.png', im2show)
            pdb.set_trace()
            # cv2.imshow('test', im2show)
            # cv2.waitKey(0)

    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

    # Dump per-class detection files for voc_eval.
    save_detections_manually(all_boxes, output_dir, imdb)

    # ---- Custom precision/recall at a fixed score threshold ----
    MINOVERLAP = 0.5  # IoU matching threshold, consistent with mAP@0.5
    # Ground-truth annotation path template.
    annopath = os.path.join(cfg.DATA_DIR, 'Annotations', '{}.xml')

    # Parse ground-truth boxes once into a dict to avoid re-reading XML.
    print("\n[解析真实框]")
    gt_dict = {}
    for image_id in imdb.image_index:
        xml_path = annopath.format(image_id)
        root = ET.parse(xml_path).getroot()
        gt_boxes = []
        for obj in root.findall('object'):
            obj_name = obj.find('name').text
            if obj_name == '__background__':
                continue
            bndbox = obj.find('bndbox')
            xmin = float(bndbox.find('xmin').text)
            ymin = float(bndbox.find('ymin').text)
            xmax = float(bndbox.find('xmax').text)
            ymax = float(bndbox.find('ymax').text)
            gt_boxes.append({
                'class': obj_name,
                'bbox': [xmin, ymin, xmax, ymax],
                'difficult': int(obj.find('difficult').text) if obj.find('difficult') is not None else 0
            })
        gt_dict[image_id] = gt_boxes
    print(f"✅ 真实框解析完成,共 {len(gt_dict)} 张图像")

    print("\n[计算特定置信度下的评估指标]")
    score_threhold = 0.5  # confidence threshold; adjust as needed
    mean_precision = []
    mean_recall = []

    for cls_idx in range(1, imdb.num_classes):
        cls_name = imdb.classes[cls_idx]
        dets = all_boxes[cls_idx]
        all_detections = []
        # Collect this class's detections across all images (score-filtered).
        for i in range(num_images):
            img_id = imdb.image_index[i]
            img_dets = dets[i]
            if img_dets.size == 0:
                continue
            keep = np.where(img_dets[:, -1] >= score_threhold)[0]
            filtered_dets = img_dets[keep]
            all_detections.extend(filtered_dets)

        if not all_detections:
            mean_precision.append(0.0)
            mean_recall.append(0.0)
            continue

        all_detections = np.array(all_detections)
        det_bboxes = all_detections[:, :4]  # [xmin, ymin, xmax, ymax]
        det_scores = all_detections[:, 4]   # confidence

        # All non-difficult ground-truth boxes of this class.
        gt_bboxes = []
        for image_id in imdb.image_index:
            for obj in gt_dict[image_id]:
                if obj['class'] == cls_name and obj['difficult'] == 0:
                    gt_bboxes.append(obj['bbox'])
        gt_bboxes = np.array(gt_bboxes)
        num_gt = len(gt_bboxes)
        if num_gt == 0:
            mean_precision.append(0.0)
            mean_recall.append(0.0)
            continue

        ious = bbox_iou(det_bboxes, gt_bboxes)

        # Sort detections by descending confidence before greedy matching.
        sorted_indices = np.argsort(-det_scores)
        det_bboxes_sorted = det_bboxes[sorted_indices]
        det_scores_sorted = det_scores[sorted_indices]
        ious_sorted = ious[sorted_indices]

        gt_matched = np.zeros(num_gt, dtype=bool)
        tp = np.zeros(len(det_bboxes_sorted))  # true positives
        fp = np.zeros(len(det_bboxes_sorted))  # false positives

        # Greedy matching: each GT box may be claimed by one detection only.
        # NOTE(review): this matches against GT boxes pooled over ALL images,
        # not per image — cross-image matches are possible; verify intent.
        for d_idx, (bbox, iou_row) in enumerate(zip(det_bboxes_sorted, ious_sorted)):
            max_iou = np.max(iou_row)
            max_gt_idx = np.argmax(iou_row)
            if max_iou >= MINOVERLAP and not gt_matched[max_gt_idx]:
                tp[d_idx] = 1
                gt_matched[max_gt_idx] = True
            else:
                fp[d_idx] = 1

        cum_tp = np.cumsum(tp)
        cum_fp = np.cumsum(fp)
        precision = cum_tp / (cum_tp + cum_fp + 1e-8)
        recall = cum_tp / (num_gt + 1e-8)

        # Take P/R at the last detection whose score still meets the threshold.
        valid_indices = np.where(det_scores_sorted >= score_threhold)[0]
        if valid_indices.size > 0:
            last_valid_idx = valid_indices[-1]
            mean_prec = precision[last_valid_idx]
            mean_rec = recall[last_valid_idx]
        else:
            mean_prec = 0.0
            mean_rec = 0.0

        mean_precision.append(mean_prec)
        mean_recall.append(mean_rec)
        print(f"  {cls_name}: Precision@{score_threhold}={mean_prec:.4f}, Recall@{score_threhold}={mean_rec:.4f}")

    mean_precision_50 = np.nanmean(mean_precision)
    mean_recall_50 = np.nanmean(mean_recall)
    print(f"所有类别的平均: Precision@{score_threhold}={mean_precision_50:.4f}, Recall@{score_threhold}={mean_recall_50:.4f}")

    # ---- Official mAP@0.5 via imdb.evaluate_detections ----
    official_mAP50 = None
    if hasattr(imdb, '_det_results') and 'mAP@0.5' in imdb._det_results:
        official_mAP50 = imdb._det_results['mAP@0.5']
    else:
        # Fall back to capturing and parsing the evaluator's stdout.
        from io import StringIO
        original_stdout = sys.stdout
        sys.stdout = captured_output = StringIO()
        imdb.evaluate_detections(all_boxes, output_dir)
        sys.stdout = original_stdout
        output_lines = captured_output.getvalue().split('\n')
        # Keep the raw output around for debugging.
        with open('eval_output.txt', 'w') as f:
            f.write('\n'.join(output_lines))
        # Try several output formats used by different imdb implementations.
        patterns = [
            r"Mean AP = (\d+\.\d+)",
            r"mAP@0.5 = (\d+\.\d+)",
            r"AP@0.5 = (\d+\.\d+)",
            r"Mean AP @ 0.5 = (\d+\.\d+)"
        ]
        for line in output_lines:
            for pattern in patterns:
                match = re.search(pattern, line)
                if match:
                    official_mAP50 = float(match.group(1))
                    break
            if official_mAP50 is not None:
                break
    if official_mAP50 is None:
        raise ValueError("未找到官方评估的mAP@0.5结果")

    # ---- mAP sweep over IoU thresholds 0.5:0.05:0.95 via voc_eval ----
    annopath = os.path.join(cfg.DATA_DIR, 'Annotations', '{0}.xml')
    imagesetfile = os.path.join(cfg.DATA_DIR, 'ImageSets/Main/test.txt')
    det_file_pattern = os.path.join(output_dir, 'comp4_det_test_{}.txt')
    iou_thresholds = np.arange(0.5, 1.0, 0.05)
    mAPs = []
    precisions_at_50 = []
    recalls_at_50 = []
    class_metrics = {}  # per-class evaluation results

    print("\n[验证评估文件]")
    all_files_exist = True
    for cls in imdb.classes[1:]:  # skip background
        filename = det_file_pattern.format(cls)
        if os.path.exists(filename):
            print(f"✅ 找到类别 {cls} 的评估文件")
        else:
            print(f"❌ 未找到类别 {cls} 的评估文件: {filename}")
            all_files_exist = False
    if not all_files_exist:
        print("\n⚠️ 部分评估文件缺失,可能影响自定义评估结果")

    for iou in iou_thresholds:
        print(f"\n=== 计算IoU阈值 {iou:.2f} ===")
        aps = []
        for cls in imdb.classes[1:]:
            if cls == '__background__':
                continue
            filename = det_file_pattern.format(cls)
            if not os.path.exists(filename):
                print(f"  ! 文件不存在: {filename},使用AP=0")
                aps.append(0.0)
                continue
            try:
                cachedir = os.path.join(output_dir, 'cache')
                os.makedirs(cachedir, exist_ok=True)
                # AP plus the full precision/recall curves for this class.
                rec, prec, ap = voc_eval(filename, annopath, imagesetfile, cls,
                                         ovthresh=iou, use_07_metric=True, cachedir=cachedir)
                aps.append(ap)
                if cls not in class_metrics:
                    class_metrics[cls] = {}
                class_metrics[cls][iou] = {
                    'ap': ap,
                    'precision': prec,  # full precision curve
                    'recall': rec       # full recall curve
                }
                # Record curve averages only at IoU=0.5 (0.5 is exactly
                # representable, so the float equality is safe here).
                if iou == 0.5:
                    mean_prec = np.mean(prec) if len(prec) > 0 else 0.0
                    mean_rec = np.mean(rec) if len(rec) > 0 else 0.0
                    precisions_at_50.append(mean_prec)
                    recalls_at_50.append(mean_rec)
                    print(f"  ✅ {cls}: AP={ap:.4f}, Precision={mean_prec:.4f}, Recall={mean_rec:.4f}")
            except Exception as e:
                print(f"  ❌ 计算 {cls} 时出错: {str(e)},使用AP=0")
                aps.append(0.0)
        current_mAP = np.nanmean(aps)
        mAPs.append(current_mAP)
        print(f"IoU {iou:.2f} | mAP = {current_mAP:.4f}")

    # ---- Final report ----
    mAP50 = mAPs[0] if mAPs else 0.0          # mAP@0.5
    mAP50_95 = np.mean(mAPs) if mAPs else 0.0  # mAP@[0.5:0.95]
    mean_precision_50_voc = np.nanmean(precisions_at_50) if precisions_at_50 else 0.0  # class-avg Precision@0.5 (VOC)
    mean_recall_50_voc = np.nanmean(recalls_at_50) if recalls_at_50 else 0.0  # class-avg Recall@0.5 (VOC)

    print("\n" + "=" * 50)
    print(f"{'评估指标':<20}{'值':>15}")
    print("-" * 50)
    print(f"官方 mAP@0.5:{official_mAP50:>15.4f}")  # official evaluator result
    print(f"自定义 mAP@0.5:{mAP50:>15.4f}")  # custom result (should match official)
    print(f"mAP@[0.5:0.95]:{mAP50_95:>15.4f}")  # COCO-style mAP
    print(f"Mean Precision@0.5:{mean_precision_50:>15.4f}")  # custom, at fixed confidence
    print(f"Mean Recall@0.5:{mean_recall_50:>15.4f}")  # custom, at fixed confidence
    # print(f"VOC Precision@0.5:{mean_precision_50_voc:>15.4f}")  # VOC curve average
    # print(f"VOC Recall@0.5:{mean_recall_50_voc:>15.4f}")  # VOC curve average
    print("=" * 50)

    end = time.time()
    print("test time: %0.4fs" % (end - start))

相关文章:

  • C++虚函数与类对象模型深度解析
  • binlog 解析工具——my2sql
  • 使用JSP踩过的坑
  • Double使用注意事项
  • 【编程实践】利用open3d对点云进行聚类并可视化
  • Kotlin IR编译器插件开发指南
  • 互联网大厂Java求职面试:短视频平台大规模实时互动系统架构设计
  • Eigen 库实现最小二乘算法(Least Squares)
  • Unity基础学习(九)Resources资源同步与异步加载
  • 如何在 Linux 系统中永久禁用交换分区 ?
  • 实验绘图参考-0525版(自用)
  • PostgreSQL 与 MongoDB:为您的数据项目选择合适的数据库
  • 记录第一次正式收到SCI期刊论文的审稿
  • Ubantu22.04离线安装、卸载mysql8.0.39并设置开机自启
  • 深入理解 Linux 的 set、env 和 printenv 命令
  • 使用粘滞键修改windows密码
  • 医学写作供应商管理全流程优化
  • 前端课设Web2
  • 微服务——网关
  • 第九章 云平台开发
  • 酒店网站建设方案ppt/网站404页面怎么做
  • wordpress多站点的路径/关键词首页排名优化平台
  • 网站建设相关的网站/seo链接优化建议
  • 四川成都进出口贸易公司/北京seo地址
  • 帝国cms做企业网站/西安搜建站科技网站
  • 秦淮区建设局网站/西安seo网站优化