当前位置: 首页 > news >正文

sam2分割空心物体

目录

sam2分割空心物体,并求轮廓距离

分割空心物体:


sam2分割空心物体,并求轮廓距离

"""Segment a hollow object with SAM2 and measure contour distances.

For every *.jpg in a fixed directory the script:
  1. picks up to 5 red pixels as positive point prompts,
  2. runs SAM2 single-mask prediction,
  3. splits the mask contours into outer/inner via the RETR_CCOMP hierarchy,
  4. reports the mean inner->outer nearest-neighbour distance (wall
     thickness) and the distance from a reference point to each contour.
"""
import argparse
import gc
import json
import os
import os.path as osp
import sys
import time
from glob import glob

import cv2
import numpy as np
import supervision as sv
import torch
from scipy.spatial import cKDTree

sys.path.append("./sam2")
from sam2.build_sam import build_sam2
from sam2.sam2_image_predictor import SAM2ImagePredictor


def determine_model_cfg(model_path):
    """Map a SAM2 checkpoint filename to its config YAML path.

    Raises:
        ValueError: if no known size tag ("large", "base_plus", "small",
            "tiny") appears in *model_path*.
    """
    if "large" in model_path:
        return "configs/samurai/sam2.1_hiera_l.yaml"
    elif "base_plus" in model_path:
        return "configs/samurai/sam2.1_hiera_b+.yaml"
    elif "small" in model_path:
        return "configs/samurai/sam2.1_hiera_s.yaml"
    elif "tiny" in model_path:
        return "configs/samurai/sam2.1_hiera_t.yaml"
    else:
        raise ValueError("Unknown model size in path!")


def top_red_points(img, top_k=5):
    """Return up to *top_k* (x, y) coordinates of red pixels in a BGR image.

    Red is matched in HSV over both hue wrap-around bands.  Note the
    inRange mask is binary (0/255), so the score sort does not rank pixels
    meaningfully -- the function effectively returns *top_k* red pixels.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # Red occupies both ends of the hue circle -> two bands.
    lower_red1, upper_red1 = np.array([0, 100, 100]), np.array([10, 255, 255])
    lower_red2, upper_red2 = np.array([170, 100, 100]), np.array([180, 255, 255])
    mask = cv2.bitwise_or(
        cv2.inRange(hsv, lower_red1, upper_red1),
        cv2.inRange(hsv, lower_red2, upper_red2),
    )
    red_pixels = np.column_stack(np.where(mask > 0))  # rows of (y, x)
    if red_pixels.size == 0:
        return []
    scores = mask[red_pixels[:, 0], red_pixels[:, 1]]
    sorted_idx = np.argsort(scores)[::-1]
    # Swap numpy (row, col) into OpenCV (x, y) point order.
    return [(int(red_pixels[idx][1]), int(red_pixels[idx][0]))
            for idx in sorted_idx[:top_k]]


def min_distance_point_to_contour(point, contours):
    """Minimum Euclidean distance from *point* (x, y) to any contour vertex.

    Args:
        point: (x, y) tuple.
        contours: list of OpenCV contours, each shaped (N, 1, 2).

    Returns:
        float -- inf when *contours* is empty.
    """
    min_dist = float('inf')
    for cnt in contours:
        pts = cnt[:, 0, :]  # (N, 2)
        dists = np.linalg.norm(pts - point, axis=1)
        min_dist = min(min_dist, dists.min())
    return min_dist


def main(args):
    model_cfg = determine_model_cfg(args.model_path)
    device = "cuda:0"
    sam2_image_predictor = SAM2ImagePredictor(
        build_sam2(model_cfg, args.model_path, device=device))
    dir_a = r"D:\data\jiezhi\lunkuo_test"
    files = glob(os.path.join(dir_a, "*.jpg"))
    for img_path in files:
        new_img_name = os.path.basename(img_path)
        # imdecode+fromfile handles non-ASCII Windows paths that cv2.imread cannot.
        frame = cv2.imdecode(np.fromfile(img_path, dtype=np.uint8), 1)
        h_o, w_o = frame.shape[:2]
        # NOTE(review): despite the name this is (height, width) -- i.e. the
        # bottom-right corner, not the image centre.  Presumably the intent
        # was (h // 2, w // 2); confirm before changing, behavior kept as-is.
        h_center, w_center = frame.shape[:2]
        sam2_image_predictor.set_image(frame)
        positive_points = top_red_points(frame, top_k=5)
        if not positive_points:
            # No red markers -> nothing to prompt SAM2 with; skip the image.
            continue
        negative_points = [[w_center, h_center]]  # single negative prompt
        point_coords = np.array(positive_points + negative_points)
        # FIX: label count must track the actual number of prompts; the
        # original hard-coded five positive labels ([1,1,1,1,1,0]) and
        # broke whenever top_red_points returned fewer than five points.
        point_labels = np.array(
            [1] * len(positive_points) + [0] * len(negative_points),
            dtype=np.int32)
        masks, scores, logits = sam2_image_predictor.predict(
            point_coords=point_coords,
            point_labels=point_labels,
            box=None,
            multimask_output=False,
        )
        # Normalise the mask output to a single 2-D mask regardless of
        # predictor output rank.
        mask = None
        if masks.ndim == 2:
            mask = masks
            masks = masks[None]
        elif masks.ndim == 3:
            mask = masks[0]
        elif masks.ndim == 4:
            masks = masks.squeeze(1)
        non_zero_indices = np.argwhere(mask > 0)
        # Labelme-style annotation skeleton (only written in the dead branch
        # below).
        annotation = {'version': "5.3.1", 'flags': {}, 'imageData': None,
                      'imageHeight': h_o, 'imageWidth': w_o,
                      'imagePath': new_img_name, 'box_xie': []}
        if len(non_zero_indices) > 0:
            # RETR_CCOMP builds a two-level hierarchy: contours with no
            # parent (h[3] == -1) are outer boundaries, children are holes.
            contours, hierarchy = cv2.findContours(
                mask.astype(np.uint8), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
            outer_contours, inner_contours = [], []
            if hierarchy is not None:
                hierarchy = hierarchy[0]  # findContours wraps it in an extra dim
                for i, h in enumerate(hierarchy):
                    if h[3] == -1:
                        outer_contours.append(contours[i])  # no parent -> outer
                    else:
                        inner_contours.append(contours[i])  # has parent -> hole
            # Drop tiny fragments, then keep only the largest of each kind.
            outer_contours = [cnt for cnt in outer_contours if len(cnt) > 100]
            inner_contours = [cnt for cnt in inner_contours if len(cnt) > 100]
            if len(outer_contours) > 1:
                outer_contours = [max(outer_contours, key=len)]
            if len(inner_contours) > 1:
                inner_contours = [max(inner_contours, key=len)]
            if len(outer_contours) == 0 or len(inner_contours) == 0:
                continue
            print(f"外轮廓数量: {len(outer_contours[0])}, 内轮廓数量: {len(inner_contours[0])}")
            out_dis = min_distance_point_to_contour((w_center, h_center), outer_contours)
            in_dis = min_distance_point_to_contour((w_center, h_center), inner_contours)
            outer_pts = outer_contours[0][:, 0, :]
            inner_pts = inner_contours[0][:, 0, :]
            # Mean inner->outer nearest-neighbour distance ~= wall thickness.
            tree = cKDTree(outer_pts)
            dists, _ = tree.query(inner_pts)
            avg_dist = np.mean(dists)
            print("avg_dist:", avg_dist, 'in_dis', in_dis, 'out_dis', out_dis)
            # FIX: scale the mask to 0/255 uint8 before converting to BGR;
            # SAM2 masks are bool/float, which cvtColor rejects or imshow
            # renders incorrectly.
            vis = cv2.cvtColor((mask > 0).astype(np.uint8) * 255, cv2.COLOR_GRAY2BGR)
            cv2.putText(vis, f'{avg_dist:.1f}', (50, 50), cv2.FONT_HERSHEY_SIMPLEX,
                        0.8, (0, 0, 255), 2)
            cv2.drawContours(vis, outer_contours, -1, (0, 255, 0), 2)  # outer: green
            cv2.drawContours(vis, inner_contours, -1, (0, 0, 255), 2)  # inner: red
            cv2.imshow("contours", vis)
            cv2.waitKey(0)
            if 0:
                # Dead branch kept from the original; NOTE(review):
                # json_path is undefined here and would raise NameError if
                # ever enabled.
                points = non_zero_indices[:, [1, 0]].astype(np.float32)  # (x, y)
                new_shape = {"label": "xie", "points": points.tolist(),
                             "group_id": None, "description": "",
                             "shape_type": "rectangle", "flags": {}}
                annotation['box_xie'].append(new_shape)
                with open(json_path, 'w') as file:
                    json.dump(annotation, file, indent=4)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # parser.add_argument("--model_path", default=r"D:\data\models\sam2.1_hiera_large.pt",)
    parser.add_argument("--model_path", default=r"D:\data\models\sam2.1_hiera_small.pt",)
    # parser.add_argument("--model_path", default=r"D:\data\models\sam2.1_hiera_base_plus.pt",
    parser.add_argument("--save_to_video", default=True, help="Save results to a video.")
    args = parser.parse_args()
    main(args)

分割空心物体:

"""Segment a hollow object with SAM2 and visualize outer vs. inner contours.

For every *.jpg in a fixed directory the script picks up to 3 red pixels
as positive point prompts, runs SAM2 single-mask prediction, splits the
resulting mask contours into outer boundaries and holes via the
RETR_CCOMP hierarchy, and displays them (outer green, inner red).
"""
import argparse
import gc
import json
import os
import os.path as osp
import sys
import time
from glob import glob

import cv2
import numpy as np
import supervision as sv
import torch

sys.path.append("./sam2")
from sam2.build_sam import build_sam2
from sam2.sam2_image_predictor import SAM2ImagePredictor


def determine_model_cfg(model_path):
    """Map a SAM2 checkpoint filename to its config YAML path.

    Raises:
        ValueError: if no known size tag ("large", "base_plus", "small",
            "tiny") appears in *model_path*.
    """
    if "large" in model_path:
        return "configs/samurai/sam2.1_hiera_l.yaml"
    elif "base_plus" in model_path:
        return "configs/samurai/sam2.1_hiera_b+.yaml"
    elif "small" in model_path:
        return "configs/samurai/sam2.1_hiera_s.yaml"
    elif "tiny" in model_path:
        return "configs/samurai/sam2.1_hiera_t.yaml"
    else:
        raise ValueError("Unknown model size in path!")


def top_red_points(img, top_k=5):
    """Return up to *top_k* (x, y) coordinates of red pixels in a BGR image.

    Red is matched in HSV over both hue wrap-around bands.  Note the
    inRange mask is binary (0/255), so the score sort does not rank pixels
    meaningfully -- the function effectively returns *top_k* red pixels.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # Red occupies both ends of the hue circle -> two bands.
    lower_red1, upper_red1 = np.array([0, 100, 100]), np.array([10, 255, 255])
    lower_red2, upper_red2 = np.array([170, 100, 100]), np.array([180, 255, 255])
    mask = cv2.bitwise_or(
        cv2.inRange(hsv, lower_red1, upper_red1),
        cv2.inRange(hsv, lower_red2, upper_red2),
    )
    red_pixels = np.column_stack(np.where(mask > 0))  # rows of (y, x)
    if red_pixels.size == 0:
        return []
    scores = mask[red_pixels[:, 0], red_pixels[:, 1]]
    sorted_idx = np.argsort(scores)[::-1]
    # Swap numpy (row, col) into OpenCV (x, y) point order.
    return [(int(red_pixels[idx][1]), int(red_pixels[idx][0]))
            for idx in sorted_idx[:top_k]]


def main(args):
    model_cfg = determine_model_cfg(args.model_path)
    device = "cuda:0"
    sam2_image_predictor = SAM2ImagePredictor(
        build_sam2(model_cfg, args.model_path, device=device))
    dir_a = r"D:\data\jiezhi\lunkuo_test"
    files = glob(os.path.join(dir_a, "*.jpg"))
    for img_path in files:
        new_img_name = os.path.basename(img_path)
        # imdecode+fromfile handles non-ASCII Windows paths that cv2.imread cannot.
        frame = cv2.imdecode(np.fromfile(img_path, dtype=np.uint8), 1)
        h_o, w_o = frame.shape[:2]
        # NOTE(review): despite the name this is (height, width) -- i.e. the
        # bottom-right corner, not the image centre.  Presumably the intent
        # was (h // 2, w // 2); confirm before changing, behavior kept as-is.
        h_center, w_center = frame.shape[:2]
        sam2_image_predictor.set_image(frame)
        positive_points = top_red_points(frame, top_k=3)
        if not positive_points:
            # No red markers -> nothing to prompt SAM2 with; skip the image.
            continue
        negative_points = [[w_center, h_center]]  # single negative prompt
        point_coords = np.array(positive_points + negative_points)
        # FIX: label count must track the actual number of prompts; the
        # original hard-coded [1, 1, 1, 0] and broke whenever
        # top_red_points returned fewer than three points.
        point_labels = np.array(
            [1] * len(positive_points) + [0] * len(negative_points),
            dtype=np.int32)
        masks, scores, logits = sam2_image_predictor.predict(
            point_coords=point_coords,
            point_labels=point_labels,
            box=None,
            multimask_output=False,
        )
        # Normalise the mask output to a single 2-D mask regardless of
        # predictor output rank.
        mask = None
        if masks.ndim == 2:
            mask = masks
            masks = masks[None]
        elif masks.ndim == 3:
            mask = masks[0]
        elif masks.ndim == 4:
            masks = masks.squeeze(1)
        non_zero_indices = np.argwhere(mask > 0)
        # Labelme-style annotation skeleton (only written in the dead branch
        # below).
        annotation = {'version': "5.3.1", 'flags': {}, 'imageData': None,
                      'imageHeight': h_o, 'imageWidth': w_o,
                      'imagePath': new_img_name, 'box_xie': []}
        if len(non_zero_indices) > 0:
            # RETR_CCOMP builds a two-level hierarchy: contours with no
            # parent (h[3] == -1) are outer boundaries, children are holes.
            contours, hierarchy = cv2.findContours(
                mask.astype(np.uint8), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
            outer_contours, inner_contours = [], []
            if hierarchy is not None:
                hierarchy = hierarchy[0]  # findContours wraps it in an extra dim
                for i, h in enumerate(hierarchy):
                    if h[3] == -1:
                        outer_contours.append(contours[i])  # no parent -> outer
                    else:
                        inner_contours.append(contours[i])  # has parent -> hole
            print(f"外轮廓数量: {len(outer_contours)}, 内轮廓数量: {len(inner_contours)}")
            # FIX: scale the mask to 0/255 uint8 before converting to BGR;
            # SAM2 masks are bool/float, which cvtColor rejects or imshow
            # renders incorrectly.
            vis = cv2.cvtColor((mask > 0).astype(np.uint8) * 255, cv2.COLOR_GRAY2BGR)
            cv2.drawContours(vis, outer_contours, -1, (0, 255, 0), 2)  # outer: green
            cv2.drawContours(vis, inner_contours, -1, (0, 0, 255), 2)  # inner: red
            cv2.imshow("contours", vis)
            cv2.waitKey(0)
            if 0:
                # Dead branch kept from the original; NOTE(review):
                # json_path is undefined here and would raise NameError if
                # ever enabled.
                points = non_zero_indices[:, [1, 0]].astype(np.float32)  # (x, y)
                new_shape = {"label": "xie", "points": points.tolist(),
                             "group_id": None, "description": "",
                             "shape_type": "rectangle", "flags": {}}
                annotation['box_xie'].append(new_shape)
                with open(json_path, 'w') as file:
                    json.dump(annotation, file, indent=4)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=r"D:\data\models\sam2.1_hiera_large.pt",
                        help="Path to the model checkpoint.")
    parser.add_argument("--save_to_video", default=True, help="Save results to a video.")
    args = parser.parse_args()
    main(args)

http://www.dtcms.com/a/579621.html

相关文章:

  • wordpress 仿站教程wordpress 获取自定义字段值
  • 网站下方一般放什么原因html5 爱情网站模板
  • 建立网站需要准备的材料网站开发广告语大全
  • easyui做的网站发布活动的平台app
  • 阿里巴巴网站域名网页添加兼容性站点
  • 网站空间管理平台行业网站运营计划
  • RestTemplate设置接口超时时间的方法
  • 简述网站建设的过程wordpress 自定义筛选
  • 南京网站推广营销公司网站建设找哪个
  • dell网站设计特色wordpress基础
  • 深一网站建设火星时代ui设计培训怎么样
  • 蓝衫网站建设用html5做的网站代码
  • 域名回收网站wdlinux wordpress
  • 东莞网站推广怎么样拼多多一件代发免费货源
  • 香水网站设计网页怎么查公司联系方式
  • 图表设计网站漳州 网站建设公司哪家好
  • 为网站网站做宣传网页设计发展前景分析
  • 网站制作公司哪儿济南兴田德润有活动吗网络推广这个工作怎么样
  • 化妆品企业网站建设的缺点电子商务专业毕业从事什么工作
  • 莱芜正规的网站建设做网站协议怎么签
  • 企业网站开发制作免费制作小程序软件
  • 开办网站备案帮卖货平台
  • 企业网站建设方案精英百度网站怎样做
  • 如何建设免费网站视频wordpress模板 众筹
  • 有哪些做品牌特卖的网站重庆有哪些网站
  • 电子商务网站建设前的分析pageadmin建站系统破解版
  • 北京网站建设推广服linux php网站部署
  • 苏州网站建设制作公司网站建设预算策划
  • 网站建设主持词ps在线图片编辑
  • 企业网站建设教程视频易语言 wordpress登录注册