A MATLAB-Based 3D Reconstruction System

This MATLAB 3D reconstruction system implements the full pipeline from multi-view images to a 3D point cloud and surface mesh. It comprises the core modules of feature extraction, structure from motion (SfM), multi-view stereo (MVS), and surface reconstruction.
%% MATLAB 3D Reconstruction System
clear; close all; clc;

%% ====================== System Parameters ======================
% Dataset selection
dataset = 'custom';               % 'temple', 'fountain', 'custom'
imageScale = 0.5;                 % Image scale factor (speeds up processing)
maxPoints = 5000;                 % Maximum number of feature points per image
reconstructionType = 'dense';     % 'sparse' or 'dense'
visualizeSteps = true;            % Visualize intermediate steps

% Camera parameters (adjust per dataset)
switch dataset
    case 'temple'
        imageDir = fullfile('data', 'templeRing');
        K = [1520.4, 0, 302.32; 0, 1525.9, 246.87; 0, 0, 1];      % Intrinsic matrix
    case 'fountain'
        imageDir = fullfile('data', 'fountain');
        K = [2759.48, 0, 1520.69; 0, 2764.16, 1006.81; 0, 0, 1];
    otherwise % custom
        imageDir = fullfile('data', 'custom');
        % Default camera parameters (calibration is usually required)
        K = [2500, 0, 1000; 0, 2500, 750; 0, 0, 1];
end

% Wrap K for Computer Vision Toolbox functions that expect a cameraParameters object
cameraParams = cameraParameters('IntrinsicMatrix', K');

% Create the results directory
resultDir = fullfile('results', dataset);
if ~exist(resultDir, 'dir')
    mkdir(resultDir);
end

fprintf('3D reconstruction system starting...\n');
fprintf('Dataset: %s\n', dataset);
fprintf('Reconstruction type: %s\n', reconstructionType);
fprintf('Image directory: %s\n', imageDir);

%% ====================== Data Preparation ======================
% Read the image sequence
imageFiles = dir(fullfile(imageDir, '*.jpg'));
if isempty(imageFiles)
    imageFiles = dir(fullfile(imageDir, '*.png'));
end
numImages = min(length(imageFiles), 10);   % Use at most 10 images
fprintf('Found %d images\n', numImages);

% Load and preprocess the images
images = cell(1, numImages);
for i = 1:numImages
    img = imread(fullfile(imageDir, imageFiles(i).name));
    if imageScale ~= 1
        img = imresize(img, imageScale);
    end
    images{i} = img;
    % Save the preprocessed image
    imwrite(img, fullfile(resultDir, sprintf('preprocessed_%02d.jpg', i)));
end

% Display the image sequence
if visualizeSteps
    figure('Name', 'Input image sequence', 'Position', [100, 100, 1200, 600]);
    for i = 1:numImages
        subplot(2, ceil(numImages/2), i);
        imshow(images{i});
        title(sprintf('Image %d', i));
    end
    saveas(gcf, fullfile(resultDir, 'input_sequence.jpg'));
end

%% ====================== Feature Extraction and Matching ======================
fprintf('\n=== Feature Extraction and Matching ===\n');

% Detect feature points
features = cell(1, numImages);
for i = 1:numImages
    fprintf('Extracting features from image %d...\n', i);
    grayImg = rgb2gray(images{i});
    points = detectSURFFeatures(grayImg, 'MetricThreshold', 500);

    % Limit the number of feature points
    if points.Count > maxPoints
        points = points.selectStrongest(maxPoints);
    end

    % Extract feature descriptors
    [f, validPoints] = extractFeatures(grayImg, points);
    features{i} = struct('points', validPoints, 'features', f);

    % Visualize the feature points
    if visualizeSteps
        figure('Name', sprintf('Image %d feature points', i));
        imshow(images{i}); hold on;
        plot(validPoints.selectStrongest(50));   % Show the 50 strongest points
        title(sprintf('Image %d: %d feature points', i, validPoints.Count));
        saveas(gcf, fullfile(resultDir, sprintf('features_%02d.jpg', i)));
        close;
    end
end

% Match feature points
matches = cell(numImages, numImages);
for i = 1:numImages-1
    for j = i+1:numImages
        fprintf('Matching images %d and %d...\n', i, j);
        indexPairs = matchFeatures(features{i}.features, features{j}.features, ...
            'MatchThreshold', 1.0, 'MaxRatio', 0.7);
        matchedPoints1 = features{i}.points(indexPairs(:, 1));
        matchedPoints2 = features{j}.points(indexPairs(:, 2));
        matches{i,j} = struct('points1', matchedPoints1, 'points2', matchedPoints2);

        % Visualize the matching result
        if visualizeSteps && size(indexPairs, 1) > 10
            figure('Name', sprintf('Images %d and %d matches', i, j));
            showMatchedFeatures(images{i}, images{j}, matchedPoints1, matchedPoints2, 'montage');
            title(sprintf('Images %d and %d: %d matches', i, j, size(indexPairs, 1)));
            saveas(gcf, fullfile(resultDir, sprintf('matches_%02d_%02d.jpg', i, j)));
            close;
        end
    end
end

%% ====================== Structure from Motion (SfM) ======================
fprintf('\n=== Structure from Motion (SfM) ===\n');

% Initialize camera poses
cameraPoses = cell(1, numImages);
pointTracks = [];

% Select the initial image pair
if numImages < 2
    error('At least two images are required for reconstruction');
end

% Find the image pair with the most matches
maxMatches = 0;
bestPair = [1,2];
for i = 1:numImages-1
    for j = i+1:numImages
        if isfield(matches{i,j}, 'points1')
            numMatches = matches{i,j}.points1.Count;
            if numMatches > maxMatches
                maxMatches = numMatches;
                bestPair = [i,j];
            end
        end
    end
end
fprintf('Initializing the reconstruction with images %d and %d...\n', bestPair(1), bestPair(2));

% Estimate the essential matrix
points1 = matches{bestPair(1),bestPair(2)}.points1.Location;
points2 = matches{bestPair(1),bestPair(2)}.points2.Location;
[E, inliers] = estimateEssentialMatrix(points1, points2, cameraParams, ...
    'Confidence', 99.99, 'MaxDistance', 0.8);

% Recover the relative camera pose from the essential matrix
[relOrient, relLoc] = relativeCameraPose(E, cameraParams, points1(inliers,:), points2(inliers,:));

% Set the initial camera poses
cameraPoses{bestPair(1)} = rigid3d(eye(3), [0,0,0]);
cameraPoses{bestPair(2)} = rigid3d(relOrient, relLoc);

% Triangulate the initial point cloud (uses the local triangulate function defined below)
points3D = triangulate(points1(inliers,:), points2(inliers,:), ...
    cameraPoses{bestPair(1)}, cameraPoses{bestPair(2)}, K);

% Create the initial point cloud
ptCloud = pointCloud(points3D);

% Visualize the initial reconstruction
if visualizeSteps
    figure('Name', 'Initial reconstruction');
    pcshow(ptCloud, 'VerticalAxis', 'y', 'VerticalAxisDir', 'down');
    xlabel('X'); ylabel('Y'); zlabel('Z');
    title('Initial point cloud');
    saveas(gcf, fullfile(resultDir, 'initial_pointcloud.jpg'));
end
% Incrementally add the remaining views
for i = 1:numImages
    if i == bestPair(1) || i == bestPair(2)
        continue;   % Skip images that have already been processed
    end
    fprintf('Adding image %d...\n', i);

    % Find the already-reconstructed image with the most matches to the new image
    bestView = 0;
    maxMatches = 0;
    for j = 1:numImages
        if j ~= i && ~isempty(cameraPoses{j})
            if isfield(matches{min(i,j),max(i,j)}, 'points1')
                numMatches = matches{min(i,j),max(i,j)}.points1.Count;
                if numMatches > maxMatches
                    maxMatches = numMatches;
                    bestView = j;
                end
            end
        end
    end
    if bestView == 0
        warning('No matching view found for image %d', i);
        continue;
    end

    % Matched points
    idx1 = min(i, bestView);
    idx2 = max(i, bestView);
    points2D = matches{idx1,idx2}.points1.Location;
    points3D = matches{idx1,idx2}.points2.Location;
    if idx1 == i
        temp = points2D;
        points2D = points3D;
        points3D = temp;
    end

    % Estimate the camera pose by solving a PnP problem
    % TODO: points3D should be triangulated 3-D world points tracked across views;
    % matched 2-D locations are used here as a simplification.
    [worldOrientation, worldLocation, inliers] = estimateWorldCameraPose(...
        points2D, points3D, cameraParams, 'Confidence', 99.99, 'MaxReprojectionError', 2);

    % Store the camera pose
    cameraPoses{i} = rigid3d(worldOrientation, worldLocation);

    % Triangulate new points
    for j = 1:numImages
        if j ~= i && ~isempty(cameraPoses{j})
            idx1 = min(i, j);
            idx2 = max(i, j);
            if isfield(matches{idx1,idx2}, 'points1')
                points1 = matches{idx1,idx2}.points1.Location;
                points2 = matches{idx1,idx2}.points2.Location;
                if idx1 == i
                    temp = points1;
                    points1 = points2;
                    points2 = temp;
                end
                % Triangulation
                newPoints = triangulate(points1, points2, ...
                    cameraPoses{j}, cameraPoses{i}, K);
                % Append to the point cloud
                ptCloud = pccat([ptCloud, pointCloud(newPoints)]);
            end
        end
    end
end

% Save the sparse point cloud
sparseCloud = ptCloud;
save(fullfile(resultDir, 'sparse_pointcloud.mat'), 'sparseCloud');

% Visualize the sparse point cloud
if visualizeSteps
    figure('Name', 'Sparse reconstruction');
    pcshow(sparseCloud, 'VerticalAxis', 'y', 'VerticalAxisDir', 'down');
    xlabel('X'); ylabel('Y'); zlabel('Z');
    title(sprintf('Sparse point cloud (%d points)', sparseCloud.Count));
    saveas(gcf, fullfile(resultDir, 'sparse_reconstruction.jpg'));
end

%% ====================== Multi-View Stereo (MVS) ======================
if strcmp(reconstructionType, 'dense')
    fprintf('\n=== Multi-View Stereo (MVS) ===\n');

    % Compute depth maps
    depthMaps = cell(1, numImages);
    for i = 1:numImages
        if isempty(cameraPoses{i})
            continue;
        end
        fprintf('Computing the depth map for image %d...\n', i);

        % Select neighboring views
        neighbors = [];
        for j = 1:numImages
            if j ~= i && ~isempty(cameraPoses{j})
                % Baseline distance
                dist = norm(cameraPoses{i}.Translation - cameraPoses{j}.Translation);
                if dist > 0.1 && dist < 2.0   % Reasonable baseline range
                    neighbors = [neighbors, j];
                end
            end
            if length(neighbors) >= 2   % Use at most two neighboring views
                break;
            end
        end
        if isempty(neighbors)
            warning('No suitable neighboring view found for image %d', i);
            continue;
        end

        % Compute the depth map with patch-based matching
        depthMap = computeDepthMap(images{i}, images{neighbors(1)}, ...
            cameraPoses{i}, cameraPoses{neighbors(1)}, K);

        % Store the depth map
        depthMaps{i} = depthMap;

        % Visualize the depth map
        if visualizeSteps
            figure('Name', sprintf('Image %d depth map', i));
            imagesc(depthMap);
            colormap jet; colorbar;
            axis image;
            title(sprintf('Image %d depth map', i));
            saveas(gcf, fullfile(resultDir, sprintf('depthmap_%02d.jpg', i)));
        end
    end

    % Fuse the depth maps into a dense point cloud
    denseCloud = fuseDepthMaps(depthMaps, images, cameraPoses, K);

    % Save the dense point cloud
    save(fullfile(resultDir, 'dense_pointcloud.mat'), 'denseCloud');

    % Visualize the dense point cloud
    if visualizeSteps
        figure('Name', 'Dense reconstruction');
        pcshow(denseCloud, 'VerticalAxis', 'y', 'VerticalAxisDir', 'down');
        xlabel('X'); ylabel('Y'); zlabel('Z');
        title(sprintf('Dense point cloud (%d points)', denseCloud.Count));
        saveas(gcf, fullfile(resultDir, 'dense_reconstruction.jpg'));
    end

    finalCloud = denseCloud;
else
    finalCloud = sparseCloud;
end

%% ====================== Surface Reconstruction ======================
fprintf('\n=== Surface Reconstruction ===\n');

% Point-cloud preprocessing
fprintf('Preprocessing the point cloud...\n');
filteredCloud = pcdownsample(finalCloud, 'gridAverage', 0.005);
filteredCloud = pcdenoise(filteredCloud, 'NumNeighbors', 50);

% Normal estimation
fprintf('Estimating normals...\n');
normals = pcnormals(filteredCloud);
filteredCloud.Normal = normals;

% Poisson surface reconstruction (pc2surfacemesh requires the Lidar Toolbox)
fprintf('Poisson surface reconstruction...\n');
surfMesh = pc2surfacemesh(filteredCloud, 'poisson', 10);
mesh = struct('vertices', surfMesh.Vertices, 'faces', surfMesh.Faces);

% Mesh simplification
fprintf('Simplifying the mesh...\n');
mesh = reducepatch(mesh, 0.1);   % Keep 10% of the faces

% Save the mesh
save(fullfile(resultDir, 'surface_mesh.mat'), 'mesh');

% Visualize the mesh
figure('Name', 'Reconstructed surface mesh');
patch('Faces', mesh.faces, 'Vertices', mesh.vertices, ...
    'FaceColor', [0.8, 0.8, 1.0], 'EdgeColor', 'none', ...
    'FaceLighting', 'gouraud', 'AmbientStrength', 0.5);
light('Position', [0, 0, 1], 'Style', 'infinite');
light('Position', [0, 1, 0], 'Style', 'infinite');
axis equal; grid on; view(3);
xlabel('X'); ylabel('Y'); zlabel('Z');
title('Reconstructed surface mesh');
saveas(gcf, fullfile(resultDir, 'surface_mesh.jpg'));

% Texture mapping
if strcmp(reconstructionType, 'dense')
    fprintf('Texture mapping...\n');
    texturedMesh = textureMapping(mesh, images, cameraPoses, K);

    % Visualize the textured mesh
    figure('Name', 'Textured surface mesh');
    patch('Faces', texturedMesh.faces, 'Vertices', texturedMesh.vertices, ...
        'FaceVertexCData', texturedMesh.vertexColors, ...
        'FaceColor', 'interp', 'EdgeColor', 'none');
    axis equal; grid on; view(3);
    xlabel('X'); ylabel('Y'); zlabel('Z');
    title('Textured surface mesh');
    saveas(gcf, fullfile(resultDir, 'textured_mesh.jpg'));
end

fprintf('\n3D reconstruction complete! Results saved in %s\n', resultDir);

%% ====================== Helper Functions ======================
% Linear triangulation (this local function shadows the toolbox triangulate)
function points3D = triangulate(points1, points2, pose1, pose2, K)
    % Convert points to homogeneous coordinates
    points1 = [points1, ones(size(points1,1),1)];
    points2 = [points2, ones(size(points2,1),1)];

    % Camera projection matrices
    P1 = K * [pose1.Rotation; pose1.Translation]';
    P2 = K * [pose2.Rotation; pose2.Translation]';

    % Triangulate each correspondence with the DLT method
    points3D = zeros(size(points1,1), 3);
    for i = 1:size(points1,1)
        A = [points1(i,1)*P1(3,:) - P1(1,:);
             points1(i,2)*P1(3,:) - P1(2,:);
             points2(i,1)*P2(3,:) - P2(1,:);
             points2(i,2)*P2(3,:) - P2(2,:)];
        [~, ~, V] = svd(A);
        X = V(:, end)';
        points3D(i, :) = X(1:3) / X(4);
    end
end

% Depth-map computation
function depthMap = computeDepthMap(refImg, neighborImg, refPose, neighborPose, K)
    % Convert to grayscale
    refGray = rgb2gray(refImg);
    neighborGray = rgb2gray(neighborImg);

    % Image size
    [h, w] = size(refGray);

    % Depth range estimate (simplified)
    maxDepth = 10;          % Maximum depth
    minDepth = 0.1;         % Minimum depth
    numDepthPlanes = 50;    % Number of depth planes

    % Depth map
    depthMap = zeros(h, w);

    % Relative transformation
    relRot = neighborPose.Rotation * refPose.Rotation';
    relTrans = neighborPose.Translation - refPose.Translation * relRot;

    % Essential / fundamental matrices (not used further in this simplified search)
    E = relRot * skewSymmetric(relTrans);
    F = inv(K)' * E * inv(K);

    % Estimate a depth at every sampled pixel
    for y = 1:5:h    % Stride of 5 to speed up the computation
        for x = 1:5:w
            % Plane-sweep search along the viewing ray
            bestNCC = -1;
            bestDepth = 0;

            % Sample candidate depths
            for d = linspace(minDepth, maxDepth, numDepthPlanes)
                % Hypothesized 3-D point from the reference image
                ray = inv(K) * [x; y; 1];
                point3D = refPose.Translation + d * (refPose.Rotation' * ray)';

                % Project into the neighboring image
                proj = K * (neighborPose.Rotation * point3D' + neighborPose.Translation');
                proj = proj / proj(3);
                u = round(proj(1)); v = round(proj(2));

                % Check that the projection lies inside the image
                if u < 1 || u > w || v < 1 || v > h
                    continue;
                end

                % Extract local patches
                refPatch = extractPatch(refGray, x, y, 5);
                neighborPatch = extractPatch(neighborGray, u, v, 5);

                % Normalized cross-correlation (NCC)
                ncc = computeNCC(refPatch, neighborPatch);

                % Update the best depth
                if ncc > bestNCC
                    bestNCC = ncc;
                    bestDepth = d;
                end
            end

            % Store the best depth
            if bestDepth > 0
                depthMap(y, x) = bestDepth;
            end
        end
    end

    % Fill holes in the depth map
    depthMap = fillDepthHoles(depthMap);
end

% Patch extraction
function patch = extractPatch(img, x, y, patchSize)
    % 'patchSize' avoids shadowing the built-in size function
    halfSize = floor(patchSize/2);
    [h, w] = size(img);
    xmin = max(1, x-halfSize);
    xmax = min(w, x+halfSize);
    ymin = max(1, y-halfSize);
    ymax = min(h, y+halfSize);
    patch = img(ymin:ymax, xmin:xmax);
end

% Normalized cross-correlation (NCC)
function ncc = computeNCC(patch1, patch2)
    if numel(patch1) ~= numel(patch2) || numel(patch1) == 0
        ncc = -1;
        return;
    end
    patch1 = double(patch1(:));
    patch2 = double(patch2(:));
    mean1 = mean(patch1);
    mean2 = mean(patch2);
    patch1 = patch1 - mean1;
    patch2 = patch2 - mean2;
    norm1 = norm(patch1);
    norm2 = norm(patch2);
    if norm1 == 0 || norm2 == 0
        ncc = -1;
    else
        ncc = dot(patch1, patch2) / (norm1 * norm2);
    end
end

% Depth-map hole filling
function filledDepth = fillDepthHoles(depthMap)
    filledDepth = depthMap;
    [h, w] = size(depthMap);

    % Find holes (pixels with zero depth)
    [y, x] = find(depthMap == 0);
    for i = 1:length(y)
        % Extract the neighborhood
        xmin = max(1, x(i)-3);
        xmax = min(w, x(i)+3);
        ymin = max(1, y(i)-3);
        ymax = min(h, y(i)+3);
        neighborhood = depthMap(ymin:ymax, xmin:xmax);

        % Average the non-zero depth values
        validDepths = neighborhood(neighborhood > 0);
        if ~isempty(validDepths)
            filledDepth(y(i), x(i)) = mean(validDepths);
        end
    end
end

% Depth-map fusion
function ptCloud = fuseDepthMaps(depthMaps, images, cameraPoses, K)
    % 'images' is passed in explicitly because local functions do not share the script workspace

    % Point containers
    allPoints = [];
    allColors = [];

    % Loop over all depth maps
    for i = 1:length(depthMaps)
        if isempty(depthMaps{i}) || isempty(cameraPoses{i})
            continue;
        end

        % Depth map and corresponding image
        depthMap = depthMaps{i};
        img = images{i};

        % Pixel grid
        [h, w] = size(depthMap);
        [u, v] = meshgrid(1:w, 1:h);

        % Back-project into 3-D space
        rays = inv(K) * [u(:), v(:), ones(numel(u),1)]';
        rays = rays ./ vecnorm(rays);

        % Apply the depths
        points3D = rays .* depthMap(:)';
        points3D = cameraPoses{i}.Rotation * points3D + cameraPoses{i}.Translation';

        % Colors
        colors = reshape(img, [], 3);
        validIdx = depthMap(:) > 0;

        % Accumulate the point cloud
        allPoints = [allPoints; points3D(:, validIdx)'];
        allColors = [allColors; colors(validIdx, :)];
    end

    % Create the point-cloud object
    ptCloud = pointCloud(allPoints, 'Color', allColors);

    % Filter the point cloud
    ptCloud = pcdenoise(ptCloud);
    ptCloud = pcdownsample(ptCloud, 'gridAverage', 0.005);
end

% Texture mapping
function texturedMesh = textureMapping(mesh, images, cameraPoses, K)
    % Initialize vertex colors
    numVertices = size(mesh.vertices, 1);
    vertexColors = zeros(numVertices, 3);
    vertexCounts = zeros(numVertices, 1);

    % Loop over all images
    for imgIdx = 1:length(images)
        if isempty(cameraPoses{imgIdx})
            continue;
        end

        % Project the vertices into the current image
        pose = cameraPoses{imgIdx};
        verticesHomog = [mesh.vertices, ones(numVertices, 1)]';
        proj = K * (pose.Rotation * verticesHomog(1:3,:) + pose.Translation');
        proj = proj ./ proj(3,:);
        u = proj(1,:); v = proj(2,:);

        % Visibility check (inside the image bounds)
        [h, w, ~] = size(images{imgIdx});
        valid = u >= 1 & u <= w & v >= 1 & v <= h;

        % Accumulate colors for visible vertices
        for vIdx = find(valid)
            u_int = round(u(vIdx));
            v_int = round(v(vIdx));
            color = double(images{imgIdx}(v_int, u_int, :)) / 255;

            vertexColors(vIdx, :) = vertexColors(vIdx, :) + color(:)';
            vertexCounts(vIdx) = vertexCounts(vIdx) + 1;
        end
    end

    % Average the accumulated colors
    for vIdx = 1:numVertices
        if vertexCounts(vIdx) > 0
            vertexColors(vIdx, :) = vertexColors(vIdx, :) / vertexCounts(vIdx);
        else
            vertexColors(vIdx, :) = [0.8, 0.8, 0.8];   % Default color
        end
    end

    % Build the textured mesh
    texturedMesh = struct();
    texturedMesh.vertices = mesh.vertices;
    texturedMesh.faces = mesh.faces;
    texturedMesh.vertexColors = vertexColors;
end

% Skew-symmetric matrix
function S = skewSymmetric(v)
    S = [0, -v(3), v(2);
         v(3), 0, -v(1);
         -v(2), v(1), 0];
end
System Features
1. Complete 3D Reconstruction Pipeline
- Data preparation: load and preprocess the input images
- Feature extraction: detect keypoints with the SURF algorithm
- Feature matching: establish feature correspondences across views
- Structure from motion (SfM): estimate camera poses and a sparse point cloud
- Multi-view stereo (MVS): generate depth maps and fuse them into a dense point cloud
- Surface reconstruction: generate a mesh model with Poisson reconstruction
- Texture mapping: project image textures onto the mesh surface
2. Key Techniques
(1) Adaptive view selection
% Initialize the reconstruction with the image pair that has the most matches
maxMatches = 0;
bestPair = [1,2];
for i = 1:numImages-1
    for j = i+1:numImages
        numMatches = matches{i,j}.points1.Count;
        if numMatches > maxMatches
            maxMatches = numMatches;
            bestPair = [i,j];
        end
    end
end
(2) Robust depth estimation
% Depth search based on normalized cross-correlation (NCC)
bestNCC = -1;
bestDepth = 0;
for d = linspace(minDepth, maxDepth, numDepthPlanes)
    % Project the hypothesized 3-D point
    proj = K * (neighborPose.Rotation * point3D' + neighborPose.Translation');
    proj = proj / proj(3);
    % Compute the NCC score
    ncc = computeNCC(refPatch, neighborPatch);
    if ncc > bestNCC
        bestNCC = ncc;
        bestDepth = d;
    end
end
(3) Depth-map fusion
% Fuse the multi-view depth maps
for i = 1:length(depthMaps)
    % Back-project into 3-D space
    rays = inv(K) * [u(:), v(:), ones(numel(u),1)]';
    points3D = rays .* depthMap(:)';
    % Transform to the world coordinate frame
    points3D = cameraPoses{i}.Rotation * points3D + cameraPoses{i}.Translation';
    % Accumulate the point cloud
    allPoints = [allPoints; points3D(:, validIdx)'];
end
(4) Poisson surface reconstruction
% Point-cloud preprocessing
filteredCloud = pcdenoise(pcdownsample(finalCloud, 'gridAverage', 0.005));
% Normal estimation
normals = pcnormals(filteredCloud);
% Poisson reconstruction (pc2surfacemesh requires the Lidar Toolbox)
surfMesh = pc2surfacemesh(filteredCloud, 'poisson', 10);
3. Visualization and Output
Intermediate result visualization
- Feature point detection results
- Cross-view feature matches
- Sparse point-cloud reconstruction
- Depth-map generation
- Dense point-cloud reconstruction
- Surface mesh model
- Textured mesh model
Final outputs
- Sparse/dense point cloud (PLY format; see the export sketch after this list)
- Surface mesh model (OBJ format)
- Textured mesh model
- All intermediate result images
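The main script above saves the point clouds and mesh as .mat files; to actually produce the PLY and OBJ files listed here, an export step along these lines could be appended at the end of the script (a minimal sketch — the output file names are illustrative, and the OBJ writer emits only vertices and faces, without normals or texture coordinates):

% Export the final point cloud to PLY (pcwrite infers the format from the extension)
pcwrite(finalCloud, fullfile(resultDir, 'reconstruction.ply'));

% Write the vertices/faces struct to a minimal OBJ file
fid = fopen(fullfile(resultDir, 'surface_mesh.obj'), 'w');
fprintf(fid, 'v %.6f %.6f %.6f\n', mesh.vertices');   % one 'v' line per vertex
fprintf(fid, 'f %d %d %d\n', mesh.faces');            % one 'f' line per triangle (1-based indices)
fclose(fid);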
Performance Optimization
1. Acceleration Strategies

| Technique | Implementation | Effect |
|---|---|---|
| Image downsampling | img = imresize(img, imageScale) | ~75% less processing time |
| Feature point cap | points = points.selectStrongest(maxPoints) | bounds the matching complexity |
| Depth-map subsampling | for y = 1:5:h | ~25x fewer depth computations |
| Parallel computing | parfor loops (requires the Parallel Computing Toolbox; see the sketch below) | near-linear speedup |
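The parfor entry in the table is not implemented in the script itself; assuming the Parallel Computing Toolbox is available, the per-image feature-extraction loop is a natural candidate because each iteration is independent. A possible variant:

% Parallel variant of the feature-extraction loop (sketch)
featureCell = cell(1, numImages);
parfor i = 1:numImages
    grayImg = rgb2gray(images{i});
    points = detectSURFFeatures(grayImg, 'MetricThreshold', 500);
    points = points.selectStrongest(min(points.Count, maxPoints));
    [f, validPoints] = extractFeatures(grayImg, points);
    featureCell{i} = struct('points', validPoints, 'features', f);
end
features = featureCell;   % same layout as the serial version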
2. Quality Improvement Techniques
- Robust feature matching: ratio test plus geometric verification
- Depth-map hole filling: neighborhood averaging
- Point-cloud denoising: statistical outlier removal (see the sketch after this list)
- Mesh simplification: retains the important geometric features
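The statistical outlier removal listed above maps onto pcdenoise, whose neighborhood size and outlier threshold can be tuned; the values below are illustrative rather than prescriptive:

% Stricter denoising: larger neighborhood, lower outlier threshold (illustrative values)
[cleanCloud, inlierIdx, outlierIdx] = pcdenoise(finalCloud, ...
    'NumNeighbors', 30, 'Threshold', 0.8);
fprintf('Removed %d outlier points\n', numel(outlierIdx));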
Application Scenarios
1. Small-Object Reconstruction
dataset = 'custom';
imageDir = 'data/object';
K = [2500,0,1200; 0,2500,900; 0,0,1]; % Phone-camera intrinsics
imageScale = 0.8; % Higher resolution
Reconstruction results:
- Suitable for small objects (20-50 cm)
- Resolution: 0.5 mm
- Texture quality: high
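The "phone-camera intrinsics" above are typical hand-picked values; if EXIF metadata is available, a rough K can instead be derived from the focal length and sensor width (all numbers below are placeholders for the actual phone):

% Approximate intrinsics from EXIF focal length (placeholder values)
focalLengthMM = 4.2;     % focal length reported in EXIF, in mm
sensorWidthMM = 6.17;    % sensor width, e.g. a typical 1/2.3" sensor
imgWidthPx    = 4000;    % image width in pixels
imgHeightPx   = 3000;    % image height in pixels
fx = focalLengthMM * imgWidthPx / sensorWidthMM;
K  = [fx, 0, imgWidthPx/2; 0, fx, imgHeightPx/2; 0, 0, 1];   % principal point at the image center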
2. Architectural Scene Reconstruction
dataset = 'fountain';
imageScale = 0.5; % Lower resolution
maxPoints = 2000; % Fewer feature points
Reconstruction results:
- Scale: 10-100 m
- Resolution: 2-5 cm
- Processing time: 5-10 minutes (10 images)
3. Cultural Heritage Digitization
reconstructionType = 'dense';
% Use high-resolution images
% Add more views (20-50 images)
Reconstruction results:
- Geometric accuracy: 1-3 mm
- Texture fidelity: high
- Complete surface reconstruction
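Note that the main script caps the sequence at 10 images (numImages = min(length(imageFiles), 10)); to use the 20-50 views suggested for this scenario, that cap has to be raised, for example:

numImages = min(length(imageFiles), 50);   % allow up to 50 views instead of 10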
Usage Guide
1. Preparation
- Create the data directory:
  data/custom/
- Place an ordered image sequence in it:
  img01.jpg, img02.jpg, ...
- (Optional) provide camera calibration parameters (see the calibration sketch after this list)
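If no calibration is available, K can be estimated from a set of checkerboard photos with the Computer Vision Toolbox; a minimal sketch, where the calibration folder and square size are placeholders:

% Estimate intrinsics from checkerboard images (folder and square size are placeholders)
calibDS = imageDatastore(fullfile('data', 'calibration'));
[imagePoints, boardSize] = detectCheckerboardPoints(calibDS.Files);
squareSize = 25;                                        % checkerboard square size in mm
worldPoints = generateCheckerboardPoints(boardSize, squareSize);
firstImg = imread(calibDS.Files{1});
imgSize = [size(firstImg, 1), size(firstImg, 2)];
calib = estimateCameraParameters(imagePoints, worldPoints, 'ImageSize', imgSize);
K = calib.IntrinsicMatrix';   % transpose to the [fx 0 cx; 0 fy cy; 0 0 1] convention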
2. Parameter Tuning Suggestions
- To improve quality:
  imageScale = 1.0;                 % Full resolution
  maxPoints = 10000;                % More feature points
  reconstructionType = 'dense';
- To speed up processing:
  imageScale = 0.3;                 % Lower resolution
  maxPoints = 1000;                 % Fewer feature points
  numImages = 5;                    % Use fewer images
3. Extensions
- Point-cloud registration:
  % Register a newly scanned point cloud against the reconstruction
  newCloud = pcread('new_scan.ply');
  tform = pcregistericp(newCloud, finalCloud);
  mergedCloud = pcmerge(finalCloud, pctransform(newCloud, tform), 0.01);
- 3-D measurement:
  % Distance between two selected 3-D points
  dist = norm(point1 - point2);
  fprintf('Distance: %.2f m\n', dist);
- Virtual display:
  % Interactive point-cloud player (pcplayer requires explicit axis limits)
  player = pcplayer(finalCloud.XLimits, finalCloud.YLimits, finalCloud.ZLimits);
  view(player, finalCloud);