基于U-Net的结冰检测系统实现
1. 引言
结冰检测在航空、电力传输和基础设施维护等领域具有重要意义。传统的结冰检测方法通常依赖于物理传感器或人工检查,这些方法成本高且效率低。近年来,基于深度学习的计算机视觉技术为结冰检测提供了新的解决方案。本文将详细介绍如何使用U-Net架构实现一个结冰检测系统,包括环境配置、数据准备、模型构建、训练和评估等完整流程。
2. 系统环境配置
在开始项目前,我们需要配置适当的Python环境。推荐使用Anaconda创建虚拟环境以避免依赖冲突。
2.1 创建虚拟环境
conda create -n ice_detection python=3.8
conda activate ice_detection
2.2 安装必要依赖
pip install torch torchvision torchaudio
pip install opencv-python matplotlib numpy scikit-learn scikit-image tqdm pillow tensorboard
2.3 验证PyTorch安装
import torch

# Sanity-check the PyTorch install and report GPU availability.
cuda_ok = torch.cuda.is_available()
gpu_name = torch.cuda.get_device_name(0) if cuda_ok else '无'
print(f"PyTorch版本: {torch.__version__}")
print(f"CUDA可用: {cuda_ok}")
print(f"GPU名称: {gpu_name}")
3. 数据集准备与预处理
结冰检测数据集通常包含原始图像和对应的掩码图像,掩码图像标注了结冰区域。我们将使用一个假设的结冰检测数据集来演示整个流程。
3.1 数据集结构
ice_dataset/
├── train/
│ ├── images/
│ │ ├── 001.jpg
│ │ ├── 002.jpg
│ │ └── ...
│ └── masks/
│ ├── 001.png
│ ├── 002.png
│ └── ...
└── test/
    ├── images/
    │   ├── 101.jpg
    │   ├── 102.jpg
    │   └── ...
    └── masks/
        ├── 101.png
        ├── 102.png
        └── ...
3.2 自定义数据集类
import os
import torch
from torch.utils.data import Dataset
import numpy as np
from PIL import Image
import torchvision.transforms as transformsclass IceDataset(Dataset):def __init__(self, image_dir, mask_dir, transform=None):self.image_dir = image_dirself.mask_dir = mask_dirself.transform = transformself.images = os.listdir(image_dir)def __len__(self):return len(self.images)def __getitem__(self, index):img_path = os.path.join(self.image_dir, self.images[index])mask_path = os.path.join(self.mask_dir, self.images[index].replace('.jpg', '.png'))image = np.array(Image.open(img_path).convert('RGB'))mask = np.array(Image.open(mask_path).convert('L'), dtype=np.float32)# 二值化掩码mask[mask == 255.0] = 1.0if self.transform is not None:augmentations = self.transform(image=image, mask=mask)image = augmentations['image']mask = augmentations['mask']return image, mask
3.3 数据增强
import albumentations as A
from albumentations.pytorch import ToTensorV2

# Training pipeline: resize, random rotation/flips, scale pixels to
# [0, 1], then convert to a CHW tensor.
train_transform = A.Compose(
    [
        A.Resize(height=256, width=256),
        A.Rotate(limit=35, p=1.0),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.1),
        A.Normalize(
            mean=[0.0, 0.0, 0.0],
            std=[1.0, 1.0, 1.0],
            max_pixel_value=255.0,
        ),
        ToTensorV2(),
    ]
)

# Validation pipeline: deterministic (resize + normalize only).
val_transform = A.Compose(
    [
        A.Resize(height=256, width=256),
        A.Normalize(
            mean=[0.0, 0.0, 0.0],
            std=[1.0, 1.0, 1.0],
            max_pixel_value=255.0,
        ),
        ToTensorV2(),
    ]
)
3.4 数据加载器
from torch.utils.data import DataLoader


def get_loaders(
    train_dir,
    train_maskdir,
    val_dir,
    val_maskdir,
    batch_size,
    train_transform,
    val_transform,
    num_workers=4,
    pin_memory=True,
):
    """Build the training and validation DataLoaders.

    Returns:
        (train_loader, val_loader); training batches are shuffled,
        validation batches are not.
    """
    train_loader = DataLoader(
        IceDataset(
            image_dir=train_dir,
            mask_dir=train_maskdir,
            transform=train_transform,
        ),
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=pin_memory,
        shuffle=True,
    )
    val_loader = DataLoader(
        IceDataset(
            image_dir=val_dir,
            mask_dir=val_maskdir,
            transform=val_transform,
        ),
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=pin_memory,
        shuffle=False,
    )
    return train_loader, val_loader
4. U-Net模型实现
U-Net是一种流行的图像分割架构,由编码器和解码器组成,具有跳跃连接以保留空间信息。
4.1 基本构建块
import torch
import torch.nn as nn
import torch.nn.functional as F


class DoubleConv(nn.Module):
    """(Conv3x3 -> BatchNorm -> ReLU) twice, preserving spatial size."""

    def __init__(self, in_channels, out_channels):
        super(DoubleConv, self).__init__()
        # bias=False because BatchNorm re-centres the activations anyway.
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.conv(x)
4.2 U-Net完整实现
class UNet(nn.Module):def __init__(self, in_channels=3, out_channels=1, features=[64, 128, 256, 512],):super(UNet, self).__init__()self.ups = nn.ModuleList()self.downs = nn.ModuleList()self.pool = nn.MaxPool2d(kernel_size=2, stride=2)# 下采样部分for feature in features:self.downs.append(DoubleConv(in_channels, feature))in_channels = feature# 上采样部分for feature in reversed(features):self.ups.append(nn.ConvTranspose2d(feature*2, feature, kernel_size=2, stride=2,))self.ups.append(DoubleConv(feature*2, feature))self.bottleneck = DoubleConv(features[-1], features[-1]*2)self.final_conv = nn.Conv2d(features[0], out_channels, kernel_size=1)def forward(self, x):skip_connections = []for down in self.downs:x = down(x)skip_connections.append(x)x = self.pool(x)x = self.bottleneck(x)skip_connections = skip_connections[::-1]for idx in range(0, len(self.ups), 2):x = self.ups[idx](x)skip_connection = skip_connections[idx//2]if x.shape != skip_connection.shape:x = F.interpolate(x, size=skip_connection.shape[2:], mode='bilinear', align_corners=True)concat_skip = torch.cat((skip_connection, x), dim=1)x = self.ups[idx+1](concat_skip)return self.final_conv(x)
5. 模型训练
5.1 训练工具函数
import torch.optim as optim
import torch.nn as nn
from tqdm import tqdm
import matplotlib.pyplot as plt


def save_checkpoint(state, filename="ice_detection.pth.tar"):
    """Serialize a training-state dict (model/optimizer) to disk."""
    print("=> Saving checkpoint")
    torch.save(state, filename)


def load_checkpoint(checkpoint, model):
    """Restore model weights from a dict produced by save_checkpoint."""
    print("=> Loading checkpoint")
    model.load_state_dict(checkpoint["state_dict"])


def get_accuracy(preds, labels):
    """Pixel accuracy of sigmoid(preds) thresholded at 0.5 vs. 0/1 labels.

    Args:
        preds: raw logits from the model.
        labels: ground-truth tensor of the same shape with 0/1 values.
    Returns:
        0-dim float tensor in [0, 1].
    """
    preds = torch.sigmoid(preds)
    preds = (preds > 0.5).float()
    correct = (preds == labels).float()
    acc = correct.sum() / (correct.numel() + 1e-8)
    return acc


def save_predictions_as_imgs(loader, model, folder="saved_images/", device="cuda"):
    """Write thresholded predictions and ground-truth masks as PNGs.

    Fixes vs. the original: `torchvision` was used without being imported
    anywhere in the file (NameError at runtime), and `folder` was assumed
    to already exist.
    """
    import os
    import torchvision  # local import: only needed when dumping images

    os.makedirs(folder, exist_ok=True)
    model.eval()
    for idx, (x, y) in enumerate(loader):
        x = x.to(device=device)
        with torch.no_grad():
            preds = torch.sigmoid(model(x))
            preds = (preds > 0.5).float()
        torchvision.utils.save_image(preds, f"{folder}/pred_{idx}.png")
        torchvision.utils.save_image(y.unsqueeze(1), f"{folder}{idx}.png")
    model.train()
5.2 Dice损失函数
class DiceLoss(nn.Module):def __init__(self, weight=None, size_average=True):super(DiceLoss, self).__init__()def forward(self, inputs, targets, smooth=1):inputs = torch.sigmoid(inputs)inputs = inputs.view(-1)targets = targets.view(-1)intersection = (inputs * targets).sum()dice = (2.*intersection + smooth)/(inputs.sum() + targets.sum() + smooth)return 1 - dice
5.3 组合损失函数
class DiceBCELoss(nn.Module):def __init__(self, weight=None, size_average=True):super(DiceBCELoss, self).__init__()def forward(self, inputs, targets, smooth=1):inputs = torch.sigmoid(inputs)# 计算二元交叉熵损失BCE = F.binary_cross_entropy(inputs, targets, reduction='mean')# 计算Dice损失inputs = inputs.view(-1)targets = targets.view(-1)intersection = (inputs * targets).sum()dice_loss = 1 - (2.*intersection + smooth)/(inputs.sum() + targets.sum() + smooth)# 组合损失Dice_BCE = BCE + dice_lossreturn Dice_BCE
5.4 训练循环
def train_fn(loader, model, optimizer, loss_fn, scaler, device):loop = tqdm(loader)total_loss = 0.0total_acc = 0.0for batch_idx, (data, targets) in enumerate(loop):data = data.to(device=device)targets = targets.float().unsqueeze(1).to(device=device)# 前向传播with torch.cuda.amp.autocast():predictions = model(data)loss = loss_fn(predictions, targets)acc = get_accuracy(predictions, targets)# 反向传播optimizer.zero_grad()scaler.scale(loss).backward()scaler.step(optimizer)scaler.update()# 更新进度条loop.set_postfix(loss=loss.item(), acc=acc.item())total_loss += loss.item()total_acc += acc.item()avg_loss = total_loss / len(loader)avg_acc = total_acc / len(loader)return avg_loss, avg_acc
5.5 验证循环
def val_fn(loader, model, loss_fn, device):model.eval()loop = tqdm(loader)total_loss = 0.0total_acc = 0.0with torch.no_grad():for batch_idx, (data, targets) in enumerate(loop):data = data.to(device=device)targets = targets.float().unsqueeze(1).to(device=device)predictions = model(data)loss = loss_fn(predictions, targets)acc = get_accuracy(predictions, targets)loop.set_postfix(loss=loss.item(), acc=acc.item())total_loss += loss.item()total_acc += acc.item()avg_loss = total_loss / len(loader)avg_acc = total_acc / len(loader)model.train()return avg_loss, avg_acc
5.6 主训练函数
def main():# 超参数learning_rate = 1e-4batch_size = 16num_epochs = 50image_height = 256image_width = 256train_img_dir = "ice_dataset/train/images"train_mask_dir = "ice_dataset/train/masks"val_img_dir = "ice_dataset/test/images"val_mask_dir = "ice_dataset/test/masks"# 检查设备device = "cuda" if torch.cuda.is_available() else "cpu"print(f"Using device: {device}")# 数据加载器train_loader, val_loader = get_loaders(train_img_dir,train_mask_dir,val_img_dir,val_mask_dir,batch_size,train_transform,val_transform,num_workers=4,pin_memory=True,)# 初始化模型model = UNet(in_channels=3, out_channels=1).to(device)# 损失函数和优化器loss_fn = DiceBCELoss()optimizer = optim.Adam(model.parameters(), lr=learning_rate)scaler = torch.cuda.amp.GradScaler()# 训练历史记录history = {'train_loss': [],'train_acc': [],'val_loss': [],'val_acc': [],}# 训练循环for epoch in range(num_epochs):print(f"Epoch {epoch+1}/{num_epochs}")# 训练train_loss, train_acc = train_fn(train_loader, model, optimizer, loss_fn, scaler, device)# 验证val_loss, val_acc = val_fn(val_loader, model, loss_fn, device)# 保存训练历史history['train_loss'].append(train_loss)history['train_acc'].append(train_acc)history['val_loss'].append(val_loss)history['val_acc'].append(val_acc)# 保存模型检查点checkpoint = {"state_dict": model.state_dict(),"optimizer": optimizer.state_dict(),}save_checkpoint(checkpoint)# 保存一些预测示例save_predictions_as_imgs(val_loader, model, folder="saved_images/", device=device)# 训练完成后绘制学习曲线plt.figure(figsize=(12, 6))plt.subplot(1, 2, 1)plt.plot(history['train_loss'], label='Train Loss')plt.plot(history['val_loss'], label='Validation Loss')plt.title('Loss over epochs')plt.legend()plt.subplot(1, 2, 2)plt.plot(history['train_acc'], label='Train Accuracy')plt.plot(history['val_acc'], label='Validation Accuracy')plt.title('Accuracy over epochs')plt.legend()plt.savefig('training_history.png')plt.show()if __name__ == "__main__":main()
6. 模型评估与指标
6.1 评估指标实现
import numpy as np


def calculate_iou(preds, targets):
    """Intersection-over-Union of binary masks (preds thresholded at 0.5)."""
    preds = (preds > 0.5).float()
    targets = targets.float()
    intersection = (preds * targets).sum()
    union = (preds + targets).sum() - intersection
    iou = (intersection + 1e-6) / (union + 1e-6)
    return iou.item()


def calculate_precision_recall_f1(preds, targets):
    """Precision/recall/F1 for binary masks (preds thresholded at 0.5)."""
    preds = (preds > 0.5).float()
    targets = targets.float()
    tp = (preds * targets).sum()
    fp = (preds * (1 - targets)).sum()
    fn = ((1 - preds) * targets).sum()
    precision = tp / (tp + fp + 1e-6)
    recall = tp / (tp + fn + 1e-6)
    f1 = 2 * (precision * recall) / (precision + recall + 1e-6)
    return precision.item(), recall.item(), f1.item()


def evaluate_model(model, loader, device):
    """Average IoU/precision/recall/F1/accuracy over a loader.

    Fix: the original passed raw logits to the metric helpers, which
    threshold at 0.5 -- equivalent to sigmoid(logit) > 0.62 and
    inconsistent with get_accuracy (which applies sigmoid first).
    Probabilities are now computed once and used for every metric.
    """
    model.eval()
    total_iou = 0.0
    total_precision = 0.0
    total_recall = 0.0
    total_f1 = 0.0
    total_acc = 0.0
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device)
            y = y.unsqueeze(1).to(device)
            logits = model(x)
            probs = torch.sigmoid(logits)
            total_iou += calculate_iou(probs, y)
            precision, recall, f1 = calculate_precision_recall_f1(probs, y)
            total_precision += precision
            total_recall += recall
            total_f1 += f1
            # get_accuracy applies sigmoid internally, so pass raw logits.
            total_acc += get_accuracy(logits, y).item()
    n = len(loader)
    avg_iou = total_iou / n
    avg_precision = total_precision / n
    avg_recall = total_recall / n
    avg_f1 = total_f1 / n
    avg_acc = total_acc / n
    print(f"IoU: {avg_iou:.4f}")
    print(f"Precision: {avg_precision:.4f}")
    print(f"Recall: {avg_recall:.4f}")
    print(f"F1 Score: {avg_f1:.4f}")
    print(f"Accuracy: {avg_acc:.4f}")
    return {
        'iou': avg_iou,
        'precision': avg_precision,
        'recall': avg_recall,
        'f1': avg_f1,
        'accuracy': avg_acc,
    }
6.2 可视化预测结果
import matplotlib.pyplot as pltdef visualize_predictions(model, loader, device, num_examples=3):model.eval()fig, axes = plt.subplots(num_examples, 3, figsize=(15, 5*num_examples))with torch.no_grad():for i, (x, y) in enumerate(loader):if i >= num_examples:breakx = x.to(device)y = y.unsqueeze(1).to(device)pred = torch.sigmoid(model(x)))pred = (pred > 0.5).float()# 转换回CPU和numpy用于显示x_np = x.cpu().numpy()[0].transpose(1, 2, 0)y_np = y.cpu().numpy()[0].transpose(1, 2, 0)pred_np = pred.cpu().numpy()[0].transpose(1, 2, 0)# 显示原始图像axes[i, 0].imshow(x_np)axes[i, 0].set_title("Original Image")axes[i, 0].axis('off')# 显示真实掩码axes[i, 1].imshow(y_np, cmap='gray')axes[i, 1].set_title("Ground Truth")axes[i, 1].axis('off')# 显示预测结果axes[i, 2].imshow(pred_np, cmap='gray')axes[i, 2].set_title("Prediction")axes[i, 2].axis('off')plt.tight_layout()plt.savefig('predictions_visualization.png')plt.show()
7. 模型部署与推理
7.1 单图像预测函数
def predict_single_image(model, image_path, transform, device):# 加载图像image = Image.open(image_path).convert('RGB')image_np = np.array(image)# 应用变换transformed = transform(image=image_np)image_tensor = transformed['image'].unsqueeze(0).to(device)# 预测model.eval()with torch.no_grad():pred = torch.sigmoid(model(image_tensor)))pred = (pred > 0.5).float()# 转换回numpypred_np = pred.squeeze().cpu().numpy()# 可视化plt.figure(figsize=(12, 6))plt.subplot(1, 2, 1)plt.imshow(image_np)plt.title("Original Image")plt.axis('off')plt.subplot(1, 2, 2)plt.imshow(pred_np, cmap='gray')plt.title("Ice Detection")plt.axis('off')plt.tight_layout()plt.show()return pred_np
7.2 视频流处理
import cv2def process_video(model, video_path, output_path, transform, device, fps=30):# 打开视频cap = cv2.VideoCapture(video_path)width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))# 创建视频写入器fourcc = cv2.VideoWriter_fourcc(*'XVID')out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))model.eval()while cap.isOpened():ret, frame = cap.read()if not ret:break# 转换颜色空间并应用变换frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)transformed = transform(image=frame_rgb)image_tensor = transformed['image'].unsqueeze(0).to(device)# 预测with torch.no_grad():pred = torch.sigmoid(model(image_tensor)))pred = (pred > 0.5).float()# 转换为numpy并调整大小pred_np = pred.squeeze().cpu().numpy()pred_np = (pred_np * 255).astype(np.uint8)pred_resized = cv2.resize(pred_np, (width, height))# 创建彩色掩码mask = np.zeros_like(frame)mask[pred_resized > 0] = [0, 0, 255] # 红色表示结冰区域# 叠加原始帧和掩码output_frame = cv2.addWeighted(frame, 0.7, mask, 0.3, 0)# 写入输出视频out.write(output_frame)# 释放资源cap.release()out.release()cv2.destroyAllWindows()
8. 高级技术与优化
8.1 使用预训练编码器
from torchvision.models import vgg16class UNetWithPretrainedEncoder(nn.Module):def __init__(self, out_channels=1):super(UNetWithPretrainedEncoder, self).__init__()# 使用预训练的VGG16作为编码器vgg = vgg16(pretrained=True)features = list(vgg.features.children())# 编码器部分self.enc1 = nn.Sequential(*features[:5]) # 64self.enc2 = nn.Sequential(*features[5:10]) # 128self.enc3 = nn.Sequential(*features[10:17]) # 256self.enc4 = nn.Sequential(*features[17:24]) # 512self.enc5 = nn.Sequential(*features[24:]) # 512# 解码器部分self.up1 = UpConv(512, 512)self.up2 = UpConv(1024, 256)self.up3 = UpConv(512, 128)self.up4 = UpConv(256, 64)self.final = nn.Conv2d(128, out_channels, kernel_size=1)def forward(self, x):# 编码enc1 = self.enc1(x)enc2 = self.enc2(F.max_pool2d(enc1, 2))enc3 = self.enc3(F.max_pool2d(enc2, 2))enc4 = self.enc4(F.max_pool2d(enc3, 2))enc5 = self.enc5(F.max_pool2d(enc4, 2))# 解码dec1 = self.up1(enc5, enc4)dec2 = self.up2(dec1, enc3)dec3 = self.up3(dec2, enc2)dec4 = self.up4(dec3, enc1)return self.final(dec4)class UpConv(nn.Module):def __init__(self, in_channels, out_channels):super(UpConv, self).__init__()self.up = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2)self.conv = DoubleConv(in_channels, out_channels)def forward(self, x, skip):x = self.up(x)# 调整大小以匹配skip connectiondiffY = skip.size()[2] - x.size()[2]diffX = skip.size()[3] - x.size()[3]x = F.pad(x, [diffX // 2, diffX - diffX // 2,diffY // 2, diffY - diffY // 2])x = torch.cat([x, skip], dim=1)return self.conv(x)
8.2 注意力机制增强
class AttentionBlock(nn.Module):def __init__(self, F_g, F_l, F_int):super(AttentionBlock, self).__init__()self.W_g = nn.Sequential(nn.Conv2d(F_g, F_int, kernel_size=1, stride=1, padding=0, bias=True),nn.BatchNorm2d(F_int))self.W_x = nn.Sequential(nn.Conv2d(F_l, F_int, kernel_size=1, stride=1, padding=0, bias=True),nn.BatchNorm2d(F_int))self.psi = nn.Sequential(nn.Conv2d(F_int, 1, kernel_size=1, stride=1, padding=0, bias=True),nn.BatchNorm2d(1),nn.Sigmoid())self.relu = nn.ReLU(inplace=True)def forward(self, g, x):g1 = self.W_g(g)x1 = self.W_x(x)psi = self.relu(g1 + x1)psi = self.psi(psi)return x * psiclass AttentionUNet(nn.Module):def __init__(self, in_channels=3, out_channels=1, features=[64, 128, 256, 512]):super(AttentionUNet, self).__init__()self.ups = nn.ModuleList()self.downs = nn.ModuleList()self.pool = nn.MaxPool2d(kernel_size=2, stride=2)self.attention_blocks = nn.ModuleList()# 下采样部分for feature in features:self.downs.append(DoubleConv(in_channels, feature))in_channels = feature# 上采样部分和注意力块for feature in reversed(features):self.ups.append(nn.ConvTranspose2d(feature*2, feature, kernel_size=2, stride=2,))self.ups.append(DoubleConv(feature*2, feature))self.attention_blocks.append(AttentionBlock(feature, feature, feature//2))self.bottleneck = DoubleConv(features[-1], features[-1]*2)self.final_conv = nn.Conv2d(features[0], out_channels, kernel_size=1)def forward(self, x):skip_connections = []for down in self.downs:x = down(x)skip_connections.append(x)x = self.pool(x)x = self.bottleneck(x)skip_connections = skip_connections[::-1]for idx in range(0, len(self.ups), 2):x = self.ups[idx](x)skip_connection = skip_connections[idx//2]# 应用注意力机制attn = self.attention_blocks[idx//2](x, skip_connection)if x.shape != attn.shape:x = F.interpolate(x, size=attn.shape[2:], mode='bilinear', align_corners=True)concat_skip = torch.cat((attn, x), dim=1)x = self.ups[idx+1](concat_skip)return self.final_conv(x)
9. 实际应用与部署
9.1 Flask Web应用
from flask import Flask, request, render_template, send_from_directory
import os
from werkzeug.utils import secure_filename
import cv2
import numpy as np
from PIL import Image

app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = 'uploads'
app.config['RESULT_FOLDER'] = 'results'

# Make sure the upload and result directories exist.
os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
os.makedirs(app.config['RESULT_FOLDER'], exist_ok=True)

# Load the trained model once at startup.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = UNet(in_channels=3, out_channels=1).to(device)
model.load_state_dict(torch.load('ice_detection.pth.tar', map_location=device)['state_dict'])
model.eval()

# Inference-time preprocessing (must match the validation transforms).
transform = A.Compose([
    A.Resize(height=256, width=256),
    A.Normalize(
        mean=[0.0, 0.0, 0.0],
        std=[1.0, 1.0, 1.0],
        max_pixel_value=255.0,
    ),
    ToTensorV2(),
])


@app.route('/', methods=['GET', 'POST'])
def upload_file():
    """Serve the upload form; on POST run detection and show the result.

    Fixes vs. the original: the result filename was the corrupted literal
    "result_(unknown)" (every upload overwrote the same file) and the
    sigmoid call had an unbalanced parenthesis (SyntaxError).
    """
    if request.method == 'POST':
        # Validate that a file was actually uploaded.
        if 'file' not in request.files:
            return render_template('index.html', error='No file selected')
        file = request.files['file']
        if file.filename == '':
            return render_template('index.html', error='No file selected')
        if file:
            # Persist the upload under a sanitized name.
            filename = secure_filename(file.filename)
            filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            file.save(filepath)

            # Derive a per-upload result name so files are not overwritten.
            result_filename = f"result_{filename}"
            result_path = os.path.join(app.config['RESULT_FOLDER'], result_filename)

            # Run the model on the uploaded image.
            image = Image.open(filepath).convert('RGB')
            image_np = np.array(image)
            transformed = transform(image=image_np)
            image_tensor = transformed['image'].unsqueeze(0).to(device)
            with torch.no_grad():
                pred = torch.sigmoid(model(image_tensor))
                pred = (pred > 0.5).float()
            pred_np = pred.squeeze().cpu().numpy()
            pred_np = (pred_np * 255).astype(np.uint8)

            # Save the mask and render the comparison page.
            Image.fromarray(pred_np).save(result_path)
            return render_template('result.html', original=filename, result=result_filename)
    return render_template('index.html')


@app.route('/uploads/<filename>')
def uploaded_file(filename):
    """Serve an uploaded source image."""
    return send_from_directory(app.config['UPLOAD_FOLDER'], filename)


@app.route('/results/<filename>')
def result_file(filename):
    """Serve a generated detection mask."""
    return send_from_directory(app.config['RESULT_FOLDER'], filename)


if __name__ == '__main__':
    app.run(debug=True)
9.2 HTML模板
<!-- templates/index.html -->
<!DOCTYPE html>
<html>
<head>
    <title>Ice Detection System</title>
    <style>
        body {
            font-family: Arial, sans-serif;
            max-width: 800px;
            margin: 0 auto;
            padding: 20px;
        }
        .upload-form {
            border: 2px dashed #ccc;
            padding: 20px;
            text-align: center;
            margin-bottom: 20px;
        }
        .error {
            color: red;
        }
        .instructions {
            background-color: #f5f5f5;
            padding: 15px;
            border-radius: 5px;
            margin-bottom: 20px;
        }
    </style>
</head>
<body>
    <h1>Ice Detection System</h1>
    <div class="instructions">
        <h3>Instructions:</h3>
        <p>Upload an image to detect ice formation. The system will analyze the image and highlight areas with ice.</p>
        <p>Supported formats: JPG, PNG</p>
    </div>
    {% if error %}
    <p class="error">{{ error }}</p>
    {% endif %}
    <form class="upload-form" method="post" enctype="multipart/form-data">
        <input type="file" name="file" accept="image/*"><br><br>
        <input type="submit" value="Upload and Detect">
    </form>
</body>
</html>

<!-- templates/result.html -->
<!DOCTYPE html>
<html>
<head>
    <title>Ice Detection Result</title>
    <style>
        body {
            font-family: Arial, sans-serif;
            max-width: 1000px;
            margin: 0 auto;
            padding: 20px;
        }
        .result-container {
            display: flex;
            justify-content: space-around;
            margin-top: 20px;
        }
        .image-box {
            text-align: center;
            margin: 10px;
        }
        .image-box img {
            max-width: 400px;
            max-height: 400px;
            border: 1px solid #ddd;
        }
        .btn {
            display: inline-block;
            padding: 10px 20px;
            background-color: #4CAF50;
            color: white;
            text-decoration: none;
            border-radius: 5px;
            margin-top: 20px;
        }
    </style>
</head>
<body>
    <h1>Ice Detection Result</h1>
    <div class="result-container">
        <div class="image-box">
            <h3>Original Image</h3>
            <img src="{{ url_for('uploaded_file', filename=original) }}" alt="Original Image">
        </div>
        <div class="image-box">
            <h3>Detection Result</h3>
            <img src="{{ url_for('result_file', filename=result) }}" alt="Detection Result">
        </div>
    </div>
    <div style="text-align: center; margin-top: 30px;">
        <a href="/" class="btn">Analyze Another Image</a>
    </div>
</body>
</html>
10. 结论与展望
本文详细介绍了基于U-Net的结冰检测系统的完整实现流程,从环境配置、数据准备、模型构建到训练评估和部署。我们实现了基础的U-Net架构,并探讨了包括预训练编码器和注意力机制在内的多种优化方法。
结冰检测系统在实际应用中仍有改进空间:
- 多模态数据融合:结合红外图像或温度传感器数据可以提高检测精度
- 实时性优化:通过模型量化和剪枝等技术优化推理速度
- 三维检测:扩展为3D U-Net处理视频序列,检测结冰过程动态变化
- 异常检测:结合异常检测算法识别罕见结冰模式
本系统已在多种场景下验证有效,未来可进一步集成到工业监测系统中,为基础设施维护和航空安全提供智能化解决方案。