
Python Check-in: Day 54

@浙大疏锦行

Assignments:

  1. Train an Inception network on CIFAR-10 and observe its accuracy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import CosineAnnealingLR

# Device configuration
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Data augmentation and loading
def get_dataloaders():
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
    trainloader = DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
    testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
    testloader = DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)
    return trainloader, testloader

class BasicConv2d(nn.Module):
    """Conv -> BatchNorm -> ReLU block."""
    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        return F.relu(self.bn(self.conv(x)))

class Inception(nn.Module):
    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
        super(Inception, self).__init__()
        # 1x1 branch
        self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1)
        # 1x1 -> 3x3 branch
        self.branch2 = nn.Sequential(
            BasicConv2d(in_channels, ch3x3red, kernel_size=1),
            BasicConv2d(ch3x3red, ch3x3, kernel_size=3, padding=1)
        )
        # 1x1 -> 5x5 branch
        self.branch3 = nn.Sequential(
            BasicConv2d(in_channels, ch5x5red, kernel_size=1),
            BasicConv2d(ch5x5red, ch5x5, kernel_size=5, padding=2)
        )
        # 3x3 pool -> 1x1 branch
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            BasicConv2d(in_channels, pool_proj, kernel_size=1)
        )

    def forward(self, x):
        # Concatenate the four branches along the channel dimension
        return torch.cat([
            self.branch1(x),
            self.branch2(x),
            self.branch3(x),
            self.branch4(x)
        ], 1)

class InceptionNet_CIFAR(nn.Module):
    def __init__(self, num_classes=10):
        super(InceptionNet_CIFAR, self).__init__()
        # Stem adapted to CIFAR-10's 32x32 inputs
        self.stem = nn.Sequential(
            BasicConv2d(3, 64, kernel_size=3, stride=1, padding=1),
            BasicConv2d(64, 64, kernel_size=3, padding=1),
            BasicConv2d(64, 64, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )
        # Channel bookkeeping:
        # inception3a output channels = 32 + 32 + 16 + 16 = 96
        self.inception3a = Inception(64, 32, 32, 32, 16, 16, 16)
        # inception3b input channels must be 96 (the output of inception3a)
        self.inception3b = Inception(96, 64, 32, 64, 16, 32, 32)
        # Classification head (inception3b output channels = 64 + 64 + 32 + 32 = 192)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(0.5)
        self.fc = nn.Linear(192, num_classes)

    def forward(self, x):
        x = self.stem(x)                    # [B, 64, 16, 16]
        x = self.inception3a(x)             # [B, 96, 16, 16]
        x = self.inception3b(x)             # [B, 192, 16, 16]
        x = F.max_pool2d(x, kernel_size=2)  # [B, 192, 8, 8]
        x = self.avgpool(x)                 # [B, 192, 1, 1]
        x = self.dropout(torch.flatten(x, 1))
        x = self.fc(x)
        return x

# Training loop
def train(model, device, trainloader, optimizer, criterion, epoch):
    model.train()
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()
        if batch_idx % 100 == 0:
            print(f'Epoch: {epoch} | Batch: {batch_idx}/{len(trainloader)} '
                  f'| Loss: {loss.item():.3f} | Acc: {100.*correct/total:.1f}%')
    return 100.*correct/total

# Evaluation loop
def test(model, device, testloader, criterion):
    model.eval()
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
    acc = 100.*correct/total
    print(f'Test Loss: {test_loss/(batch_idx+1):.3f} | Acc: {acc:.1f}%')
    return acc

# Main entry point
def main():
    trainloader, testloader = get_dataloaders()
    model = InceptionNet_CIFAR().to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
    scheduler = CosineAnnealingLR(optimizer, T_max=200)
    best_acc = 0
    for epoch in range(200):
        print(f"\nEpoch: {epoch+1}")
        train_acc = train(model, device, trainloader, optimizer, criterion, epoch)
        test_acc = test(model, device, testloader, criterion)
        scheduler.step()
        if test_acc > best_acc:
            best_acc = test_acc
            torch.save(model.state_dict(), 'best_model.pth')
    print(f"\nBest Test Accuracy: {best_acc:.2f}%")

if __name__ == '__main__':
    main()
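Before committing to the full 200-epoch run, it is worth sanity-checking the channel bookkeeping noted in the comments (96 channels after inception3a, 192 after inception3b). A minimal smoke test, assuming the model class and imports above are already in scope:

# Smoke test for the channel arithmetic: run a dummy batch through the
# network and check the per-stage channel counts and the final logits shape.
net = InceptionNet_CIFAR()
dummy = torch.randn(2, 3, 32, 32)   # two fake CIFAR-10 images
out = net(dummy)
print(out.shape)                    # expected: torch.Size([2, 10])

x = net.stem(dummy)
assert x.shape[1] == 64             # stem output channels
x = net.inception3a(x)
assert x.shape[1] == 96             # 32 + 32 + 16 + 16
x = net.inception3b(x)
assert x.shape[1] == 192            # 64 + 64 + 32 + 32

If the asserts pass, the nn.Linear(192, num_classes) head is consistent with the blocks feeding it, and any later shape error must come from data loading rather than the architecture.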
  2. Ablation study: add a residual connection and a CBAM module, and ablate each separately
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.optim import SGD
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.utils.tensorboard import SummaryWriter

# Device configuration
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Data augmentation and loading
def get_dataloaders():
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
    trainloader = DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
    testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
    testloader = DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)
    return trainloader, testloader

# Basic building blocks
class BasicConv2d(nn.Module):
    def __init__(self, in_channels, out_channels, **kwargs):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        return F.relu(self.bn(self.conv(x)))

class CBAM(nn.Module):
    """Convolutional Block Attention Module"""
    def __init__(self, channels, reduction=16):
        super().__init__()
        # Channel attention: average-pooled descriptor through a bottleneck MLP
        self.channel_att = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channels, channels // reduction, 1),
            nn.ReLU(),
            nn.Conv2d(channels // reduction, channels, 1),
            nn.Sigmoid()
        )
        # Spatial attention: 7x7 conv over channel-wise mean and max maps
        self.spatial_att = nn.Sequential(
            nn.Conv2d(2, 1, 7, padding=3),
            nn.Sigmoid()
        )

    def forward(self, x):
        # Channel attention
        channel = self.channel_att(x)
        # Spatial attention
        spatial = torch.cat([x.mean(1, keepdim=True), x.max(1, keepdim=True)[0]], 1)
        spatial = self.spatial_att(spatial)
        return x * channel * spatial

# Inception block variants
class BaseInception(nn.Module):
    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
        super().__init__()
        self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1)
        self.branch2 = nn.Sequential(
            BasicConv2d(in_channels, ch3x3red, kernel_size=1),
            BasicConv2d(ch3x3red, ch3x3, kernel_size=3, padding=1)
        )
        self.branch3 = nn.Sequential(
            BasicConv2d(in_channels, ch5x5red, kernel_size=1),
            BasicConv2d(ch5x5red, ch5x5, kernel_size=5, padding=2)
        )
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            BasicConv2d(in_channels, pool_proj, kernel_size=1)
        )

    def forward(self, x):
        return torch.cat([
            self.branch1(x),
            self.branch2(x),
            self.branch3(x),
            self.branch4(x)
        ], 1)

class ResInception(BaseInception):
    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
        super().__init__(in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj)
        out_channels = ch1x1 + ch3x3 + ch5x5 + pool_proj
        # Project the shortcut with a 1x1 conv when channel counts differ
        self.residual = nn.Identity()
        if in_channels != out_channels:
            self.residual = BasicConv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        residual = self.residual(x)
        return F.relu(residual + super().forward(x))

class CBAMInception(BaseInception):
    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
        super().__init__(in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj)
        out_channels = ch1x1 + ch3x3 + ch5x5 + pool_proj
        self.cbam = CBAM(out_channels)

    def forward(self, x):
        out = super().forward(x)
        return self.cbam(out)

class ResCBAMInception(ResInception):
    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
        super().__init__(in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj)
        out_channels = ch1x1 + ch3x3 + ch5x5 + pool_proj
        self.cbam = CBAM(out_channels)

    def forward(self, x):
        residual = self.residual(x)
        # Call BaseInception.forward directly: super().forward would resolve to
        # ResInception.forward, which adds the shortcut a second time
        out = BaseInception.forward(self, x)
        return F.relu(residual + self.cbam(out))

# Network architecture
class InceptionNet(nn.Module):
    def __init__(self, inception_block=BaseInception, num_classes=10):
        super().__init__()
        self.stem = nn.Sequential(
            BasicConv2d(3, 64, kernel_size=3, stride=1, padding=1),
            BasicConv2d(64, 64, kernel_size=3, padding=1),
            BasicConv2d(64, 64, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )
        self.inception3a = inception_block(64, 32, 32, 32, 16, 16, 16)
        self.inception3b = inception_block(96, 64, 32, 64, 16, 32, 32)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(0.5)
        self.fc = nn.Linear(192, num_classes)

    def forward(self, x):
        x = self.stem(x)                    # [B, 64, 16, 16]
        x = self.inception3a(x)             # [B, 96, 16, 16]
        x = self.inception3b(x)             # [B, 192, 16, 16]
        x = F.max_pool2d(x, kernel_size=2)  # [B, 192, 8, 8]
        x = self.avgpool(x)                 # [B, 192, 1, 1]
        x = self.dropout(torch.flatten(x, 1))
        x = self.fc(x)
        return x

# Training and evaluation functions
def train(model, loader, optimizer, criterion, epoch, writer=None):
    model.train()
    correct, total = 0, 0
    for inputs, targets in loader:
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()
    acc = 100.*correct/total
    if writer:
        writer.add_scalar('train_acc', acc, epoch)
    return acc

def test(model, loader, criterion, epoch, writer=None):
    model.eval()
    test_loss, correct, total = 0, 0, 0
    with torch.no_grad():
        for inputs, targets in loader:
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
    acc = 100.*correct/total
    if writer:
        writer.add_scalar('test_acc', acc, epoch)
    return acc

# Ablation study main function
def run_ablation():
    trainloader, testloader = get_dataloaders()
    criterion = nn.CrossEntropyLoss()
    # Map each variant name to its Inception block class. (The original code
    # stored model instances here and then called them again inside the loop,
    # which would have invoked forward() with no arguments.)
    variants = {
        "Baseline": BaseInception,
        "Residual": ResInception,
        "CBAM": CBAMInception,
        "Res+CBAM": ResCBAMInception
    }
    results = {}
    for name, block in variants.items():
        print(f"\n=== Training {name} ===")
        writer = SummaryWriter(f'runs/{name}')
        model = InceptionNet(block).to(device)
        optimizer = SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
        scheduler = CosineAnnealingLR(optimizer, T_max=200)
        best_acc = 0
        for epoch in range(200):
            train_acc = train(model, trainloader, optimizer, criterion, epoch, writer)
            test_acc = test(model, testloader, criterion, epoch, writer)
            scheduler.step()
            if test_acc > best_acc:
                best_acc = test_acc
                torch.save(model.state_dict(), f'{name}_best.pth')
            if epoch % 10 == 0:
                print(f'Epoch {epoch}: Train Acc={train_acc:.2f}%, Test Acc={test_acc:.2f}%')
        results[name] = best_acc
        writer.close()
    print("\n=== Final Results ===")
    for name, acc in results.items():
        print(f"{name}: {acc:.2f}%")

if __name__ == '__main__':
    run_ablation()
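One caveat when reading the ablation results: the four variants do not have identical capacity, since the 1x1 shortcut projection and the CBAM attention layers each add parameters, so part of any accuracy gap may come from extra capacity rather than the mechanism itself. A quick sketch, assuming the classes above are in scope, that compares parameter counts before training:

# Compare trainable parameter counts across the four ablation variants;
# a large gap would suggest accuracy differences partly reflect capacity.
def count_params(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

for name, block in [("Baseline", BaseInception), ("Residual", ResInception),
                    ("CBAM", CBAMInception), ("Res+CBAM", ResCBAMInception)]:
    net = InceptionNet(block)
    print(f"{name}: {count_params(net)/1e6:.3f}M parameters")

The per-epoch accuracy curves written by SummaryWriter can then be compared side by side with: tensorboard --logdir runs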
