
DAY 54 Inception Networks and Some Reflections

Knowledge points review:

  1. A brief history of classical computer vision: LeNet --> AlexNet --> VGGNet --> InceptionNet --> ResNet
  2. The Inception module and the Inception network
  3. Interim summary of feature-fusion methods: element-wise addition, element-wise multiplication, channel-wise concat, and so on (see the sketch after this list)
  4. Receptive fields and convolution-kernel variants: understanding the design intent behind the different modules and classes
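
A minimal illustration of the three fusion operations in point 3 (toy tensors of my own, not taken from the lesson code):

import torch

a = torch.randn(1, 64, 8, 8)
b = torch.randn(1, 64, 8, 8)

added = a + b                     # element-wise addition (ResNet-style): channel count unchanged
gated = a * b                     # element-wise multiplication (attention-style gating): channel count unchanged
fused = torch.cat([a, b], dim=1)  # channel concat (Inception-style): channels add up, 64 + 64 = 128
print(added.shape, gated.shape, fused.shape)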

Assignment (a slightly more academic one this time):

  1. Train the Inception network on CIFAR-10 and observe its accuracy
  2. Ablation study: introduce a residual mechanism and a CBAM module and ablate each separately (a residual sketch is given at the end of part 2)

1. Observing the accuracy of the Inception network on CIFAR-10

import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np

# Use a CJK-capable fallback font (for Linux); only needed if plot labels contain Chinese
plt.rcParams["font.family"] = ["WenQuanYi Micro Hei"]
plt.rcParams['axes.unicode_minus'] = False


class Inception(nn.Module):
    def __init__(self, in_channels):
        """Inception module: parallel multi-scale feature extraction and fusion.

        Args:
            in_channels: number of channels of the input feature map
        """
        super(Inception, self).__init__()
        # 1x1 conv branch: reduces dimensionality and captures cross-channel relations,
        # cutting the cost of later convolutions while preserving local features
        self.branch1x1 = nn.Sequential(
            nn.Conv2d(in_channels, 64, kernel_size=1),  # reduce to 64 channels
            nn.ReLU()  # non-linear activation
        )
        # 3x3 conv branch: 1x1 reduction first, then a 3x3 conv for medium-scale features
        self.branch3x3 = nn.Sequential(
            nn.Conv2d(in_channels, 96, kernel_size=1),  # reduce to 96 channels
            nn.ReLU(),
            nn.Conv2d(96, 128, kernel_size=3, padding=1),  # 3x3 conv, spatial size preserved
            nn.ReLU()
        )
        # 5x5 conv branch: 1x1 reduction first, then a 5x5 conv for large-scale features
        # (the larger receptive field captures more global structure)
        self.branch5x5 = nn.Sequential(
            nn.Conv2d(in_channels, 16, kernel_size=1),  # aggressive reduction to 16 channels
            nn.ReLU(),
            nn.Conv2d(16, 32, kernel_size=5, padding=2),  # 5x5 conv, spatial size preserved
            nn.ReLU()
        )
        # Pooling branch: pooling keeps global information and adds translation invariance
        self.branch_pool = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),  # 3x3 max pool, size preserved
            nn.Conv2d(in_channels, 32, kernel_size=1),  # reduce to 32 channels
            nn.ReLU()
        )

    def forward(self, x):
        """Run the four branches in parallel and concatenate along the channel dim.

        Args:
            x: input feature map of shape [batch_size, in_channels, height, width]
        Returns:
            concatenated feature map of shape [batch_size, 256, height, width]
        """
        # Note: the four branches are computed independently, in parallel
        branch1x1 = self.branch1x1(x)      # shape: [batch_size, 64, height, width]
        branch3x3 = self.branch3x3(x)      # shape: [batch_size, 128, height, width]
        branch5x5 = self.branch5x5(x)      # shape: [batch_size, 32, height, width]
        branch_pool = self.branch_pool(x)  # shape: [batch_size, 32, height, width]
        # Concatenate along the channel dimension (dim=1)
        # Total channels: 64 + 128 + 32 + 32 = 256
        outputs = [branch1x1, branch3x3, branch5x5, branch_pool]
        return torch.cat(outputs, dim=1)


class InceptionNet(nn.Module):
    def __init__(self, num_classes=10):
        super(InceptionNet, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )
        self.inception1 = Inception(64)
        self.inception2 = Inception(256)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(256, num_classes)

    def forward(self, x):
        x = self.conv1(x)
        x = self.inception1(x)
        x = self.inception2(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x
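
# Quick sanity check (my addition, not in the original script): a dummy batch
# confirms the Inception module's concatenated 256 channels and the network's
# 10-way output.
print(Inception(64)(torch.randn(2, 64, 8, 8)).shape)    # torch.Size([2, 256, 8, 8])
print(InceptionNet()(torch.randn(2, 3, 32, 32)).shape)  # torch.Size([2, 10])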
# Check whether a GPU is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Data preprocessing (same as the original code)
train_transform = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
    transforms.RandomRotation(15),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])

test_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])

# Load the datasets (same as the original code)
train_dataset = datasets.CIFAR10(root='./cifar_data/cifar_data', train=True, download=True, transform=train_transform)
test_dataset = datasets.CIFAR10(root='./cifar_data/cifar_data', train=False, transform=test_transform)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)

# Initialize the model, loss function, and optimizer
model = InceptionNet().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
    optimizer,   # the optimizer whose LR is controlled (Adam here)
    mode='min',  # the monitored metric should decrease (e.g. a loss)
    patience=3,  # lower the LR only after 3 consecutive epochs without improvement
    factor=0.5   # reduction ratio (new LR = old LR * 0.5)
)
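
# (My addition) The optimizer's current learning rate can be read back at any
# point to verify when ReduceLROnPlateau fires; the train() function below
# calls scheduler.step(test_loss) once per epoch.
print(f"Initial learning rate: {optimizer.param_groups[0]['lr']}")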
# 5. Train the model (recording the loss of every iteration)
def train(model, train_loader, test_loader, criterion, optimizer, scheduler, device, epochs):
    # Per-iteration loss records
    all_iter_losses = []  # loss of every batch
    iter_indices = []     # iteration index
    # Per-epoch accuracy and loss records
    train_acc_history = []
    test_acc_history = []
    train_loss_history = []
    test_loss_history = []

    for epoch in range(epochs):
        # Set training mode at the start of every epoch
        # (the test phase below switches the model to eval mode)
        model.train()
        running_loss = 0.0
        correct = 0
        total = 0
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)  # move to GPU
            optimizer.zero_grad()  # clear gradients
            output = model(data)  # forward pass
            loss = criterion(output, target)  # compute loss
            loss.backward()  # backward pass
            optimizer.step()  # update parameters

            # Record this iteration's loss
            iter_loss = loss.item()
            all_iter_losses.append(iter_loss)
            iter_indices.append(epoch * len(train_loader) + batch_idx + 1)

            # Accumulate accuracy and loss statistics
            running_loss += iter_loss
            _, predicted = output.max(1)
            total += target.size(0)
            correct += predicted.eq(target).sum().item()

            # Print training info every 100 batches
            if (batch_idx + 1) % 100 == 0:
                print(f'Epoch: {epoch+1}/{epochs} | Batch: {batch_idx+1}/{len(train_loader)} '
                      f'| batch loss: {iter_loss:.4f} | running avg loss: {running_loss/(batch_idx+1):.4f}')

        # Average training loss and accuracy for this epoch
        epoch_train_loss = running_loss / len(train_loader)
        epoch_train_acc = 100. * correct / total
        train_acc_history.append(epoch_train_acc)
        train_loss_history.append(epoch_train_loss)

        # Test phase
        model.eval()  # switch to evaluation mode
        test_loss = 0
        correct_test = 0
        total_test = 0
        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(device), target.to(device)
                output = model(data)
                test_loss += criterion(output, target).item()
                _, predicted = output.max(1)
                total_test += target.size(0)
                correct_test += predicted.eq(target).sum().item()
        epoch_test_loss = test_loss / len(test_loader)
        epoch_test_acc = 100. * correct_test / total_test
        test_acc_history.append(epoch_test_acc)
        test_loss_history.append(epoch_test_loss)

        # Step the LR scheduler on the test loss
        scheduler.step(epoch_test_loss)

        print(f'Epoch {epoch+1}/{epochs} done | train acc: {epoch_train_acc:.2f}% | test acc: {epoch_test_acc:.2f}%')

    # Plot the loss of every iteration
    plot_iter_losses(all_iter_losses, iter_indices)
    # Plot the per-epoch accuracy and loss curves
    plot_epoch_metrics(train_acc_history, test_acc_history, train_loss_history, test_loss_history)

    return epoch_test_acc  # final test accuracy

# 6. Plot the loss of every iteration
def plot_iter_losses(losses, indices):
    plt.figure(figsize=(10, 4))
    plt.plot(indices, losses, 'b-', alpha=0.7, label='Iteration Loss')
    plt.xlabel('Iteration (batch index)')
    plt.ylabel('Loss')
    plt.title('Training loss per iteration')
    plt.legend()
    plt.grid(True)
    plt.tight_layout()
    plt.show()

# 7. Plot the per-epoch accuracy and loss curves
def plot_epoch_metrics(train_acc, test_acc, train_loss, test_loss):
    epochs = range(1, len(train_acc) + 1)
    plt.figure(figsize=(12, 4))

    # Accuracy curves
    plt.subplot(1, 2, 1)
    plt.plot(epochs, train_acc, 'b-', label='Train accuracy')
    plt.plot(epochs, test_acc, 'r-', label='Test accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy (%)')
    plt.title('Train and test accuracy')
    plt.legend()
    plt.grid(True)

    # Loss curves
    plt.subplot(1, 2, 2)
    plt.plot(epochs, train_loss, 'b-', label='Train loss')
    plt.plot(epochs, test_loss, 'r-', label='Test loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Train and test loss')
    plt.legend()
    plt.grid(True)

    plt.tight_layout()
    plt.show()

# 8. Run training and testing
epochs = 20  # more epochs for better results
print("Starting CNN training...")
final_accuracy = train(model, train_loader, test_loader, criterion, optimizer, scheduler, device, epochs)
print(f"Training finished! Final test accuracy: {final_accuracy:.2f}%")

2. Ablation study: introducing a residual mechanism and a CBAM module separately

import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np

# Use a CJK-capable fallback font (for Linux), as in part 1
plt.rcParams["font.family"] = ["WenQuanYi Micro Hei"]
plt.rcParams['axes.unicode_minus'] = False

import torch.nn.functional as F

# Channel attention
class ChannelAttention(nn.Module):
    def __init__(self, in_channels, ratio=16):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        # Shared MLP applied to both the avg-pooled and max-pooled descriptors
        self.fc = nn.Sequential(
            nn.Linear(in_channels, in_channels // ratio, bias=False),
            nn.ReLU(),
            nn.Linear(in_channels // ratio, in_channels, bias=False)
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        b, c, h, w = x.shape
        avg_out = self.fc(self.avg_pool(x).view(b, c))
        max_out = self.fc(self.max_pool(x).view(b, c))
        attention = self.sigmoid(avg_out + max_out).view(b, c, 1, 1)
        return x * attention

# Spatial attention module
class SpatialAttention(nn.Module):
    def __init__(self, kernel_size=7):
        super().__init__()
        self.conv = nn.Conv2d(2, 1, kernel_size, padding=kernel_size//2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Pool across the channel dimension, then fuse the two maps with a conv
        avg_out = torch.mean(x, dim=1, keepdim=True)
        max_out, _ = torch.max(x, dim=1, keepdim=True)
        pool_out = torch.cat([avg_out, max_out], dim=1)
        attention = self.conv(pool_out)
        return x * self.sigmoid(attention)

# CBAM module
class CBAM(nn.Module):
    def __init__(self, in_channels, ratio=16, kernel_size=7):
        super().__init__()
        self.channel_attn = ChannelAttention(in_channels, ratio)
        self.spatial_attn = SpatialAttention(kernel_size)

    def forward(self, x):
        # Channel attention first, then spatial attention
        x = self.channel_attn(x)
        x = self.spatial_attn(x)
        return x
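
# Quick sanity check (my addition): CBAM reweights features but should leave
# the tensor shape unchanged.
print(CBAM(64)(torch.randn(2, 64, 16, 16)).shape)  # torch.Size([2, 64, 16, 16])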
# Inception module with CBAM
class InceptionWithCBAM(nn.Module):
    def __init__(self, in_channels, use_cbam=True):
        """Inception module with CBAM attention on each branch.

        Args:
            in_channels: number of channels of the input feature map
            use_cbam: whether to apply CBAM (default True)
        """
        super(InceptionWithCBAM, self).__init__()
        # 1x1 conv branch
        self.branch1x1 = nn.Sequential(
            nn.Conv2d(in_channels, 64, kernel_size=1),
            nn.ReLU()
        )
        # CBAM for the 1x1 branch (an nn.Identity no-op when disabled)
        self.branch1x1_cbam = CBAM(64) if use_cbam else nn.Identity()
        # 3x3 conv branch
        self.branch3x3 = nn.Sequential(
            nn.Conv2d(in_channels, 96, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(96, 128, kernel_size=3, padding=1),
            nn.ReLU()
        )
        # CBAM for the 3x3 branch
        self.branch3x3_cbam = CBAM(128) if use_cbam else nn.Identity()
        # 5x5 conv branch
        self.branch5x5 = nn.Sequential(
            nn.Conv2d(in_channels, 16, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(16, 32, kernel_size=5, padding=2),
            nn.ReLU()
        )
        # CBAM for the 5x5 branch
        self.branch5x5_cbam = CBAM(32) if use_cbam else nn.Identity()
        # Pooling branch
        self.branch_pool = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            nn.Conv2d(in_channels, 32, kernel_size=1),
            nn.ReLU()
        )
        # CBAM for the pooling branch
        self.branch_pool_cbam = CBAM(32) if use_cbam else nn.Identity()

    def forward(self, x):
        # Each branch is followed by its CBAM; when CBAM is disabled these are
        # nn.Identity, so no per-branch use_cbam checks are needed here
        branch1x1 = self.branch1x1_cbam(self.branch1x1(x))
        branch3x3 = self.branch3x3_cbam(self.branch3x3(x))
        branch5x5 = self.branch5x5_cbam(self.branch5x5(x))
        branch_pool = self.branch_pool_cbam(self.branch_pool(x))
        # Concatenate the branch outputs along the channel dimension
        return torch.cat([branch1x1, branch3x3, branch5x5, branch_pool], dim=1)

# InceptionNet with CBAM
class InceptionNetWithCBAM(nn.Module):
    def __init__(self, num_classes=10, use_cbam=True):
        super(InceptionNetWithCBAM, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )
        # First Inception module (with CBAM)
        self.inception1 = InceptionWithCBAM(64, use_cbam)
        # Second Inception module (with CBAM)
        self.inception2 = InceptionWithCBAM(256, use_cbam)
        # Optional global CBAM for the whole network (nn.Identity when disabled)
        self.global_cbam = CBAM(256) if use_cbam else nn.Identity()
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(256, num_classes)

    def forward(self, x):
        x = self.conv1(x)
        x = self.inception1(x)
        x = self.inception2(x)
        # Apply the global CBAM (a no-op when disabled)
        x = self.global_cbam(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x
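
# Quick comparison (my addition): parameter counts with and without CBAM,
# worth reporting alongside the ablation accuracies.
for flag in (True, False):
    n_params = sum(p.numel() for p in InceptionNetWithCBAM(use_cbam=flag).parameters())
    print(f"use_cbam={flag}: {n_params:,} parameters")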
# Check whether a GPU is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Data preprocessing (same as the original code)
train_transform = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
    transforms.RandomRotation(15),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])

test_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])

# Load the datasets (same as the original code)
train_dataset = datasets.CIFAR10(root='./cifar_data/cifar_data', train=True, download=True, transform=train_transform)
test_dataset = datasets.CIFAR10(root='./cifar_data/cifar_data', train=False, transform=test_transform)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)

# Initialize the model, loss function, and optimizer
model = InceptionNetWithCBAM().to(device)  # use_cbam defaults to True; pass use_cbam=False for the no-CBAM baseline run
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
    optimizer,   # the optimizer whose LR is controlled (Adam here)
    mode='min',  # the monitored metric should decrease (e.g. a loss)
    patience=3,  # lower the LR only after 3 consecutive epochs without improvement
    factor=0.5   # reduction ratio (new LR = old LR * 0.5)
)
# 5.-7. The train(), plot_iter_losses(), and plot_epoch_metrics() functions are
# identical to those defined in part 1 and are reused here unchanged.

# 8. Run training and testing
epochs = 20  # more epochs for better results
print("Starting CNN training...")
final_accuracy = train(model, train_loader, test_loader, criterion, optimizer, scheduler, device, epochs)
print(f"Training finished! Final test accuracy: {final_accuracy:.2f}%")
