ResNet Structure, CBAM Placement, and Staged Fine-Tuning of Pretrained Models

Knowledge review:

  1. ResNet structure analysis
  2. Where to place CBAM modules
  3. Training strategies for pretrained models
    1. Differential learning rates (a minimal sketch follows this list)
    2. Three-stage fine-tuning
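
Differential learning rates are usually implemented with optimizer parameter groups: pretrained layers get a small learning rate so their weights drift slowly, while newly added heads get a larger one. A minimal sketch, assuming a torchvision VGG-style model whose parameter names start with `features` (backbone) and `classifier` (head); the learning-rate values are illustrative, not tuned:

import torch.optim as optim
from torchvision import models

model = models.vgg16_bn()  # stand-in model; any nn.Module with these prefixes works

# Split parameters by name prefix and give each group its own learning rate.
backbone = [p for n, p in model.named_parameters() if n.startswith('features')]
head = [p for n, p in model.named_parameters() if n.startswith('classifier')]
optimizer = optim.Adam([
    {'params': backbone, 'lr': 1e-4},  # small LR: preserve pretrained features
    {'params': head, 'lr': 1e-3},      # larger LR: train the new classifier head
])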

Homework:

  1. Make sure you thoroughly understand the ResNet-18 model structure (a quick inspection sketch follows this list)
  2. Try applying the fine-tuning strategy to VGG16+CBAM (the full script below is one attempt)
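
For homework 1, a quick way to get a feel for ResNet-18 is to instantiate it from torchvision and walk its top-level children; a minimal inspection sketch (randomly initialized weights are fine for this):

from torchvision import models

resnet = models.resnet18()
for name, module in resnet.named_children():
    print(name, '->', type(module).__name__)
# Prints: conv1, bn1, relu, maxpool, then layer1..layer4 (each a stack of two
# BasicBlocks with residual shortcuts), then avgpool and fc.

# Each BasicBlock is conv3x3-BN-ReLU-conv3x3-BN plus the identity shortcut:
print(resnet.layer1[0])

The rest of this post is the full script for homework 2: VGG16+CBAM trained with a three-stage fine-tuning schedule.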
import time
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter  # TensorBoard logging

# ======================================================================
# 1. VGG16+CBAM model implementation
# ======================================================================
class VGGBlock(nn.Module):
    def __init__(self, in_channels, out_channels, num_convs, use_cbam=True):
        super(VGGBlock, self).__init__()
        layers = []
        for _ in range(num_convs):
            layers.append(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1))
            layers.append(nn.BatchNorm2d(out_channels))
            layers.append(nn.ReLU(inplace=True))
            in_channels = out_channels
        if use_cbam:
            layers.append(CBAM(out_channels))
        layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        return self.block(x)

# ======================================================================
# 2. CBAM module implementation (same as before)
# ======================================================================
class ChannelAttention(nn.Module):
    def __init__(self, in_channels, reduction_ratio=16):
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        self.fc = nn.Sequential(
            nn.Conv2d(in_channels, in_channels // reduction_ratio, 1, bias=False),
            nn.ReLU(),
            nn.Conv2d(in_channels // reduction_ratio, in_channels, 1, bias=False)
        )

    def forward(self, x):
        avg_out = self.fc(self.avg_pool(x))
        max_out = self.fc(self.max_pool(x))
        out = avg_out + max_out
        return torch.sigmoid(out)

class SpatialAttention(nn.Module):
    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        self.conv = nn.Conv2d(2, 1, kernel_size, padding=kernel_size//2, bias=False)

    def forward(self, x):
        avg_out = torch.mean(x, dim=1, keepdim=True)
        max_out, _ = torch.max(x, dim=1, keepdim=True)
        out = torch.cat([avg_out, max_out], dim=1)
        out = self.conv(out)
        return torch.sigmoid(out)

class CBAM(nn.Module):
    def __init__(self, in_channels, reduction_ratio=16, kernel_size=7):
        super(CBAM, self).__init__()
        self.channel_att = ChannelAttention(in_channels, reduction_ratio)
        self.spatial_att = SpatialAttention(kernel_size)

    def forward(self, x):
        x = x * self.channel_att(x)
        x = x * self.spatial_att(x)
        return x

# ======================================================================
# 3. VGG16_CBAM network definition
# ======================================================================
class VGG16_CBAM(nn.Module):
    def __init__(self, num_classes=10, init_weights=True):
        super(VGG16_CBAM, self).__init__()
        # VGG16 configuration: each stage is [num conv layers, output channels, use CBAM]
        cfg = [
            [2, 64, True],    # Stage 1: 2 conv layers, 64 channels, with CBAM
            [2, 128, True],   # Stage 2: 2 conv layers, 128 channels, with CBAM
            [3, 256, True],   # Stage 3: 3 conv layers, 256 channels, with CBAM
            [3, 512, True],   # Stage 4: 3 conv layers, 512 channels, with CBAM
            [3, 512, False]   # Stage 5: 3 conv layers, 512 channels, no CBAM (for performance)
        ]
        self.features = self._make_layers(cfg)
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )
        if init_weights:
            self._initialize_weights()

    def _make_layers(self, cfg):
        layers = []
        in_channels = 3
        for num_convs, out_channels, use_cbam in cfg:
            layers.append(VGGBlock(in_channels, out_channels, num_convs, use_cbam))
            in_channels = out_channels
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
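
Before wiring the model into the training loop, a quick smoke test (not part of the original script) can confirm that CBAM preserves tensor shapes and that the full network produces the expected logits:

# CBAM multiplies attention maps onto its input, so shapes must be preserved.
x = torch.randn(2, 64, 56, 56)
assert CBAM(64)(x).shape == x.shape

# Full forward pass: a 224x224 batch should yield 10-class logits.
logits = VGG16_CBAM(num_classes=10)(torch.randn(2, 3, 224, 224))
print(logits.shape)  # torch.Size([2, 10])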
# ======================================================================
# 4. Training function combining the staged strategy with detailed logging
# ======================================================================
def set_trainable_layers(model, trainable_parts):
    print(f"\n---> Unfreezing and setting trainable: {trainable_parts}")
    for name, param in model.named_parameters():
        param.requires_grad = False
        for part in trainable_parts:
            if part in name:
                param.requires_grad = True
                break

def train_staged_finetuning(model, criterion, train_loader, test_loader, device, epochs, writer):
    optimizer = None
    # Initialize history lists
    all_iter_losses, iter_indices = [], []
    train_acc_history, test_acc_history = [], []
    train_loss_history, test_loss_history = [], []

    for epoch in range(1, epochs + 1):
        epoch_start_time = time.time()

        # --- Dynamically adjust the learning rate and frozen layers ---
        if epoch == 1:
            print("\n" + "="*50 + "\n🚀 Stage 1: train the CBAM modules and the classifier\n" + "="*50)
            set_trainable_layers(model, ["cbam", "classifier"])
            optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-3)
            writer.add_scalar('learning_rate', 1e-3, epoch)  # log the learning rate
        elif epoch == 6:
            print("\n" + "="*50 + "\n✈️ Stage 2: unfreeze the last two conv stages (block4, block5)\n" + "="*50)
            set_trainable_layers(model, ["cbam", "classifier", "features.3", "features.4"])
            optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-4)
            writer.add_scalar('learning_rate', 1e-4, epoch)  # log the learning rate
        elif epoch == 21:
            print("\n" + "="*50 + "\n🛰️ Stage 3: unfreeze all layers for global fine-tuning\n" + "="*50)
            for param in model.parameters():
                param.requires_grad = True
            optimizer = optim.Adam(model.parameters(), lr=1e-5)
            writer.add_scalar('learning_rate', 1e-5, epoch)  # log the learning rate

        # --- Training loop ---
        model.train()
        running_loss, correct, total = 0.0, 0, 0
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()

            # Record the loss of every iteration
            iter_loss = loss.item()
            all_iter_losses.append(iter_loss)
            iter_indices.append((epoch - 1) * len(train_loader) + batch_idx + 1)

            # Log the iteration loss to TensorBoard
            global_step = (epoch - 1) * len(train_loader) + batch_idx + 1
            writer.add_scalar('train/iter_loss', iter_loss, global_step)

            running_loss += iter_loss
            _, predicted = output.max(1)
            total += target.size(0)
            correct += predicted.eq(target).sum().item()

            # Print every 100 batches
            if (batch_idx + 1) % 100 == 0:
                print(f'Epoch: {epoch}/{epochs} | Batch: {batch_idx+1}/{len(train_loader)} '
                      f'| Batch loss: {iter_loss:.4f} | Running avg loss: {running_loss/(batch_idx+1):.4f}')

        epoch_train_loss = running_loss / len(train_loader)
        epoch_train_acc = 100. * correct / total
        train_loss_history.append(epoch_train_loss)
        train_acc_history.append(epoch_train_acc)

        # Log epoch-level training metrics to TensorBoard
        writer.add_scalar('train/loss', epoch_train_loss, epoch)
        writer.add_scalar('train/accuracy', epoch_train_acc, epoch)

        # --- Evaluation loop ---
        model.eval()
        test_loss, correct_test, total_test = 0, 0, 0
        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(device), target.to(device)
                output = model(data)
                test_loss += criterion(output, target).item()
                _, predicted = output.max(1)
                total_test += target.size(0)
                correct_test += predicted.eq(target).sum().item()
        epoch_test_loss = test_loss / len(test_loader)
        epoch_test_acc = 100. * correct_test / total_test
        test_loss_history.append(epoch_test_loss)
        test_acc_history.append(epoch_test_acc)

        # Log epoch-level test metrics to TensorBoard
        writer.add_scalar('test/loss', epoch_test_loss, epoch)
        writer.add_scalar('test/accuracy', epoch_test_acc, epoch)

        # Print the final results of each epoch
        print(f'Epoch {epoch}/{epochs} done | Time: {time.time() - epoch_start_time:.2f}s '
              f'| Train acc: {epoch_train_acc:.2f}% | Test acc: {epoch_test_acc:.2f}%')

    # Plot results after training finishes
    print("\nTraining complete! Plotting result charts...")
    plot_iter_losses(all_iter_losses, iter_indices)
    plot_epoch_metrics(train_acc_history, test_acc_history, train_loss_history, test_loss_history)

    # Return the final test accuracy
    return epoch_test_acc
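
Because set_trainable_layers freezes everything and then re-enables parameters by substring match, a misspelled part name would silently freeze the whole network. A small check (the helper name is mine, not from the original script) can be run after each stage switch:

# Hypothetical helper: confirm that a stage switch unfroze what you expect.
def count_trainable_params(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

# e.g. after stage 1, only the CBAM and classifier parameters should count:
# print(f'trainable parameters: {count_trainable_params(model):,}')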
# ======================================================================
# 5. Plotting functions
# ======================================================================
def plot_iter_losses(losses, indices):
    plt.figure(figsize=(10, 4))
    plt.plot(indices, losses, 'b-', alpha=0.7, label='Iteration Loss')
    plt.xlabel('Iteration (batch index)')
    plt.ylabel('Loss')
    plt.title('Training Loss per Iteration')
    plt.legend()
    plt.grid(True)
    plt.tight_layout()
    plt.show()

def plot_epoch_metrics(train_acc, test_acc, train_loss, test_loss):
    epochs = range(1, len(train_acc) + 1)
    plt.figure(figsize=(12, 4))

    plt.subplot(1, 2, 1)
    plt.plot(epochs, train_acc, 'b-', label='Train accuracy')
    plt.plot(epochs, test_acc, 'r-', label='Test accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy (%)')
    plt.title('Train and Test Accuracy')
    plt.legend(); plt.grid(True)

    plt.subplot(1, 2, 2)
    plt.plot(epochs, train_loss, 'b-', label='Train loss')
    plt.plot(epochs, test_loss, 'r-', label='Test loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Train and Test Loss')
    plt.legend(); plt.grid(True)

    plt.tight_layout()
    plt.show()
# ======================================================================
# 6. Data loading and preprocessing
# ======================================================================
def load_data():
    # Data preprocessing. CIFAR-10 images are natively 32x32; they are
    # upsampled to 224x224 here to match the ImageNet-style VGG input size.
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    test_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    # Load the dataset (CIFAR-10 as an example)
    train_dataset = datasets.CIFAR10(root='./data', train=True,
                                     download=True, transform=train_transform)
    test_dataset = datasets.CIFAR10(root='./data', train=False,
                                    download=True, transform=test_transform)

    # Create the data loaders
    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True, num_workers=4)
    test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False, num_workers=4)
    return train_loader, test_loader
# ======================================================================
# 7. Run training
# ======================================================================
if __name__ == "__main__":
    # Select the device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    # Initialize the TensorBoard writer (creates a timestamped log directory)
    writer = SummaryWriter()

    # Load the data
    train_loader, test_loader = load_data()

    # Create the model
    model = VGG16_CBAM().to(device)

    # Log the model graph (requires a sample input)
    input_sample = torch.randn(1, 3, 224, 224).to(device)
    writer.add_graph(model, input_sample)

    criterion = nn.CrossEntropyLoss()
    epochs = 50

    print("Starting training of VGG16+CBAM with the staged fine-tuning strategy...")
    final_accuracy = train_staged_finetuning(model, criterion, train_loader, test_loader, device, epochs, writer)
    print(f"Training complete! Final test accuracy: {final_accuracy:.2f}%")

    # Close the TensorBoard writer
    writer.close()

    # torch.save(model.state_dict(), 'vgg16_cbam_finetuned.pth')
    # print("Model saved as: vgg16_cbam_finetuned.pth")
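
One caveat: the script initializes VGG16_CBAM randomly, so the staged schedule is really staged unfreezing of fresh weights rather than true fine-tuning. To start from actual pretrained weights, one option is to copy the 3x3 conv and BatchNorm weights from torchvision's vgg16_bn in order, skipping the CBAM modules, which have no pretrained counterpart. A hedged sketch, assuming torchvision >= 0.13 for the weights enum; the helper name is mine:

from torchvision import models

def load_pretrained_backbone(model):
    pretrained = models.vgg16_bn(weights=models.VGG16_BN_Weights.IMAGENET1K_V1)
    # Copy 3x3 convs only: CBAM's 1x1 and 7x7 convs have no pretrained source.
    src_convs = [m for m in pretrained.features.modules() if isinstance(m, nn.Conv2d)]
    dst_convs = [m for m in model.features.modules()
                 if isinstance(m, nn.Conv2d) and m.kernel_size == (3, 3)]
    src_bns = [m for m in pretrained.features.modules() if isinstance(m, nn.BatchNorm2d)]
    dst_bns = [m for m in model.features.modules() if isinstance(m, nn.BatchNorm2d)]
    # Both networks define their 13 convs and 13 BNs in the same order,
    # so a pairwise copy lines them up correctly.
    for s, d in zip(src_convs + src_bns, dst_convs + dst_bns):
        d.load_state_dict(s.state_dict())

# Usage, before calling train_staged_finetuning:
# load_pretrained_backbone(model)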

@浙大疏锦行

