
DAY 41: A Simple CNN

Knowledge review

  1. Data augmentation
  2. How to define a convolutional neural network
  3. Batch normalization: normalizes the activation distribution within a batch; commonly used for image data
  4. Feature maps: only the output of a convolution operation is called a feature map
  5. Learning-rate schedulers: they directly modify the base learning rate stored in the optimizer (see the sketch below)
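As a quick illustration of point 5, here is a minimal toy sketch (not taken from the day's files) showing that stepping a scheduler rewrites the learning rate held by the optimizer in place:

import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR

model = nn.Linear(10, 2)                               # dummy model, just to have parameters
optimizer = optim.SGD(model.parameters(), lr=0.01)
scheduler = StepLR(optimizer, step_size=2, gamma=0.1)  # multiply lr by 0.1 every 2 epochs

for epoch in range(6):
    # ... one epoch of training would go here ...
    scheduler.step()  # mutates optimizer.param_groups[0]['lr'] in place
    print(epoch, scheduler.get_last_lr()[0])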

A typical convolutional pipeline looks like this (sketched in code below):

1. Input → Conv layer → Batch Norm (optional) → Activation → Pooling → next layer
2. Flatten → Dense (with optional Dropout) → Dense (output)

(The code below applies ReLU before max pooling; since ReLU is monotonic, max pooling and ReLU commute, so pooling first would give the same result.)
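Written out as code, the two stages might look like the following minimal sketch (the layer sizes are assumptions for 3x32x32 CIFAR-10 inputs, not taken from the files below):

import torch.nn as nn

features = nn.Sequential(
    nn.Conv2d(3, 32, kernel_size=3, padding=1),  # conv layer
    nn.BatchNorm2d(32),                          # batch norm (optional)
    nn.ReLU(),                                   # activation
    nn.MaxPool2d(2),                             # pooling: 32x32 -> 16x16
)
classifier = nn.Sequential(
    nn.Flatten(),                  # flatten
    nn.Linear(32 * 16 * 16, 128),  # dense
    nn.ReLU(),
    nn.Dropout(0.5),               # optional dropout
    nn.Linear(128, 10),            # output layer
)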

There are quite a few concepts involved here. If you have not yet covered the computer-vision part of the review/intensive course, please look up some introductory videos online, or study the earlier handout I provided:

Introduction to Computer Vision

Assignment: manually modify the scheduler and the CNN architecture below, and observe how the training behaviour changes.

"""
DAY 41 实验:比较不同的调度器和CNN结构本文件提供了多种CNN结构和学习率调度器的实现,
用于比较不同配置下的训练效果。
"""import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, CosineAnnealingLR, OneCycleLR# 设置中文字体
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False# 设置随机种子
torch.manual_seed(42)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")# CIFAR-10数据集的类别
classes = ('飞机', '汽车', '鸟', '猫', '鹿', '狗', '青蛙', '马', '船', '卡车')#====================== 1. 数据加载与增强 ======================def load_data(batch_size=64, is_train=True):"""加载CIFAR-10数据集,并应用数据增强"""if is_train:# 训练集使用数据增强transform = transforms.Compose([transforms.RandomHorizontalFlip(),transforms.RandomRotation(10),transforms.RandomAffine(0, shear=10, scale=(0.8, 1.2)),transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),transforms.ToTensor(),transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])else:# 测试集只需要标准化transform = transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])dataset = torchvision.datasets.CIFAR10(root='./data', train=is_train,download=True,transform=transform)dataloader = DataLoader(dataset,batch_size=batch_size,shuffle=is_train,num_workers=2)return dataloader#====================== 2. 不同的CNN模型结构 ======================class BasicCNN(nn.Module):"""基础CNN模型:3个卷积层"""def __init__(self, use_bn=True, dropout_rate=0.5):super(BasicCNN, self).__init__()self.use_bn = use_bn# 第一个卷积块self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)self.bn1 = nn.BatchNorm2d(32) if use_bn else nn.Identity()# 第二个卷积块self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)self.bn2 = nn.BatchNorm2d(64) if use_bn else nn.Identity()# 第三个卷积块self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)self.bn3 = nn.BatchNorm2d(128) if use_bn else nn.Identity()# 全连接层self.fc1 = nn.Linear(128 * 4 * 4, 512)self.dropout = nn.Dropout(dropout_rate)self.fc2 = nn.Linear(512, 10)def forward(self, x):# 第一个卷积块x = self.conv1(x)x = self.bn1(x)x = F.relu(x)x = F.max_pool2d(x, 2)# 第二个卷积块x = self.conv2(x)x = self.bn2(x)x = F.relu(x)x = F.max_pool2d(x, 2)# 第三个卷积块x = self.conv3(x)x = self.bn3(x)x = F.relu(x)x = F.max_pool2d(x, 2)# 全连接层x = torch.flatten(x, 1)x = self.fc1(x)x = F.relu(x)x = self.dropout(x)x = self.fc2(x)return F.log_softmax(x, dim=1)class DeepCNN(nn.Module):"""更深的CNN模型:5个卷积层"""def __init__(self, use_bn=True, dropout_rate=0.5):super(DeepCNN, self).__init__()self.use_bn = use_bn# 第一个卷积块self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)self.bn1 = nn.BatchNorm2d(32) if use_bn else nn.Identity()# 第二个卷积块self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)self.bn2 = nn.BatchNorm2d(64) if use_bn else nn.Identity()# 第三个卷积块self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)self.bn3 = nn.BatchNorm2d(128) if use_bn else nn.Identity()# 第四个卷积块self.conv4 = nn.Conv2d(128, 256, kernel_size=3, padding=1)self.bn4 = nn.BatchNorm2d(256) if use_bn else nn.Identity()# 第五个卷积块self.conv5 = nn.Conv2d(256, 512, kernel_size=3, padding=1)self.bn5 = nn.BatchNorm2d(512) if use_bn else nn.Identity()# 全连接层self.fc1 = nn.Linear(512, 512)self.dropout = nn.Dropout(dropout_rate)self.fc2 = nn.Linear(512, 10)def forward(self, x):# 第一个卷积块x = self.conv1(x)x = self.bn1(x)x = F.relu(x)x = F.max_pool2d(x, 2)# 第二个卷积块x = self.conv2(x)x = self.bn2(x)x = F.relu(x)x = F.max_pool2d(x, 2)# 第三个卷积块x = self.conv3(x)x = self.bn3(x)x = F.relu(x)# 第四个卷积块x = self.conv4(x)x = self.bn4(x)x = F.relu(x)x = F.max_pool2d(x, 2)# 第五个卷积块x = self.conv5(x)x = self.bn5(x)x = F.relu(x)x = F.adaptive_avg_pool2d(x, (1, 1))# 全连接层x = torch.flatten(x, 1)x = self.fc1(x)x = F.relu(x)x = self.dropout(x)x = self.fc2(x)return F.log_softmax(x, dim=1)class ResidualBlock(nn.Module):"""残差块"""def __init__(self, in_channels, out_channels, stride=1):super(ResidualBlock, self).__init__()self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)self.bn1 = 
nn.BatchNorm2d(out_channels)self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)self.bn2 = nn.BatchNorm2d(out_channels)self.shortcut = nn.Sequential()if stride != 1 or in_channels != out_channels:self.shortcut = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),nn.BatchNorm2d(out_channels))def forward(self, x):out = F.relu(self.bn1(self.conv1(x)))out = self.bn2(self.conv2(out))out += self.shortcut(x)out = F.relu(out)return outclass ResNetCNN(nn.Module):"""带有残差连接的CNN模型"""def __init__(self, dropout_rate=0.5):super(ResNetCNN, self).__init__()self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)self.bn1 = nn.BatchNorm2d(64)# 残差块self.layer1 = self._make_layer(64, 64, 2, stride=1)self.layer2 = self._make_layer(64, 128, 2, stride=2)self.layer3 = self._make_layer(128, 256, 2, stride=2)# 全连接层(修正输入维度:256 * 8 * 8)self.fc = nn.Linear(256 * 8 * 8, 10)self.dropout = nn.Dropout(dropout_rate)def _make_layer(self, in_channels, out_channels, num_blocks, stride):layers = []layers.append(ResidualBlock(in_channels, out_channels, stride))for _ in range(1, num_blocks):layers.append(ResidualBlock(out_channels, out_channels, 1))return nn.Sequential(*layers)def forward(self, x):out = F.relu(self.bn1(self.conv1(x)))  # 32x32out = self.layer1(out)                  # 32x32out = self.layer2(out)                  # 16x16out = self.layer3(out)                  # 8x8out = torch.flatten(out, 1)             # 256*8*8out = self.dropout(out)out = self.fc(out)return F.log_softmax(out, dim=1)#====================== 3. 训练函数 ======================def train(model, train_loader, optimizer, scheduler, epoch, history):"""训练一个epoch"""model.train()train_loss = 0correct = 0total = 0for batch_idx, (data, target) in enumerate(train_loader):data, target = data.to(device), target.to(device)optimizer.zero_grad()output = model(data)loss = F.nll_loss(output, target)loss.backward()optimizer.step()train_loss += loss.item()pred = output.max(1, keepdim=True)[1]correct += pred.eq(target.view_as(pred)).sum().item()total += target.size(0)if batch_idx % 100 == 0:print(f'Train Epoch: {epoch} [{batch_idx * len(data)}/{len(train_loader.dataset)} 'f'({100. * batch_idx / len(train_loader):.0f}%)]\t'f'Loss: {loss.item():.6f}\t'f'Accuracy: {100. * correct / total:.2f}%')# 如果使用ReduceLROnPlateau,需要在每个epoch结束时更新if isinstance(scheduler, optim.lr_scheduler.ReduceLROnPlateau):scheduler.step(train_loss)elif isinstance(scheduler, optim.lr_scheduler.OneCycleLR):pass  # OneCycleLR在每个batch后更新,不在epoch结束时更新else:scheduler.step()epoch_loss = train_loss / len(train_loader)epoch_acc = 100. * correct / totalhistory['train_loss'].append(epoch_loss)history['train_acc'].append(epoch_acc)return epoch_loss, epoch_acc#====================== 4. 测试函数 ======================def test(model, test_loader, history):"""在测试集上评估模型"""model.eval()test_loss = 0correct = 0with torch.no_grad():for data, target in test_loader:data, target = data.to(device), target.to(device)output = model(data)test_loss += F.nll_loss(output, target, reduction='sum').item()pred = output.max(1, keepdim=True)[1]correct += pred.eq(target.view_as(pred)).sum().item()test_loss /= len(test_loader.dataset)accuracy = 100. * correct / len(test_loader.dataset)history['test_loss'].append(test_loss)history['test_acc'].append(accuracy)print(f'\nTest set: Average loss: {test_loss:.4f}, 'f'Accuracy: {correct}/{len(test_loader.dataset)} 'f'({accuracy:.2f}%)\n')return test_loss, accuracy#====================== 5. 
可视化函数 ======================def plot_training_history(history, title):"""绘制训练历史曲线"""epochs = range(1, len(history['train_loss']) + 1)plt.figure(figsize=(12, 4))plt.subplot(1, 2, 1)plt.plot(epochs, history['train_loss'], 'b-', label='训练损失')plt.plot(epochs, history['test_loss'], 'r-', label='测试损失')plt.title(f'{title} - 训练和测试损失')plt.xlabel('Epoch')plt.ylabel('损失')plt.legend()plt.grid(True)plt.subplot(1, 2, 2)plt.plot(epochs, history['train_acc'], 'b-', label='训练准确率')plt.plot(epochs, history['test_acc'], 'r-', label='测试准确率')plt.title(f'{title} - 训练和测试准确率')plt.xlabel('Epoch')plt.ylabel('准确率 (%)')plt.legend()plt.grid(True)plt.tight_layout()plt.show()def compare_models(histories, titles):"""比较不同模型的训练历史"""epochs = range(1, len(histories[0]['train_loss']) + 1)plt.figure(figsize=(12, 8))# 比较训练损失plt.subplot(2, 2, 1)for i, history in enumerate(histories):plt.plot(epochs, history['train_loss'], label=titles[i])plt.title('训练损失比较')plt.xlabel('Epoch')plt.ylabel('损失')plt.legend()plt.grid(True)# 比较测试损失plt.subplot(2, 2, 2)for i, history in enumerate(histories):plt.plot(epochs, history['test_loss'], label=titles[i])plt.title('测试损失比较')plt.xlabel('Epoch')plt.ylabel('损失')plt.legend()plt.grid(True)# 比较训练准确率plt.subplot(2, 2, 3)for i, history in enumerate(histories):plt.plot(epochs, history['train_acc'], label=titles[i])plt.title('训练准确率比较')plt.xlabel('Epoch')plt.ylabel('准确率 (%)')plt.legend()plt.grid(True)# 比较测试准确率plt.subplot(2, 2, 4)for i, history in enumerate(histories):plt.plot(epochs, history['test_acc'], label=titles[i])plt.title('测试准确率比较')plt.xlabel('Epoch')plt.ylabel('准确率 (%)')plt.legend()plt.grid(True)plt.tight_layout()plt.show()#====================== 6. 实验函数 ======================def run_experiment(model_type, scheduler_type, epochs=10, batch_size=64, lr=0.01):"""运行一个实验:训练指定的模型和调度器"""# 初始化训练历史记录history = {'train_loss': [],'train_acc': [],'test_loss': [],'test_acc': []}# 加载数据print("正在加载训练集...")train_loader = load_data(batch_size, is_train=True)print("正在加载测试集...")test_loader = load_data(batch_size, is_train=False)# 创建模型if model_type == 'basic':model = BasicCNN(use_bn=True).to(device)model_name = "基础CNN"elif model_type == 'deep':model = DeepCNN(use_bn=True).to(device)model_name = "深层CNN"elif model_type == 'resnet':model = ResNetCNN().to(device)model_name = "残差CNN"else:raise ValueError(f"未知的模型类型: {model_type}")# 创建优化器optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)# 创建学习率调度器if scheduler_type == 'step':scheduler = StepLR(optimizer, step_size=5, gamma=0.1)scheduler_name = "StepLR"elif scheduler_type == 'plateau':scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=3)scheduler_name = "ReduceLROnPlateau"elif scheduler_type == 'cosine':scheduler = CosineAnnealingLR(optimizer, T_max=epochs)scheduler_name = "CosineAnnealingLR"elif scheduler_type == 'onecycle':scheduler = OneCycleLR(optimizer, max_lr=lr*10, epochs=epochs, steps_per_epoch=len(train_loader))scheduler_name = "OneCycleLR"else:raise ValueError(f"未知的调度器类型: {scheduler_type}")# 训练和测试print(f"开始训练 {model_name} 使用 {scheduler_name},设备: {device}")for epoch in range(1, epochs + 1):train_loss, train_acc = train(model, train_loader, optimizer, scheduler, epoch, history)test_loss, test_acc = test(model, test_loader, history)# 返回训练历史和实验标题return history, f"{model_name} + {scheduler_name}"#====================== 7. 
主函数 ======================def main():# 超参数设置epochs = 10batch_size = 64lr = 0.01# 运行不同的实验experiments = [# 比较不同的CNN结构(使用相同的调度器)('basic', 'cosine'),('deep', 'cosine'),('resnet', 'cosine'),# 比较不同的调度器(使用相同的CNN结构)# ('basic', 'step'),# ('basic', 'plateau'),# ('basic', 'cosine'),# ('basic', 'onecycle'),]histories = []titles = []for model_type, scheduler_type in experiments:history, title = run_experiment(model_type, scheduler_type, epochs, batch_size, lr)histories.append(history)titles.append(title)# 比较不同实验的结果compare_models(histories, titles)if __name__ == '__main__':main()"""
实验说明:1. 模型结构比较
- BasicCNN: 3个卷积层的基础模型
- DeepCNN: 5个卷积层的深层模型
- ResNetCNN: 带有残差连接的模型2. 学习率调度器比较
- StepLR: 按步长降低学习率
- ReduceLROnPlateau: 当指标不再改善时降低学习率
- CosineAnnealingLR: 余弦退火调整学习率
- OneCycleLR: 一个周期的学习率策略3. 如何使用
- 默认比较不同的CNN结构(使用相同的余弦退火调度器)
- 取消注释相应的代码可以比较不同的调度器(使用相同的基础CNN)
- 可以修改epochs、batch_size和lr参数来调整训练过程4. 观察重点
- 不同模型的收敛速度
- 最终的测试准确率
- 是否出现过拟合
- 学习率调度器对训练过程的影响
"""
Loading training set...
Files already downloaded and verified
Loading test set...
Files already downloaded and verified
Starting training, device: cpu
Train Epoch: 1 [0/50000 (0%)]   Loss: 2.293068  Accuracy: 12.50%
Train Epoch: 1 [6400/50000 (13%)]       Loss: 1.712008  Accuracy: 27.32%
Train Epoch: 1 [12800/50000 (26%)]      Loss: 1.600459  Accuracy: 32.20%
Train Epoch: 1 [19200/50000 (38%)]      Loss: 1.775305  Accuracy: 34.62%
Train Epoch: 1 [25600/50000 (51%)]      Loss: 1.768592  Accuracy: 36.12%
Train Epoch: 1 [32000/50000 (64%)]      Loss: 1.276903  Accuracy: 37.53%
Train Epoch: 1 [38400/50000 (77%)]      Loss: 1.339285  Accuracy: 38.52%
Train Epoch: 1 [44800/50000 (90%)]      Loss: 1.675705  Accuracy: 39.45%

Test set: Average loss: 1.2459, Accuracy: 5433/10000 (54.33%)

Train Epoch: 2 [0/50000 (0%)]   Loss: 1.580209  Accuracy: 40.62%
Train Epoch: 2 [6400/50000 (13%)]       Loss: 1.495638  Accuracy: 47.74%
Train Epoch: 2 [12800/50000 (26%)]      Loss: 1.260183  Accuracy: 48.11%
Train Epoch: 2 [19200/50000 (38%)]      Loss: 1.548963  Accuracy: 48.80%
Train Epoch: 2 [25600/50000 (51%)]      Loss: 1.085625  Accuracy: 49.33%
Train Epoch: 2 [32000/50000 (64%)]      Loss: 1.324473  Accuracy: 49.83%
Train Epoch: 2 [38400/50000 (77%)]      Loss: 1.382297  Accuracy: 50.29%
Train Epoch: 2 [44800/50000 (90%)]      Loss: 1.364151  Accuracy: 50.72%

Test set: Average loss: 1.1879, Accuracy: 5817/10000 (58.17%)

Train Epoch: 3 [0/50000 (0%)]   Loss: 1.418277  Accuracy: 43.75%
Train Epoch: 3 [6400/50000 (13%)]       Loss: 0.966939  Accuracy: 58.01%
Train Epoch: 3 [12800/50000 (26%)]      Loss: 1.144377  Accuracy: 58.63%
Train Epoch: 3 [19200/50000 (38%)]      Loss: 0.795806  Accuracy: 59.14%
Train Epoch: 3 [25600/50000 (51%)]      Loss: 0.949723  Accuracy: 59.52%
Train Epoch: 3 [32000/50000 (64%)]      Loss: 1.286763  Accuracy: 59.81%
Train Epoch: 3 [38400/50000 (77%)]      Loss: 1.195282  Accuracy: 59.94%
Train Epoch: 3 [44800/50000 (90%)]      Loss: 0.957277  Accuracy: 60.14%

Test set: Average loss: 0.8982, Accuracy: 6821/10000 (68.21%)

Train Epoch: 4 [0/50000 (0%)]   Loss: 0.887834  Accuracy: 71.88%
Train Epoch: 4 [6400/50000 (13%)]       Loss: 1.282257  Accuracy: 62.07%
Train Epoch: 4 [12800/50000 (26%)]      Loss: 0.960115  Accuracy: 62.15%
Train Epoch: 4 [19200/50000 (38%)]      Loss: 1.008237  Accuracy: 62.35%
Train Epoch: 4 [25600/50000 (51%)]      Loss: 1.156668  Accuracy: 62.07%
Train Epoch: 4 [32000/50000 (64%)]      Loss: 0.733899  Accuracy: 62.38%
Train Epoch: 4 [38400/50000 (77%)]      Loss: 1.111758  Accuracy: 62.18%
Train Epoch: 4 [44800/50000 (90%)]      Loss: 1.084628  Accuracy: 62.32%

Test set: Average loss: 0.8728, Accuracy: 6886/10000 (68.86%)

Training finished; plotting training history...
Visualizing feature maps...
PS C:\Users\I.Love.I\Desktop\Python_code> & C:/Users/I.Love.I/.conda/envs/DL/python.exe c:/Users/I.Love.I/Desktop/Python_code/python60-days-challenge/1_python-learning-library/Day41_simple.py
Loading data...
Files already downloaded and verified
Files already downloaded and verified

Experiment 1: training the basic CNN (3 layers)...
Train Epoch: 1 [0/50000 (0%)]   Loss: 2.314068  Accuracy: 14.06%        LR: 0.010000
Train Epoch: 1 [6400/50000 (13%)]       Loss: 1.568440  Accuracy: 30.23%        LR: 0.010000
Train Epoch: 1 [12800/50000 (26%)]      Loss: 1.659113  Accuracy: 35.59%        LR: 0.010000
Train Epoch: 1 [19200/50000 (38%)]      Loss: 1.617478  Accuracy: 38.03%        LR: 0.010000
Train Epoch: 1 [25600/50000 (51%)]      Loss: 1.440859  Accuracy: 39.80%        LR: 0.010000
Train Epoch: 1 [32000/50000 (64%)]      Loss: 1.178089  Accuracy: 41.20%        LR: 0.010000
Train Epoch: 1 [38400/50000 (77%)]      Loss: 1.137583  Accuracy: 42.30%        LR: 0.010000
Train Epoch: 1 [44800/50000 (90%)]      Loss: 1.342647  Accuracy: 43.36%        LR: 0.010000
Epoch 1: Test loss: 1.2134, Accuracy: 56.47%
Train Epoch: 2 [0/50000 (0%)]   Loss: 1.309829  Accuracy: 64.06%        LR: 0.010000
Train Epoch: 2 [6400/50000 (13%)]       Loss: 1.275745  Accuracy: 52.75%        LR: 0.010000
Train Epoch: 2 [12800/50000 (26%)]      Loss: 1.147640  Accuracy: 53.40%        LR: 0.010000
Train Epoch: 2 [19200/50000 (38%)]      Loss: 1.347642  Accuracy: 54.10%        LR: 0.010000
Train Epoch: 2 [25600/50000 (51%)]      Loss: 1.003166  Accuracy: 54.91%        LR: 0.010000
Train Epoch: 2 [32000/50000 (64%)]      Loss: 1.219489  Accuracy: 55.54%        LR: 0.010000
Train Epoch: 2 [38400/50000 (77%)]      Loss: 1.321691  Accuracy: 56.07%        LR: 0.010000
Train Epoch: 2 [44800/50000 (90%)]      Loss: 1.249267  Accuracy: 56.47%        LR: 0.010000
Epoch 2: Test loss: 1.0821, Accuracy: 62.85%
Train Epoch: 3 [0/50000 (0%)]   Loss: 1.189534  Accuracy: 46.88%        LR: 0.001000
Train Epoch: 3 [6400/50000 (13%)]       Loss: 0.800227  Accuracy: 63.40%        LR: 0.001000
Train Epoch: 3 [12800/50000 (26%)]      Loss: 1.102200  Accuracy: 64.44%        LR: 0.001000
Train Epoch: 3 [19200/50000 (38%)]      Loss: 0.698571  Accuracy: 64.95%        LR: 0.001000
Train Epoch: 3 [25600/50000 (51%)]      Loss: 1.008356  Accuracy: 65.14%        LR: 0.001000
Train Epoch: 3 [32000/50000 (64%)]      Loss: 1.045420  Accuracy: 65.29%        LR: 0.001000
Train Epoch: 3 [38400/50000 (77%)]      Loss: 0.906162  Accuracy: 65.34%        LR: 0.001000
Train Epoch: 3 [44800/50000 (90%)]      Loss: 0.756010  Accuracy: 65.63%        LR: 0.001000
Epoch 3: Test loss: 0.8147, Accuracy: 71.67%
Train Epoch: 4 [0/50000 (0%)]   Loss: 0.931888  Accuracy: 70.31%        LR: 0.001000
Train Epoch: 4 [6400/50000 (13%)]       Loss: 1.229634  Accuracy: 67.79%        LR: 0.001000
Train Epoch: 4 [12800/50000 (26%)]      Loss: 0.853696  Accuracy: 67.86%        LR: 0.001000
Train Epoch: 4 [19200/50000 (38%)]      Loss: 0.732714  Accuracy: 68.29%        LR: 0.001000
Train Epoch: 4 [25600/50000 (51%)]      Loss: 0.950875  Accuracy: 67.96%        LR: 0.001000
Train Epoch: 4 [32000/50000 (64%)]      Loss: 0.639869  Accuracy: 68.21%        LR: 0.001000
Train Epoch: 4 [38400/50000 (77%)]      Loss: 0.861224  Accuracy: 68.15%        LR: 0.001000
Train Epoch: 4 [44800/50000 (90%)]      Loss: 1.007439  Accuracy: 68.26%        LR: 0.001000
Epoch 4: Test loss: 0.7860, Accuracy: 72.78%
Train Epoch: 5 [0/50000 (0%)]   Loss: 0.875544  Accuracy: 64.06%        LR: 0.000100
Train Epoch: 5 [6400/50000 (13%)]       Loss: 0.798205  Accuracy: 69.00%        LR: 0.000100
Train Epoch: 5 [12800/50000 (26%)]      Loss: 0.822474  Accuracy: 69.38%        LR: 0.000100
Train Epoch: 5 [19200/50000 (38%)]      Loss: 1.100589  Accuracy: 69.60%        LR: 0.000100
Train Epoch: 5 [25600/50000 (51%)]      Loss: 0.659224  Accuracy: 69.65%        LR: 0.000100
Train Epoch: 5 [32000/50000 (64%)]      Loss: 0.748625  Accuracy: 69.64%        LR: 0.000100
Train Epoch: 5 [38400/50000 (77%)]      Loss: 0.924533  Accuracy: 69.63%        LR: 0.000100
Train Epoch: 5 [44800/50000 (90%)]      Loss: 0.868795  Accuracy: 69.70%        LR: 0.000100
Epoch 5: Test loss: 0.7704, Accuracy: 73.29%

Experiment 2: training the deeper CNN (4 layers)...
Train Epoch: 1 [0/50000 (0%)]   Loss: 2.361547  Accuracy: 10.94%        LR: 0.010000
Train Epoch: 1 [6400/50000 (13%)]       Loss: 1.508603  Accuracy: 31.33%        LR: 0.010000
Train Epoch: 1 [12800/50000 (26%)]      Loss: 1.625223  Accuracy: 36.70%        LR: 0.010000
Train Epoch: 1 [19200/50000 (38%)]      Loss: 1.560336  Accuracy: 39.16%        LR: 0.010000
Train Epoch: 1 [25600/50000 (51%)]      Loss: 1.314837  Accuracy: 41.68%        LR: 0.010000
Train Epoch: 1 [32000/50000 (64%)]      Loss: 1.158690  Accuracy: 43.56%        LR: 0.010000
Train Epoch: 1 [38400/50000 (77%)]      Loss: 1.332538  Accuracy: 45.04%        LR: 0.010000
Train Epoch: 1 [44800/50000 (90%)]      Loss: 1.487419  Accuracy: 46.50%        LR: 0.010000
Epoch 1: Test loss: 1.0876, Accuracy: 61.16%
Train Epoch: 2 [0/50000 (0%)]   Loss: 0.995175  Accuracy: 68.75%        LR: 0.009045
Train Epoch: 2 [6400/50000 (13%)]       Loss: 1.211926  Accuracy: 58.34%        LR: 0.009045
Train Epoch: 2 [12800/50000 (26%)]      Loss: 1.182561  Accuracy: 58.05%        LR: 0.009045
Train Epoch: 2 [19200/50000 (38%)]      Loss: 0.999875  Accuracy: 58.66%        LR: 0.009045
Train Epoch: 2 [25600/50000 (51%)]      Loss: 1.049008  Accuracy: 58.99%        LR: 0.009045
Train Epoch: 2 [32000/50000 (64%)]      Loss: 0.869797  Accuracy: 59.48%        LR: 0.009045
Train Epoch: 2 [38400/50000 (77%)]      Loss: 1.135825  Accuracy: 59.95%        LR: 0.009045
Train Epoch: 2 [44800/50000 (90%)]      Loss: 1.115167  Accuracy: 60.38%        LR: 0.009045
Epoch 2: Test loss: 0.9288, Accuracy: 67.52%
Train Epoch: 3 [0/50000 (0%)]   Loss: 0.859160  Accuracy: 79.69%        LR: 0.006545
Train Epoch: 3 [6400/50000 (13%)]       Loss: 0.984113  Accuracy: 67.08%        LR: 0.006545
Train Epoch: 3 [12800/50000 (26%)]      Loss: 0.934199  Accuracy: 66.88%        LR: 0.006545
Train Epoch: 3 [19200/50000 (38%)]      Loss: 1.200163  Accuracy: 67.24%        LR: 0.006545
Train Epoch: 3 [25600/50000 (51%)]      Loss: 0.886985  Accuracy: 67.39%        LR: 0.006545
Train Epoch: 3 [32000/50000 (64%)]      Loss: 0.588489  Accuracy: 67.56%        LR: 0.006545
Train Epoch: 3 [38400/50000 (77%)]      Loss: 0.802626  Accuracy: 67.73%        LR: 0.006545
Train Epoch: 3 [44800/50000 (90%)]      Loss: 0.769826  Accuracy: 68.06%        LR: 0.006545
Epoch 3: Test loss: 0.8234, Accuracy: 71.23%
Train Epoch: 4 [0/50000 (0%)]   Loss: 0.797326  Accuracy: 71.88%        LR: 0.003455
Train Epoch: 4 [6400/50000 (13%)]       Loss: 0.657178  Accuracy: 72.43%        LR: 0.003455
Train Epoch: 4 [12800/50000 (26%)]      Loss: 0.851532  Accuracy: 72.68%        LR: 0.003455
Train Epoch: 4 [19200/50000 (38%)]      Loss: 0.643618  Accuracy: 72.88%        LR: 0.003455
Train Epoch: 4 [25600/50000 (51%)]      Loss: 0.614715  Accuracy: 73.11%        LR: 0.003455
Train Epoch: 4 [32000/50000 (64%)]      Loss: 0.621094  Accuracy: 73.54%        LR: 0.003455
Train Epoch: 4 [38400/50000 (77%)]      Loss: 0.869759  Accuracy: 73.46%        LR: 0.003455
Train Epoch: 4 [44800/50000 (90%)]      Loss: 0.878335  Accuracy: 73.54%        LR: 0.003455
Epoch 4: Test loss: 0.6981, Accuracy: 75.48%
Train Epoch: 5 [0/50000 (0%)]   Loss: 0.766669  Accuracy: 68.75%        LR: 0.000955
Train Epoch: 5 [6400/50000 (13%)]       Loss: 0.553567  Accuracy: 75.59%        LR: 0.000955
Train Epoch: 5 [12800/50000 (26%)]      Loss: 0.499649  Accuracy: 76.55%        LR: 0.000955
Train Epoch: 5 [19200/50000 (38%)]      Loss: 0.672797  Accuracy: 76.39%        LR: 0.000955
Train Epoch: 5 [25600/50000 (51%)]      Loss: 0.780928  Accuracy: 76.45%        LR: 0.000955
Train Epoch: 5 [32000/50000 (64%)]      Loss: 0.580088  Accuracy: 76.59%        LR: 0.000955
Train Epoch: 5 [38400/50000 (77%)]      Loss: 0.698451  Accuracy: 76.64%        LR: 0.000955
Train Epoch: 5 [44800/50000 (90%)]      Loss: 0.682562  Accuracy: 76.69%        LR: 0.000955
Epoch 5: Test loss: 0.6332, Accuracy: 78.08%
PS C:\Users\I.Love.I\Desktop\Python_code> & C:/Users/I.Love.I/.conda/envs/DL/python.exe c:/Users/I.Love.I/Desktop/Python_code/python60-days-challenge/1_python-learning-library/Day41_experiments.py
Loading training set...
Files already downloaded and verified
Loading test set...
Files already downloaded and verified
Starting training: Basic CNN with CosineAnnealingLR, device: cpu
Train Epoch: 1 [0/50000 (0%)]   Loss: 2.293068  Accuracy: 12.50%
Train Epoch: 1 [6400/50000 (13%)]       Loss: 1.722358  Accuracy: 27.48%
Train Epoch: 1 [12800/50000 (26%)]      Loss: 1.556882  Accuracy: 31.74%
Train Epoch: 1 [19200/50000 (38%)]      Loss: 1.758241  Accuracy: 34.25%
Train Epoch: 1 [25600/50000 (51%)]      Loss: 1.674700  Accuracy: 35.67%
Train Epoch: 1 [32000/50000 (64%)]      Loss: 1.315244  Accuracy: 37.07%
Train Epoch: 1 [38400/50000 (77%)]      Loss: 1.367913  Accuracy: 38.25%
Train Epoch: 1 [44800/50000 (90%)]      Loss: 1.559623  Accuracy: 39.41%

Test set: Average loss: 1.2118, Accuracy: 5600/10000 (56.00%)

Train Epoch: 2 [0/50000 (0%)]   Loss: 1.484513  Accuracy: 50.00%
Train Epoch: 2 [6400/50000 (13%)]       Loss: 1.664975  Accuracy: 47.22%
Train Epoch: 2 [12800/50000 (26%)]      Loss: 1.220539  Accuracy: 48.27%
Train Epoch: 2 [19200/50000 (38%)]      Loss: 1.407580  Accuracy: 49.03%
Train Epoch: 2 [25600/50000 (51%)]      Loss: 1.092925  Accuracy: 49.81%
Train Epoch: 2 [32000/50000 (64%)]      Loss: 1.206660  Accuracy: 50.29%
Train Epoch: 2 [38400/50000 (77%)]      Loss: 1.379342  Accuracy: 50.75%
Train Epoch: 2 [44800/50000 (90%)]      Loss: 1.501670  Accuracy: 51.13%

Test set: Average loss: 1.1633, Accuracy: 6020/10000 (60.20%)

Train Epoch: 3 [0/50000 (0%)]   Loss: 1.454061  Accuracy: 50.00%
Train Epoch: 3 [6400/50000 (13%)]       Loss: 0.939104  Accuracy: 57.24%
Train Epoch: 3 [12800/50000 (26%)]      Loss: 1.240231  Accuracy: 56.58%
Train Epoch: 3 [19200/50000 (38%)]      Loss: 0.877654  Accuracy: 57.00%
Train Epoch: 3 [25600/50000 (51%)]      Loss: 1.166832  Accuracy: 57.22%
Train Epoch: 3 [32000/50000 (64%)]      Loss: 1.057176  Accuracy: 57.53%
Train Epoch: 3 [38400/50000 (77%)]      Loss: 1.339103  Accuracy: 57.71%
Train Epoch: 3 [44800/50000 (90%)]      Loss: 1.041523  Accuracy: 58.04%

Test set: Average loss: 0.9137, Accuracy: 6791/10000 (67.91%)

Train Epoch: 4 [0/50000 (0%)]   Loss: 0.974381  Accuracy: 68.75%
Train Epoch: 4 [6400/50000 (13%)]       Loss: 1.366313  Accuracy: 62.33%
Train Epoch: 4 [12800/50000 (26%)]      Loss: 0.929462  Accuracy: 62.22%
Train Epoch: 4 [19200/50000 (38%)]      Loss: 0.884589  Accuracy: 62.59%
Train Epoch: 4 [25600/50000 (51%)]      Loss: 1.171518  Accuracy: 62.30%
Train Epoch: 4 [32000/50000 (64%)]      Loss: 0.842188  Accuracy: 62.71%
Train Epoch: 4 [38400/50000 (77%)]      Loss: 1.012197  Accuracy: 62.51%
Train Epoch: 4 [44800/50000 (90%)]      Loss: 1.134873  Accuracy: 62.71%

Test set: Average loss: 0.8598, Accuracy: 6938/10000 (69.38%)

Train Epoch: 5 [0/50000 (0%)]   Loss: 0.878952  Accuracy: 67.19%
Train Epoch: 5 [6400/50000 (13%)]       Loss: 1.031704  Accuracy: 64.60%
Train Epoch: 5 [12800/50000 (26%)]      Loss: 1.064272  Accuracy: 65.48%
Train Epoch: 5 [19200/50000 (38%)]      Loss: 1.377383  Accuracy: 65.85%
Train Epoch: 5 [25600/50000 (51%)]      Loss: 0.908284  Accuracy: 66.02%
Train Epoch: 5 [32000/50000 (64%)]      Loss: 0.973955  Accuracy: 66.14%
Train Epoch: 5 [38400/50000 (77%)]      Loss: 1.087591  Accuracy: 66.22%
Train Epoch: 5 [44800/50000 (90%)]      Loss: 0.889676  Accuracy: 66.24%

Test set: Average loss: 0.7889, Accuracy: 7208/10000 (72.08%)

Train Epoch: 6 [0/50000 (0%)]   Loss: 0.876616  Accuracy: 70.31%
Train Epoch: 6 [6400/50000 (13%)]       Loss: 0.897449  Accuracy: 67.44%
Train Epoch: 6 [12800/50000 (26%)]      Loss: 1.097278  Accuracy: 67.63%
Train Epoch: 6 [19200/50000 (38%)]      Loss: 0.918221  Accuracy: 68.12%
Train Epoch: 6 [25600/50000 (51%)]      Loss: 0.754201  Accuracy: 68.52%
Train Epoch: 6 [32000/50000 (64%)]      Loss: 0.915608  Accuracy: 68.48%
Train Epoch: 6 [38400/50000 (77%)]      Loss: 1.108561  Accuracy: 68.63%
Train Epoch: 6 [44800/50000 (90%)]      Loss: 0.889088  Accuracy: 68.67%

Test set: Average loss: 0.6993, Accuracy: 7569/10000 (75.69%)

Train Epoch: 7 [0/50000 (0%)]   Loss: 0.958089  Accuracy: 65.62%
Train Epoch: 7 [6400/50000 (13%)]       Loss: 1.091723  Accuracy: 70.81%
Train Epoch: 7 [12800/50000 (26%)]      Loss: 0.777307  Accuracy: 70.35%
Train Epoch: 7 [19200/50000 (38%)]      Loss: 1.069645  Accuracy: 70.53%
Train Epoch: 7 [25600/50000 (51%)]      Loss: 0.896357  Accuracy: 70.77%
Train Epoch: 7 [32000/50000 (64%)]      Loss: 0.776956  Accuracy: 70.76%
Train Epoch: 7 [38400/50000 (77%)]      Loss: 0.742222  Accuracy: 70.88%
Train Epoch: 7 [44800/50000 (90%)]      Loss: 0.773929  Accuracy: 70.91%

Test set: Average loss: 0.6808, Accuracy: 7606/10000 (76.06%)

Train Epoch: 8 [0/50000 (0%)]   Loss: 0.693062  Accuracy: 73.44%
Train Epoch: 8 [6400/50000 (13%)]       Loss: 0.597719  Accuracy: 71.81%
Train Epoch: 8 [12800/50000 (26%)]      Loss: 0.577101  Accuracy: 72.22%
Train Epoch: 8 [19200/50000 (38%)]      Loss: 0.715675  Accuracy: 72.38%
Train Epoch: 8 [25600/50000 (51%)]      Loss: 0.894164  Accuracy: 72.39%
Train Epoch: 8 [32000/50000 (64%)]      Loss: 0.889195  Accuracy: 72.42%
Train Epoch: 8 [38400/50000 (77%)]      Loss: 0.777797  Accuracy: 72.43%
Train Epoch: 8 [44800/50000 (90%)]      Loss: 0.707688  Accuracy: 72.41%

Test set: Average loss: 0.6419, Accuracy: 7769/10000 (77.69%)

Train Epoch: 9 [0/50000 (0%)]   Loss: 0.801439  Accuracy: 73.44%
Train Epoch: 9 [6400/50000 (13%)]       Loss: 0.908550  Accuracy: 72.94%
Train Epoch: 9 [12800/50000 (26%)]      Loss: 0.692507  Accuracy: 73.35%
Train Epoch: 9 [19200/50000 (38%)]      Loss: 0.560888  Accuracy: 73.52%
Train Epoch: 9 [25600/50000 (51%)]      Loss: 0.740002  Accuracy: 73.79%
Train Epoch: 9 [32000/50000 (64%)]      Loss: 0.751597  Accuracy: 73.90%
Train Epoch: 9 [38400/50000 (77%)]      Loss: 0.994493  Accuracy: 73.81%
Train Epoch: 9 [44800/50000 (90%)]      Loss: 0.777613  Accuracy: 73.88%

Test set: Average loss: 0.6190, Accuracy: 7830/10000 (78.30%)

Train Epoch: 10 [0/50000 (0%)]  Loss: 0.687152  Accuracy: 79.69%
Train Epoch: 10 [6400/50000 (13%)]      Loss: 0.801341  Accuracy: 73.86%
Train Epoch: 10 [12800/50000 (26%)]     Loss: 0.684478  Accuracy: 73.98%
Train Epoch: 10 [19200/50000 (38%)]     Loss: 0.705056  Accuracy: 74.19%
Train Epoch: 10 [25600/50000 (51%)]     Loss: 0.853381  Accuracy: 74.05%
Train Epoch: 10 [32000/50000 (64%)]     Loss: 0.580054  Accuracy: 74.07%
Train Epoch: 10 [38400/50000 (77%)]     Loss: 0.817881  Accuracy: 74.00%
Train Epoch: 10 [44800/50000 (90%)]     Loss: 0.817918  Accuracy: 74.10%

Test set: Average loss: 0.6143, Accuracy: 7862/10000 (78.62%)

Loading training set...
Files already downloaded and verified
Loading test set...
Files already downloaded and verified
Starting training: Deep CNN with CosineAnnealingLR, device: cpu
Train Epoch: 1 [0/50000 (0%)]   Loss: 2.342756  Accuracy: 14.06%
Train Epoch: 1 [6400/50000 (13%)]       Loss: 1.850860  Accuracy: 25.28%
Train Epoch: 1 [12800/50000 (26%)]      Loss: 1.762461  Accuracy: 30.99%
Train Epoch: 1 [19200/50000 (38%)]      Loss: 1.590530  Accuracy: 34.26%
Train Epoch: 1 [25600/50000 (51%)]      Loss: 1.340634  Accuracy: 36.95%
Train Epoch: 1 [32000/50000 (64%)]      Loss: 1.501504  Accuracy: 39.23%
Train Epoch: 1 [38400/50000 (77%)]      Loss: 1.322111  Accuracy: 41.06%
Train Epoch: 1 [44800/50000 (90%)]      Loss: 1.156101  Accuracy: 42.66%

Test set: Average loss: 1.2036, Accuracy: 5570/10000 (55.70%)

Train Epoch: 2 [0/50000 (0%)]   Loss: 1.286792  Accuracy: 54.69%
Train Epoch: 2 [6400/50000 (13%)]       Loss: 1.453677  Accuracy: 55.48%
Train Epoch: 2 [12800/50000 (26%)]      Loss: 0.941933  Accuracy: 56.44%
Train Epoch: 2 [19200/50000 (38%)]      Loss: 1.107067  Accuracy: 56.65%
Train Epoch: 2 [25600/50000 (51%)]      Loss: 1.146062  Accuracy: 56.92%
Train Epoch: 2 [32000/50000 (64%)]      Loss: 1.112035  Accuracy: 57.31%
Train Epoch: 2 [38400/50000 (77%)]      Loss: 1.183208  Accuracy: 57.59%
Train Epoch: 2 [44800/50000 (90%)]      Loss: 1.217674  Accuracy: 58.32%

Test set: Average loss: 1.0009, Accuracy: 6483/10000 (64.83%)

Train Epoch: 3 [0/50000 (0%)]   Loss: 1.032111  Accuracy: 62.50%
Train Epoch: 3 [6400/50000 (13%)]       Loss: 0.904841  Accuracy: 64.26%
Train Epoch: 3 [12800/50000 (26%)]      Loss: 1.111103  Accuracy: 64.02%
Train Epoch: 3 [19200/50000 (38%)]      Loss: 1.111288  Accuracy: 64.01%
Train Epoch: 3 [25600/50000 (51%)]      Loss: 0.887255  Accuracy: 64.14%
Train Epoch: 3 [32000/50000 (64%)]      Loss: 0.821008  Accuracy: 64.42%
Train Epoch: 3 [38400/50000 (77%)]      Loss: 0.922995  Accuracy: 64.60%
Train Epoch: 3 [44800/50000 (90%)]      Loss: 0.852235  Accuracy: 64.90%

Test set: Average loss: 0.7871, Accuracy: 7225/10000 (72.25%)

Train Epoch: 4 [0/50000 (0%)]   Loss: 1.008779  Accuracy: 68.75%
Train Epoch: 4 [6400/50000 (13%)]       Loss: 0.877065  Accuracy: 68.30%
Train Epoch: 4 [12800/50000 (26%)]      Loss: 0.890234  Accuracy: 68.38%
Train Epoch: 4 [19200/50000 (38%)]      Loss: 0.822239  Accuracy: 68.46%
Train Epoch: 4 [25600/50000 (51%)]      Loss: 1.126034  Accuracy: 68.56%
Train Epoch: 4 [32000/50000 (64%)]      Loss: 0.859953  Accuracy: 68.46%
Train Epoch: 4 [38400/50000 (77%)]      Loss: 0.792203  Accuracy: 68.64%
Train Epoch: 4 [44800/50000 (90%)]      Loss: 0.921908  Accuracy: 68.79%

Test set: Average loss: 0.7564, Accuracy: 7332/10000 (73.32%)

Train Epoch: 5 [0/50000 (0%)]   Loss: 0.739065  Accuracy: 68.75%
Train Epoch: 5 [6400/50000 (13%)]       Loss: 0.773354  Accuracy: 71.61%
Train Epoch: 5 [12800/50000 (26%)]      Loss: 0.605493  Accuracy: 72.07%
Train Epoch: 5 [19200/50000 (38%)]      Loss: 0.617809  Accuracy: 72.11%
Train Epoch: 5 [25600/50000 (51%)]      Loss: 0.883427  Accuracy: 72.07%
Train Epoch: 5 [32000/50000 (64%)]      Loss: 0.842254  Accuracy: 71.99%
Train Epoch: 5 [38400/50000 (77%)]      Loss: 0.533248  Accuracy: 72.05%
Train Epoch: 5 [44800/50000 (90%)]      Loss: 1.000729  Accuracy: 72.02%

Test set: Average loss: 0.6809, Accuracy: 7611/10000 (76.11%)

Train Epoch: 6 [0/50000 (0%)]   Loss: 0.810092  Accuracy: 73.44%
Train Epoch: 6 [6400/50000 (13%)]       Loss: 0.616662  Accuracy: 73.48%
Train Epoch: 6 [12800/50000 (26%)]      Loss: 0.562376  Accuracy: 74.10%
Train Epoch: 6 [19200/50000 (38%)]      Loss: 0.832556  Accuracy: 73.90%
Train Epoch: 6 [25600/50000 (51%)]      Loss: 0.878011  Accuracy: 74.10%
Train Epoch: 6 [32000/50000 (64%)]      Loss: 0.509199  Accuracy: 74.05%
Train Epoch: 6 [38400/50000 (77%)]      Loss: 0.673968  Accuracy: 74.19%
Train Epoch: 6 [44800/50000 (90%)]      Loss: 0.744006  Accuracy: 74.30%

Test set: Average loss: 0.6272, Accuracy: 7805/10000 (78.05%)

Train Epoch: 7 [0/50000 (0%)]   Loss: 0.555623  Accuracy: 81.25%
Train Epoch: 7 [6400/50000 (13%)]       Loss: 0.578787  Accuracy: 76.49%
Train Epoch: 7 [12800/50000 (26%)]      Loss: 0.823545  Accuracy: 76.82%
Train Epoch: 7 [19200/50000 (38%)]      Loss: 0.716045  Accuracy: 76.79%
Train Epoch: 7 [25600/50000 (51%)]      Loss: 0.815958  Accuracy: 76.64%
Train Epoch: 7 [32000/50000 (64%)]      Loss: 0.755133  Accuracy: 76.55%
Train Epoch: 7 [38400/50000 (77%)]      Loss: 0.602492  Accuracy: 76.47%
Train Epoch: 7 [44800/50000 (90%)]      Loss: 0.758558  Accuracy: 76.46%

Test set: Average loss: 0.5867, Accuracy: 7979/10000 (79.79%)

Train Epoch: 8 [0/50000 (0%)]   Loss: 0.834396  Accuracy: 73.44%
Train Epoch: 8 [6400/50000 (13%)]       Loss: 0.533800  Accuracy: 78.57%
Train Epoch: 8 [12800/50000 (26%)]      Loss: 0.626373  Accuracy: 78.33%
Train Epoch: 8 [19200/50000 (38%)]      Loss: 0.643600  Accuracy: 78.60%
Train Epoch: 8 [25600/50000 (51%)]      Loss: 0.740194  Accuracy: 78.72%
Train Epoch: 8 [32000/50000 (64%)]      Loss: 0.602970  Accuracy: 78.66%
Train Epoch: 8 [38400/50000 (77%)]      Loss: 0.432281  Accuracy: 78.71%
Train Epoch: 8 [44800/50000 (90%)]      Loss: 0.451608  Accuracy: 78.71%

Test set: Average loss: 0.5375, Accuracy: 8155/10000 (81.55%)

Train Epoch: 9 [0/50000 (0%)]   Loss: 0.863743  Accuracy: 68.75%
Train Epoch: 9 [6400/50000 (13%)]       Loss: 0.412280  Accuracy: 79.07%
Train Epoch: 9 [12800/50000 (26%)]      Loss: 0.553952  Accuracy: 79.86%
Train Epoch: 9 [19200/50000 (38%)]      Loss: 0.724988  Accuracy: 79.91%
Train Epoch: 9 [25600/50000 (51%)]      Loss: 0.498772  Accuracy: 79.94%
Train Epoch: 9 [32000/50000 (64%)]      Loss: 0.791269  Accuracy: 80.01%
Train Epoch: 9 [38400/50000 (77%)]      Loss: 0.518802  Accuracy: 79.95%
Train Epoch: 9 [44800/50000 (90%)]      Loss: 0.408082  Accuracy: 79.84%

Test set: Average loss: 0.5060, Accuracy: 8271/10000 (82.71%)

Train Epoch: 10 [0/50000 (0%)]  Loss: 0.534627  Accuracy: 84.38%
Train Epoch: 10 [6400/50000 (13%)]      Loss: 0.785889  Accuracy: 80.35%
Train Epoch: 10 [12800/50000 (26%)]     Loss: 0.448817  Accuracy: 80.48%
Train Epoch: 10 [19200/50000 (38%)]     Loss: 0.501668  Accuracy: 80.71%
Train Epoch: 10 [25600/50000 (51%)]     Loss: 0.587260  Accuracy: 80.95%
Train Epoch: 10 [32000/50000 (64%)]     Loss: 0.423947  Accuracy: 81.13%
Train Epoch: 10 [38400/50000 (77%)]     Loss: 0.846690  Accuracy: 81.14%
Train Epoch: 10 [44800/50000 (90%)]     Loss: 0.598511  Accuracy: 81.15%

Test set: Average loss: 0.4925, Accuracy: 8322/10000 (83.22%)

Loading training set...
Files already downloaded and verified
Loading test set...
Files already downloaded and verified
Starting training: Residual CNN with CosineAnnealingLR, device: cpu
Traceback (most recent call last):
  File "c:\Users\I.Love.I\Desktop\Python_code\python60-days-challenge\1_python-learning-library\Day41_experiments.py", line 506, in <module>
    main()
  File "c:\Users\I.Love.I\Desktop\Python_code\python60-days-challenge\1_python-learning-library\Day41_experiments.py", line 498, in main
    history, title = run_experiment(model_type, scheduler_type, epochs, batch_size, lr)
  File "c:\Users\I.Love.I\Desktop\Python_code\python60-days-challenge\1_python-learning-library\Day41_experiments.py", line 466, in run_experiment
    train_loss, train_acc = train(model, train_loader, optimizer, scheduler, epoch, history)
  File "c:\Users\I.Love.I\Desktop\Python_code\python60-days-challenge\1_python-learning-library\Day41_experiments.py", line 267, in train
    output = model(data)
  File "C:\Users\I.Love.I\.conda\envs\DL\lib\site-packages\torch\nn\modules\module.py", line 1553, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "C:\Users\I.Love.I\.conda\envs\DL\lib\site-packages\torch\nn\modules\module.py", line 1562, in _call_impl
    return forward_call(*args, **kwargs)
  File "c:\Users\I.Love.I\Desktop\Python_code\python60-days-challenge\1_python-learning-library\Day41_experiments.py", line 249, in forward
    out = self.fc(out)
  File "C:\Users\I.Love.I\.conda\envs\DL\lib\site-packages\torch\nn\modules\module.py", line 1553, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "C:\Users\I.Love.I\.conda\envs\DL\lib\site-packages\torch\nn\modules\module.py", line 1562, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\I.Love.I\.conda\envs\DL\lib\site-packages\torch\nn\modules\linear.py", line 117, in forward
    return F.linear(input, self.weight, self.bias)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (64x1024 and 16384x10)
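The shapes in the message tell the story: mat1 is 64x1024, i.e. a batch of 64 samples whose flattened features are 256·2·2 = 1024 values, while the weight matrix expects 256·8·8 = 16384 inputs. So the version of ResNetCNN that produced this run downsampled more than its nn.Linear layer assumed. One robust way to avoid this class of bug (a sketch under that assumption, not the post's actual correction) is to pool to a fixed spatial size before flattening, which decouples the classifier from the feature-map resolution:

# Hypothetical fix sketch: a resolution-independent classifier head.
# Adaptive pooling guarantees a fixed feature count regardless of H and W.
import torch
import torch.nn as nn
import torch.nn.functional as F

class PooledHead(nn.Module):
    def __init__(self, channels=256, num_classes=10):
        super().__init__()
        self.fc = nn.Linear(channels, num_classes)  # in_features no longer depends on H, W

    def forward(self, out):
        out = F.adaptive_avg_pool2d(out, (1, 1))  # (N, C, H, W) -> (N, C, 1, 1) for any H, W
        out = torch.flatten(out, 1)               # (N, C)
        return self.fc(out)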
"""
DAY 41 简单CNN本节重点:
1. 数据增强
2. CNN结构定义
3. Batch Normalization
4. 特征图可视化
5. 学习率调度器
"""import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
from torch.utils.data import DataLoader# 设置中文字体
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False# 设置随机种子
torch.manual_seed(42)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")# CIFAR-10数据集的类别
classes = ('飞机', '汽车', '鸟', '猫', '鹿', '狗', '青蛙', '马', '船', '卡车')#====================== 1. 数据加载与增强 ======================def load_data(batch_size=64, is_train=True):"""加载CIFAR-10数据集,并应用数据增强"""if is_train:# 训练集使用数据增强transform = transforms.Compose([transforms.RandomHorizontalFlip(),  # 随机水平翻转transforms.RandomRotation(10),      # 随机旋转transforms.RandomAffine(0, shear=10, scale=(0.8, 1.2)),  # 随机仿射变换transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),  # 颜色抖动transforms.ToTensor(),transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])else:# 测试集只需要标准化transform = transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])dataset = torchvision.datasets.CIFAR10(root='./data', train=is_train,download=True,transform=transform)dataloader = DataLoader(dataset,batch_size=batch_size,shuffle=is_train,num_workers=2)return dataloader#====================== 2. CNN模型定义 ======================class SimpleNet(nn.Module):def __init__(self, dropout_rate=0.5):super(SimpleNet, self).__init__()# 第一个卷积块self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)self.bn1 = nn.BatchNorm2d(32)# 第二个卷积块self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)self.bn2 = nn.BatchNorm2d(64)# 第三个卷积块self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)self.bn3 = nn.BatchNorm2d(128)# 全连接层self.fc1 = nn.Linear(128 * 4 * 4, 512)self.dropout = nn.Dropout(dropout_rate)self.fc2 = nn.Linear(512, 10)def forward(self, x):# 保存特征图用于可视化self.feature_maps = []# 第一个卷积块x = self.conv1(x)x = self.bn1(x)x = F.relu(x)x = F.max_pool2d(x, 2)self.feature_maps.append(x)# 第二个卷积块x = self.conv2(x)x = self.bn2(x)x = F.relu(x)x = F.max_pool2d(x, 2)self.feature_maps.append(x)# 第三个卷积块x = self.conv3(x)x = self.bn3(x)x = F.relu(x)x = F.max_pool2d(x, 2)self.feature_maps.append(x)# 全连接层x = torch.flatten(x, 1)x = self.fc1(x)x = F.relu(x)x = self.dropout(x)x = self.fc2(x)return F.log_softmax(x, dim=1)#====================== 3. 训练函数 ======================def train(model, train_loader, optimizer, epoch, history):"""训练一个epoch"""model.train()train_loss = 0correct = 0total = 0for batch_idx, (data, target) in enumerate(train_loader):data, target = data.to(device), target.to(device)optimizer.zero_grad()output = model(data)loss = F.nll_loss(output, target)loss.backward()optimizer.step()train_loss += loss.item()pred = output.max(1, keepdim=True)[1]correct += pred.eq(target.view_as(pred)).sum().item()total += target.size(0)if batch_idx % 100 == 0:print(f'Train Epoch: {epoch} [{batch_idx * len(data)}/{len(train_loader.dataset)} 'f'({100. * batch_idx / len(train_loader):.0f}%)]\t'f'Loss: {loss.item():.6f}\t'f'Accuracy: {100. * correct / total:.2f}%')epoch_loss = train_loss / len(train_loader)epoch_acc = 100. * correct / totalhistory['train_loss'].append(epoch_loss)history['train_acc'].append(epoch_acc)return epoch_loss, epoch_acc#====================== 4. 测试函数 ======================def test(model, test_loader, history):"""在测试集上评估模型"""model.eval()test_loss = 0correct = 0with torch.no_grad():for data, target in test_loader:data, target = data.to(device), target.to(device)output = model(data)test_loss += F.nll_loss(output, target, reduction='sum').item()pred = output.max(1, keepdim=True)[1]correct += pred.eq(target.view_as(pred)).sum().item()test_loss /= len(test_loader.dataset)accuracy = 100. 
* correct / len(test_loader.dataset)history['test_loss'].append(test_loss)history['test_acc'].append(accuracy)print(f'\nTest set: Average loss: {test_loss:.4f}, 'f'Accuracy: {correct}/{len(test_loader.dataset)} 'f'({accuracy:.2f}%)\n')return test_loss, accuracy#====================== 5. 可视化函数 ======================def plot_training_history(history):"""绘制训练历史曲线"""epochs = range(1, len(history['train_loss']) + 1)plt.figure(figsize=(12, 4))plt.subplot(1, 2, 1)plt.plot(epochs, history['train_loss'], 'b-', label='训练损失')plt.plot(epochs, history['test_loss'], 'r-', label='测试损失')plt.title('训练和测试损失')plt.xlabel('Epoch')plt.ylabel('损失')plt.legend()plt.grid(True)plt.subplot(1, 2, 2)plt.plot(epochs, history['train_acc'], 'b-', label='训练准确率')plt.plot(epochs, history['test_acc'], 'r-', label='测试准确率')plt.title('训练和测试准确率')plt.xlabel('Epoch')plt.ylabel('准确率 (%)')plt.legend()plt.grid(True)plt.tight_layout()plt.show()def visualize_feature_maps(model, test_loader):"""可视化特征图"""# 获取一个批次的数据dataiter = iter(test_loader)images, _ = next(dataiter)# 获取特征图with torch.no_grad():_ = model(images[0:1].to(device))# 显示原始图像和每层的特征图plt.figure(figsize=(15, 5))# 显示原始图像plt.subplot(1, 4, 1)img = images[0] / 2 + 0.5npimg = img.numpy()plt.imshow(np.transpose(npimg, (1, 2, 0)))plt.title('原始图像')plt.axis('off')# 显示每层的特征图for i, feature_map in enumerate(model.feature_maps, 2):plt.subplot(1, 4, i)# 选择第一个样本的第一个特征图plt.imshow(feature_map[0, 0].cpu(), cmap='viridis')plt.title(f'层 {i-1} 特征图')plt.axis('off')plt.tight_layout()plt.show()#====================== 6. 主函数 ======================def main():# 超参数设置batch_size = 64epochs = 4lr = 0.01dropout_rate = 0.5# 初始化训练历史记录history = {'train_loss': [],'train_acc': [],'test_loss': [],'test_acc': []}# 加载数据print("正在加载训练集...")train_loader = load_data(batch_size, is_train=True)print("正在加载测试集...")test_loader = load_data(batch_size, is_train=False)# 创建模型model = SimpleNet(dropout_rate=dropout_rate).to(device)optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.1)# 训练和测试print(f"开始训练,使用设备: {device}")for epoch in range(1, epochs + 1):train_loss, train_acc = train(model, train_loader, optimizer, epoch, history)test_loss, test_acc = test(model, test_loader, history)scheduler.step()# 可视化训练过程print("训练完成,绘制训练历史...")plot_training_history(history)# 可视化特征图print("可视化特征图...")visualize_feature_maps(model, test_loader)if __name__ == '__main__':main()"""
学习要点:1. 数据增强
- transforms.RandomHorizontalFlip(): 随机水平翻转
- transforms.RandomRotation(): 随机旋转
- transforms.RandomAffine(): 随机仿射变换
- transforms.ColorJitter(): 颜色抖动2. CNN结构
- 常见流程:输入 → 卷积层 → BN → 激活函数 → 池化层
- 特征提取:多个卷积块串联
- 分类器:Flatten后接全连接层3. Batch Normalization
- 在卷积层后添加
- 训练时计算并更新均值和方差
- 测试时使用训练阶段的统计量4. 特征图
- 保存每层的特征图用于可视化
- 观察模型学习到的特征
- 帮助理解模型的工作原理5. 学习率调度器
- 使用StepLR按步长降低学习率
- 帮助模型更好地收敛
- 避免学习率过大或过小
"""

"""
DAY 41 简单CNN - 基础示例这个文件提供了一个简单的CNN示例,展示了如何:
1. 修改CNN结构
2. 使用不同的学习率调度器
3. 观察训练效果的变化
"""import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import StepLR, CosineAnnealingLR# 设置中文字体
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False# 设置随机种子和设备
torch.manual_seed(42)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")#====================== 1. 数据加载 ======================def load_data(batch_size=64, use_augmentation=True):"""加载CIFAR-10数据集参数:batch_size: 批次大小use_augmentation: 是否使用数据增强"""if use_augmentation:# 使用数据增强train_transform = transforms.Compose([transforms.RandomHorizontalFlip(),    # 随机水平翻转transforms.RandomRotation(10),        # 随机旋转transforms.ToTensor(),transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])else:# 不使用数据增强train_transform = transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])# 测试集转换test_transform = transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])# 加载数据集train_set = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=train_transform)test_set = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=test_transform)# 创建数据加载器train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=2)test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=2)return train_loader, test_loader#====================== 2. 模型定义 ======================class SimpleCNN(nn.Module):"""简单的CNN模型,可以通过参数调整结构参数:num_conv_layers: 卷积层数量channels: 每层的通道数列表use_bn: 是否使用Batch Normalizationdropout_rate: Dropout比率"""def __init__(self, num_conv_layers=2, channels=[16, 32, 64], use_bn=False, dropout_rate=0.5):super(SimpleCNN, self).__init__()self.use_bn = use_bn# 创建卷积层self.conv_layers = nn.ModuleList()in_channels = 3  # 输入图像是RGB三通道for i in range(num_conv_layers):# 添加卷积层self.conv_layers.append(nn.Conv2d(in_channels, channels[i], kernel_size=3, padding=1))# 添加BN层(如果使用)if use_bn:self.conv_layers.append(nn.BatchNorm2d(channels[i]))in_channels = channels[i]# 计算全连接层的输入维度# 假设输入图像是32x32,每经过一次池化层大小减半final_size = 32 // (2 ** num_conv_layers)fc_input = channels[-1] * final_size * final_size# 全连接层self.fc1 = nn.Linear(fc_input, 512)self.dropout = nn.Dropout(dropout_rate)self.fc2 = nn.Linear(512, 10)def forward(self, x):# 保存特征图用于可视化self.feature_maps = []# 通过卷积层for i in range(0, len(self.conv_layers), 2 if self.use_bn else 1):x = self.conv_layers[i](x)  # 卷积if self.use_bn:x = self.conv_layers[i+1](x)  # BNx = F.relu(x)  # 激活函数x = F.max_pool2d(x, 2)  # 池化self.feature_maps.append(x)  # 保存特征图# 展平x = torch.flatten(x, 1)# 全连接层x = F.relu(self.fc1(x))x = self.dropout(x)x = self.fc2(x)return F.log_softmax(x, dim=1)#====================== 3. 训练函数 ======================def train_model(model, train_loader, test_loader, optimizer, scheduler, epochs=5):"""训练模型并记录历史"""history = {'train_loss': [], 'train_acc': [], 'test_loss': [], 'test_acc': []}for epoch in range(1, epochs + 1):model.train()train_loss = 0correct = 0total = 0for batch_idx, (data, target) in enumerate(train_loader):data, target = data.to(device), target.to(device)optimizer.zero_grad()output = model(data)loss = F.nll_loss(output, target)loss.backward()optimizer.step()train_loss += loss.item()pred = output.max(1, keepdim=True)[1]correct += pred.eq(target.view_as(pred)).sum().item()total += target.size(0)if batch_idx % 100 == 0:print(f'Train Epoch: {epoch} [{batch_idx * len(data)}/{len(train_loader.dataset)} 'f'({100. * batch_idx / len(train_loader):.0f}%)]\t'f'Loss: {loss.item():.6f}\t'f'Accuracy: {100. * correct / total:.2f}%\t'f'LR: {scheduler.get_last_lr()[0]:.6f}')# 记录训练指标history['train_loss'].append(train_loss / len(train_loader))history['train_acc'].append(100. 
* correct / total)# 测试model.eval()test_loss = 0correct = 0with torch.no_grad():for data, target in test_loader:data, target = data.to(device), target.to(device)output = model(data)test_loss += F.nll_loss(output, target, reduction='sum').item()pred = output.max(1, keepdim=True)[1]correct += pred.eq(target.view_as(pred)).sum().item()test_loss /= len(test_loader.dataset)accuracy = 100. * correct / len(test_loader.dataset)# 记录测试指标history['test_loss'].append(test_loss)history['test_acc'].append(accuracy)print(f'Epoch {epoch}: Test loss: {test_loss:.4f}, Accuracy: {accuracy:.2f}%')# 更新学习率scheduler.step()return history#====================== 4. 可视化函数 ======================def plot_history(history, title):"""绘制训练历史"""plt.figure(figsize=(12, 4))# 绘制损失plt.subplot(1, 2, 1)plt.plot(history['train_loss'], label='训练损失')plt.plot(history['test_loss'], label='测试损失')plt.title(f'{title} - 损失曲线')plt.xlabel('Epoch')plt.ylabel('损失')plt.legend()plt.grid(True)# 绘制准确率plt.subplot(1, 2, 2)plt.plot(history['train_acc'], label='训练准确率')plt.plot(history['test_acc'], label='测试准确率')plt.title(f'{title} - 准确率曲线')plt.xlabel('Epoch')plt.ylabel('准确率 (%)')plt.legend()plt.grid(True)plt.tight_layout()plt.show()def visualize_feature_maps(model, test_loader):"""可视化特征图"""# 获取一个批次的数据dataiter = iter(test_loader)images, _ = next(dataiter)# 获取特征图with torch.no_grad():_ = model(images[0:1].to(device))# 显示原始图像和特征图plt.figure(figsize=(15, 5))# 显示原始图像plt.subplot(1, len(model.feature_maps) + 1, 1)img = images[0] / 2 + 0.5  # 反归一化plt.imshow(img.permute(1, 2, 0))plt.title('原始图像')plt.axis('off')# 显示每层的特征图for i, feature_map in enumerate(model.feature_maps, 1):plt.subplot(1, len(model.feature_maps) + 1, i + 1)# 显示第一个特征图plt.imshow(feature_map[0, 0].cpu(), cmap='viridis')plt.title(f'特征图 {i}')plt.axis('off')plt.tight_layout()plt.show()#====================== 5. 主函数 ======================def main():# 基础配置batch_size = 64epochs = 5lr = 0.01# 加载数据print("加载数据...")train_loader, test_loader = load_data(batch_size, use_augmentation=True)# 实验1:基础CNN(3层)print("\n实验1:训练基础CNN(3层)...")model1 = SimpleCNN(num_conv_layers=3, channels=[32, 64, 128], use_bn=True).to(device)optimizer1 = optim.SGD(model1.parameters(), lr=lr, momentum=0.9)scheduler1 = StepLR(optimizer1, step_size=2, gamma=0.1)history1 = train_model(model1, train_loader, test_loader, optimizer1, scheduler1, epochs)plot_history(history1, "基础CNN(3层)+ StepLR")visualize_feature_maps(model1, test_loader)# 实验2:深层CNN(4层)print("\n实验2:训练深层CNN(4层)...")model2 = SimpleCNN(num_conv_layers=4, channels=[32, 64, 128, 256], use_bn=True).to(device)optimizer2 = optim.SGD(model2.parameters(), lr=lr, momentum=0.9)scheduler2 = CosineAnnealingLR(optimizer2, T_max=epochs)history2 = train_model(model2, train_loader, test_loader, optimizer2, scheduler2, epochs)plot_history(history2, "深层CNN(4层)+ CosineAnnealingLR")visualize_feature_maps(model2, test_loader)if __name__ == '__main__':main()"""
学习要点:1. CNN结构修改
- 可以通过修改num_conv_layers和channels参数来改变网络深度和宽度
- use_bn参数控制是否使用Batch Normalization
- dropout_rate参数调整Dropout比率2. 学习率调度器选择
- StepLR:按固定步长降低学习率
- CosineAnnealingLR:余弦周期调整学习率3. 观察重点
- 不同深度的网络收敛速度
- 是否出现过拟合(训练准确率高但测试准确率低)
- 特征图的变化4. 实验建议
- 尝试不同的网络深度(修改num_conv_layers和channels)
- 对比有无Batch Normalization的效果(修改use_bn)
- 测试不同的学习率调度策略
- 观察数据增强的影响(修改use_augmentation)
"""
