Heywhale Community (和鲸社区) Deep Learning Fundamentals Training Camp 2025, Level 4
Task: use PyTorch to build a simple convolutional neural network (CNN) and perform image classification on the CIFAR-10 dataset, reporting how well a plain CNN classifier performs. Hints: dataset: CIFAR-10; network structure: 2-3 convolutional layers with ReLU activations and MaxPooling layers, followed by fully connected layers.
#1. Data Preprocessing and Loading
import torch
import torchvision
import torchvision.transforms as transforms

# Data augmentation and normalization (using the official CIFAR-10 per-channel mean and std)
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),    # random crop to improve generalization
    transforms.RandomHorizontalFlip(),       # random horizontal flip
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616))
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616))
])

# Load the datasets
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)

# Data loaders
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)
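Before building the model, it can help to sanity-check the loaders. The short sketch below is an optional check, not part of the original solution; it prints the shape of one training batch and the CIFAR-10 class names exposed by torchvision.

# Optional sanity check (run after the loaders above are defined)
images, labels = next(iter(trainloader))
print(images.shape)        # expected: torch.Size([128, 3, 32, 32])
print(labels.shape)        # expected: torch.Size([128])
print(trainset.classes)    # the 10 CIFAR-10 class names provided by torchvision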
#2. CNN Model Architecture

import torch.nn as nn
import torch.nn.functional as F

class SimpleCNN(nn.Module):
    def __init__(self):
        super(SimpleCNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3, padding=1)    # 3 input channels (RGB), 32 output channels
        self.bn1 = nn.BatchNorm2d(32)                  # batch normalization
        self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(64)
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        self.pool = nn.MaxPool2d(2, 2)                 # max pooling (halves the spatial size)
        self.fc1 = nn.Linear(128 * 4 * 4, 256)         # fully connected layer (feature map: 32x32 → 16x16 → 8x8 → 4x4)
        self.fc2 = nn.Linear(256, 10)                  # 10 output classes

    def forward(self, x):
        x = self.pool(F.relu(self.bn1(self.conv1(x))))   # 32x32 → 16x16
        x = self.pool(F.relu(self.bn2(self.conv2(x))))   # 16x16 → 8x8
        x = self.pool(F.relu(self.bn3(self.conv3(x))))   # 8x8 → 4x4
        x = x.view(-1, 128 * 4 * 4)                      # flatten
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

# Instantiate the model and move it to the GPU if available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = SimpleCNN().to(device)
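As a quick check of the architecture (an optional sketch, not required by the task), you can count the trainable parameters and confirm the output shape with a dummy input.

# Optional architecture check (run after `net` and `device` are defined)
num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
print(f"Trainable parameters: {num_params}")

dummy = torch.randn(1, 3, 32, 32).to(device)   # one fake CIFAR-10 sized image
print(net(dummy).shape)                        # expected: torch.Size([1, 10])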
#3. Training and Optimization

import torch.optim as optim

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.001)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)  # multiply the learning rate by 0.1 every 5 epochs

# Training loop (10 epochs)
for epoch in range(10):
    net.train()
    running_loss = 0.0
    for i, (inputs, labels) in enumerate(trainloader):
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if i % 100 == 99:   # print every 100 batches
            print(f'Epoch [{epoch+1}/10], Step [{i+1}/{len(trainloader)}], Loss: {running_loss/100:.3f}')
            running_loss = 0.0
    scheduler.step()   # update the learning rate
    print(f"Epoch {epoch+1} completed, learning rate: {scheduler.get_last_lr()[0]:.6f}")
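To keep the trained weights for later reuse, a minimal checkpointing sketch could look like the following; the file name simple_cnn_cifar10.pth is just an example and not part of the original code.

# Optional: save the trained weights (hypothetical file name)
torch.save(net.state_dict(), 'simple_cnn_cifar10.pth')

# Optional: reload them later into a fresh model instance
# net = SimpleCNN().to(device)
# net.load_state_dict(torch.load('simple_cnn_cifar10.pth', map_location=device))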
#4. Model Evaluation and Visualization

net.eval()
correct, total = 0, 0
with torch.no_grad():
    for images, labels in testloader:
        images, labels = images.to(device), labels.to(device)
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

accuracy = 100 * correct / total
print(f'Test Accuracy: {accuracy:.2f}%')
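The section heading mentions visualization, but the original code only reports overall accuracy. A simple sketch of a per-class accuracy breakdown (one way to see where the model struggles; trainset.classes comes from torchvision's CIFAR10 dataset, and this block is an addition rather than part of the original solution) might look like this:

# Optional: per-class accuracy breakdown
class_correct = [0] * 10
class_total = [0] * 10
with torch.no_grad():
    for images, labels in testloader:
        images, labels = images.to(device), labels.to(device)
        _, predicted = torch.max(net(images), 1)
        for label, pred in zip(labels, predicted):
            class_total[label.item()] += 1
            class_correct[label.item()] += int(label == pred)

for name, c, t in zip(trainset.classes, class_correct, class_total):
    print(f'{name:>10s}: {100 * c / t:.2f}%')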
Run results: