[Python Training Camp Check-in] Day 43 @浙大疏锦行
DAY 43 Review Day
Assignment:
Find an image dataset on Kaggle, train a CNN on it, and visualize the results with Grad-CAM.
Advanced: split the code into multiple files (a rough sketch of one possible layout is given at the end of this post).
I chose the music_instruments dataset. Link: Musical Instruments (kaggle.com)
# Imports
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import os
import warnings
warnings.filterwarnings("ignore")

# Set random seeds for reproducibility
torch.manual_seed(42)
np.random.seed(42)
# Data preprocessing
transform = transforms.Compose([
    transforms.Resize((64, 64)),  # resize images
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

# Load the custom dataset
dataset_path = r"F:\Program Files\MyPythonProjects\day43\music_instruments"
dataset = ImageFolder(root=dataset_path, transform=transform)

# Split into training and test sets
train_size = int(0.8 * len(dataset))
test_size = len(dataset) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])

# Define class names
classes = ('accordion', 'banjo', 'drum', 'flute', 'guitar', 'harmonica', 'saxophone', 'sitar', 'tabla', 'violin')
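Note: ImageFolder builds its class-to-index mapping from the subfolder names, sorted alphabetically, so the hard-coded tuple above has to match that order. A small check (nothing assumed beyond the dataset object defined above) can read the names straight from the dataset:

# ImageFolder maps subfolder names (sorted alphabetically) to indices 0..N-1,
# so the hand-written tuple must match that order to label predictions correctly.
print(dataset.class_to_idx)        # actual mapping derived from the folder names
# classes = tuple(dataset.classes) # optional: use the folder-derived names directly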
# Define a simple CNN model (number of output classes set to 10)
class SimpleCNN(nn.Module):
    def __init__(self):
        super(SimpleCNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(128 * 8 * 8, 512)
        self.fc2 = nn.Linear(512, len(classes))

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))  # 32x32
        x = self.pool(F.relu(self.conv2(x)))  # 16x16
        x = self.pool(F.relu(self.conv3(x)))  # 8x8
        x = x.view(-1, 128 * 8 * 8)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

# Initialize the model
model = SimpleCNN()
print("模型已创建")# 使用GPU或CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)

# Training function
def train_model(model, epochs=10):
    # num_workers=2 may need to be set to 0 on Windows if the DataLoader hangs
    trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=32,
                                              shuffle=True, num_workers=2)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    for epoch in range(epochs):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if i % 10 == 9:
                print(f'[{epoch + 1}, {i + 1}] loss: {running_loss / 10:.3f}')
                running_loss = 0.0
    print("Training finished")

# Train the model or load saved weights
try:
    model.load_state_dict(torch.load('music_instruments_cnn.pth'))
    print("Loaded pre-trained model")
except (FileNotFoundError, RuntimeError):
    print("Could not load pre-trained weights; training a new model")
    train_model(model, epochs=10)
    torch.save(model.state_dict(), 'music_instruments_cnn.pth')

# Set the model to evaluation mode
model.eval()
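Before looking at Grad-CAM it is worth checking how the model does on the held-out split. A minimal accuracy check (a sketch; the batch size and DataLoader settings here are my own choices, not part of the original assignment):

# Evaluate accuracy on the test split
testloader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False)
correct, total = 0, 0
with torch.no_grad():
    for inputs, labels in testloader:
        inputs, labels = inputs.to(device), labels.to(device)
        outputs = model(inputs)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print(f"Test accuracy: {100 * correct / total:.2f}%")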
# Grad-CAM implementation
class GradCAM:
    def __init__(self, model, target_layer):
        self.model = model
        self.target_layer = target_layer
        self.gradients = None
        self.activations = None
        self.register_hooks()

    def register_hooks(self):
        # Capture the target layer's activations on the forward pass
        def forward_hook(module, input, output):
            self.activations = output.detach()

        # Capture the gradients flowing out of the target layer on the backward pass
        def backward_hook(module, grad_input, grad_output):
            self.gradients = grad_output[0].detach()

        self.target_layer.register_forward_hook(forward_hook)
        # Note: register_backward_hook is deprecated in recent PyTorch versions;
        # register_full_backward_hook is the recommended replacement.
        self.target_layer.register_backward_hook(backward_hook)

    def generate_cam(self, input_image, target_class=None):
        # Forward pass; default to the predicted class if none is given
        model_output = self.model(input_image)
        if target_class is None:
            target_class = torch.argmax(model_output, dim=1).item()

        # Backward pass for the target class only
        self.model.zero_grad()
        one_hot = torch.zeros_like(model_output)
        one_hot[0, target_class] = 1
        model_output.backward(gradient=one_hot)

        # Channel weights = spatial mean of the gradients; CAM = weighted sum of activations
        gradients = self.gradients
        activations = self.activations
        weights = torch.mean(gradients, dim=(2, 3), keepdim=True)
        cam = torch.sum(weights * activations, dim=1, keepdim=True)
        cam = F.relu(cam)

        # Upsample to the input resolution and normalize to [0, 1]
        cam = F.interpolate(cam, size=(64, 64), mode='bilinear', align_corners=False)
        cam = cam - cam.min()
        cam = cam / cam.max() if cam.max() > 0 else cam
        return cam.cpu().squeeze().numpy(), target_class
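For reference, generate_cam follows the standard Grad-CAM formulation: the weight of each feature-map channel is the spatial average of the gradient of the target class score with respect to that channel, and the map is the ReLU of the weighted sum of the activations:

\alpha_k^c = \frac{1}{Z} \sum_{i} \sum_{j} \frac{\partial y^c}{\partial A_{ij}^k},
\qquad
L_{\text{Grad-CAM}}^c = \mathrm{ReLU}\Big( \sum_k \alpha_k^c A^k \Big)

In the code above, weights corresponds to the alpha values (the mean over dims 2 and 3) and cam is the weighted sum before the ReLU, upsampling, and normalization.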
# Visualization helper
def tensor_to_np(tensor):
    img = tensor.cpu().numpy().transpose(1, 2, 0)
    mean = np.array([0.5, 0.5, 0.5])
    std = np.array([0.5, 0.5, 0.5])
    img = std * img + mean  # undo the normalization
    img = np.clip(img, 0, 1)
    return img

# Configure matplotlib fonts (SimHei supports Chinese labels)
plt.rcParams["font.family"] = ["SimHei"]
plt.rcParams['axes.unicode_minus'] = False

# Pick a test image
idx = 10
image, label = test_dataset[idx]
print(f"选择的图像类别: {classes[label]}")# 添加批次维度并移动到设备
input_tensor = image.unsqueeze(0).to(device)# 初始化Grad-CAM(选择最后一个卷积层)
grad_cam = GradCAM(model, model.conv3)# 生成热力图
heatmap, pred_class = grad_cam.generate_cam(input_tensor)# 可视化
plt.figure(figsize=(12, 4))# 原始图像
plt.subplot(1, 3, 1)
plt.imshow(tensor_to_np(image))
plt.title(f"原始图像: {classes[label]}")
plt.axis('off')# 热力图
plt.subplot(1, 3, 2)
plt.imshow(heatmap, cmap='jet')
plt.title(f"Grad-CAM热力图: {classes[pred_class]}")
plt.axis('off')# 叠加的图像
plt.subplot(1, 3, 3)
img = tensor_to_np(image)
heatmap_resized = np.uint8(255 * heatmap)
heatmap_colored = plt.cm.jet(heatmap_resized)[:, :, :3]
superimposed_img = heatmap_colored * 0.4 + img * 0.6
plt.imshow(superimposed_img)
plt.title("叠加热力图")
plt.axis('off')plt.tight_layout()
plt.savefig('grad_cam_result.png')
plt.show()
print("Grad-CAM可视化完成。已保存为grad_cam_result.png")
@浙大疏锦行