
Week 29: Inception v3 in Practice and Analysis

Contents


Preface

I. Preparation

1. Check the GPU

2. Inspect the Data

II. Building the Model

1. Split the Dataset

2. Create the Model

3. Compile and Train the Model

III. Visualizing the Results

IV. Summary


Preface

  • 🍨 This post is a learning-record entry for the 🔗 365-day deep learning training camp
  • 🍖 Original author: K同学啊

I. Preparation

1. Check the GPU
import torch
import torch.nn as nn
import torchvision
from torchvision import transforms, datasets
import os, PIL, pathlib

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device
2. Inspect the Data
import os, PIL, random, pathlib

data_dir = 'data/weather_photos'
data_dir = pathlib.Path(data_dir)

data_paths  = list(data_dir.glob('*'))
classeNames = [str(path).split("\\")[2] for path in data_paths]
classeNames
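Note that the split("\\")[2] indexing assumes Windows path separators and this exact directory depth. A more portable sketch (my rewrite, same result) lets pathlib handle the separators:

# Portable alternative: each entry under data_dir is a class folder,
# and pathlib's .name gives the last path component on any OS
classeNames = [path.name for path in data_paths]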

II. Building the Model

1. Split the Dataset
total_datadir = 'data/45-data'

train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),  # resize input images to a uniform size
    transforms.ToTensor(),          # convert PIL Image / numpy.ndarray to a tensor, scaled to [0, 1]
    transforms.Normalize(           # standardize toward a normal (Gaussian) distribution so the model converges more easily
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225])  # mean and std were computed by random sampling from the dataset
])

total_data = datasets.ImageFolder(total_datadir, transform=train_transforms)
total_data

train_size = int(0.8 * len(total_data))
test_size  = len(total_data) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(total_data, [train_size, test_size])
train_dataset, test_dataset

train_size, test_size

batch_size = 32

train_dl = torch.utils.data.DataLoader(train_dataset,
                                       batch_size=batch_size,
                                       shuffle=True,
                                       num_workers=1)
test_dl  = torch.utils.data.DataLoader(test_dataset,
                                       batch_size=batch_size,
                                       shuffle=True,
                                       num_workers=1)

for X, y in test_dl:
    print("Shape of X [N, C, H, W]: ", X.shape)
    print("Shape of y: ", y.shape, y.dtype)
    break
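Before training, it is worth confirming how ImageFolder mapped folder names to label indices (a quick check I added; the printed names depend on the folders actually present):

print(total_data.class_to_idx)  # folder-name -> label-index mapping
print(total_data.classes)       # class names in index order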
2. Create the Model
import torch.nn.functional as F

class BasicConv2d(nn.Module):
    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn   = nn.BatchNorm2d(out_channels, eps=0.001)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        return F.relu(x, inplace=True)

class InceptionA(nn.Module):
    def __init__(self, in_channels, pool_features):
        super(InceptionA, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)

        self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)
        self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)

        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)

        self.branch_pool = BasicConv2d(in_channels, pool_features, kernel_size=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)

class InceptionB(nn.Module):
    def __init__(self, in_channels, channels_7x7):
        super(InceptionB, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 192, kernel_size=1)

        c7 = channels_7x7
        self.branch7x7_1 = BasicConv2d(in_channels, c7, kernel_size=1)
        self.branch7x7_2 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7_3 = BasicConv2d(c7, 192, kernel_size=(7, 1), padding=(3, 0))

        self.branch7x7dbl_1 = BasicConv2d(in_channels, c7, kernel_size=1)
        self.branch7x7dbl_2 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_3 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7dbl_4 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_5 = BasicConv2d(c7, 192, kernel_size=(1, 7), padding=(0, 3))

        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch7x7 = self.branch7x7_1(x)
        branch7x7 = self.branch7x7_2(branch7x7)
        branch7x7 = self.branch7x7_3(branch7x7)

        branch7x7dbl = self.branch7x7dbl_1(x)
        branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
        return torch.cat(outputs, 1)

class InceptionC(nn.Module):
    def __init__(self, in_channels):
        super(InceptionC, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 320, kernel_size=1)

        self.branch3x3_1  = BasicConv2d(in_channels, 384, kernel_size=1)
        self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))

        self.branch3x3dbl_1  = BasicConv2d(in_channels, 448, kernel_size=1)
        self.branch3x3dbl_2  = BasicConv2d(448, 384, kernel_size=3, padding=1)
        self.branch3x3dbl_3a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3dbl_3b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))

        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [
            self.branch3x3_2a(branch3x3),
            self.branch3x3_2b(branch3x3),
        ]
        branch3x3 = torch.cat(branch3x3, 1)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [
            self.branch3x3dbl_3a(branch3x3dbl),
            self.branch3x3dbl_3b(branch3x3dbl),
        ]
        branch3x3dbl = torch.cat(branch3x3dbl, 1)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)

class ReductionA(nn.Module):
    def __init__(self, in_channels):
        super(ReductionA, self).__init__()
        self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=3, stride=2)

        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2)

    def forward(self, x):
        branch3x3 = self.branch3x3(x)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)

        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)

        outputs = [branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)

class ReductionB(nn.Module):
    def __init__(self, in_channels):
        super(ReductionB, self).__init__()
        self.branch3x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
        self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2)

        self.branch7x7x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
        self.branch7x7x3_2 = BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7x3_3 = BasicConv2d(192, 192, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7x3_4 = BasicConv2d(192, 192, kernel_size=3, stride=2)

    def forward(self, x):
        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)

        branch7x7x3 = self.branch7x7x3_1(x)
        branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
        branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
        branch7x7x3 = self.branch7x7x3_4(branch7x7x3)

        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)

        outputs = [branch3x3, branch7x7x3, branch_pool]
        return torch.cat(outputs, 1)

class InceptionAux(nn.Module):
    def __init__(self, in_channels, num_classes):
        super(InceptionAux, self).__init__()
        self.conv0 = BasicConv2d(in_channels, 128, kernel_size=1)
        self.conv1 = BasicConv2d(128, 768, kernel_size=5)
        self.conv1.stddev = 0.01
        self.fc = nn.Linear(768, num_classes)
        self.fc.stddev = 0.001

    def forward(self, x):
        # 17 x 17 x 768
        x = F.avg_pool2d(x, kernel_size=5, stride=3)
        # 5 x 5 x 768
        x = self.conv0(x)
        # 5 x 5 x 128
        x = self.conv1(x)
        # 1 x 1 x 768
        x = x.view(x.size(0), -1)
        # 768
        x = self.fc(x)
        # num_classes
        return x

class InceptionV3(nn.Module):
    def __init__(self, num_classes=1000, aux_logits=False, transform_input=False):
        super(InceptionV3, self).__init__()
        self.aux_logits      = aux_logits
        self.transform_input = transform_input
        self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
        self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
        self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
        self.Mixed_5b = InceptionA(192, pool_features=32)
        self.Mixed_5c = InceptionA(256, pool_features=64)
        self.Mixed_5d = InceptionA(288, pool_features=64)
        self.Mixed_6a = ReductionA(288)
        self.Mixed_6b = InceptionB(768, channels_7x7=128)
        self.Mixed_6c = InceptionB(768, channels_7x7=160)
        self.Mixed_6d = InceptionB(768, channels_7x7=160)
        self.Mixed_6e = InceptionB(768, channels_7x7=192)
        if aux_logits:
            self.AuxLogits = InceptionAux(768, num_classes)
        self.Mixed_7a = ReductionB(768)
        self.Mixed_7b = InceptionC(1280)
        self.Mixed_7c = InceptionC(2048)
        self.fc = nn.Linear(2048, num_classes)

    def forward(self, x):
        if self.transform_input:
            x = x.clone()
            x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
            x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
            x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
        # 299 x 299 x 3
        x = self.Conv2d_1a_3x3(x)
        # 149 x 149 x 32
        x = self.Conv2d_2a_3x3(x)
        # 147 x 147 x 32
        x = self.Conv2d_2b_3x3(x)
        # 147 x 147 x 64
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # 73 x 73 x 64
        x = self.Conv2d_3b_1x1(x)
        # 73 x 73 x 80
        x = self.Conv2d_4a_3x3(x)
        # 71 x 71 x 192
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # 35 x 35 x 192
        x = self.Mixed_5b(x)
        # 35 x 35 x 256
        x = self.Mixed_5c(x)
        # 35 x 35 x 288
        x = self.Mixed_5d(x)
        # 35 x 35 x 288
        x = self.Mixed_6a(x)
        # 17 x 17 x 768
        x = self.Mixed_6b(x)
        # 17 x 17 x 768
        x = self.Mixed_6c(x)
        # 17 x 17 x 768
        x = self.Mixed_6d(x)
        # 17 x 17 x 768
        x = self.Mixed_6e(x)
        # 17 x 17 x 768
        if self.training and self.aux_logits:
            aux = self.AuxLogits(x)
        # 17 x 17 x 768
        x = self.Mixed_7a(x)
        # 8 x 8 x 1280
        x = self.Mixed_7b(x)
        # 8 x 8 x 2048
        x = self.Mixed_7c(x)
        # 8 x 8 x 2048
        x = F.adaptive_avg_pool2d(x, (1, 1))
        # 1 x 1 x 2048
        x = F.dropout(x, training=self.training)
        # 1 x 1 x 2048
        x = x.view(x.size(0), -1)
        # 2048
        x = self.fc(x)
        # num_classes
        if self.training and self.aux_logits:
            return x, aux
        return x

device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))

# Match the classifier head to the number of classes in our dataset
model = InceptionV3(num_classes=len(total_data.classes)).to(device)
model
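As a quick sanity check (my addition, not part of the original walkthrough), a dummy forward pass confirms that the classifier head matches our class count. The shape comments in forward assume the canonical 299x299 input, while our pipeline resizes to 224x224; this still works because F.adaptive_avg_pool2d collapses whatever spatial size reaches the head:

x = torch.randn(1, 3, 224, 224).to(device)  # dummy batch at our training resolution
model.eval()
with torch.no_grad():
    out = model(x)
print(out.shape)  # expected: torch.Size([1, num_classes])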
3. Compile and Train the Model
loss_fn    = nn.CrossEntropyLoss()  # loss function
learn_rate = 1e-4                   # learning rate
opt        = torch.optim.SGD(model.parameters(), lr=learn_rate)

# Training loop
def train(dataloader, model, loss_fn, optimizer):
    size        = len(dataloader.dataset)  # size of the training set
    num_batches = len(dataloader)          # number of batches
    train_loss, train_acc = 0, 0           # initialize training loss and accuracy

    for X, y in dataloader:  # fetch images and their labels
        X, y = X.to(device), y.to(device)

        # Compute prediction error
        pred = model(X)          # network output
        loss = loss_fn(pred, y)  # gap between network output and ground truth

        # Backpropagation
        optimizer.zero_grad()  # zero out the gradients
        loss.backward()        # backpropagate
        optimizer.step()       # update the parameters

        # Record accuracy and loss
        train_acc  += (pred.argmax(1) == y).type(torch.float).sum().item()
        train_loss += loss.item()

    train_acc  /= size
    train_loss /= num_batches

    return train_acc, train_loss

def test(dataloader, model, loss_fn):
    size        = len(dataloader.dataset)  # size of the test set
    num_batches = len(dataloader)          # number of batches
    test_loss, test_acc = 0, 0

    # Stop gradient tracking when not training, saving compute and memory
    with torch.no_grad():
        for imgs, target in dataloader:
            imgs, target = imgs.to(device), target.to(device)

            # Compute loss
            target_pred = model(imgs)
            loss        = loss_fn(target_pred, target)

            test_loss += loss.item()
            test_acc  += (target_pred.argmax(1) == target).type(torch.float).sum().item()

    test_acc  /= size
    test_loss /= num_batches

    return test_acc, test_loss

epochs     = 50
train_loss = []
train_acc  = []
test_loss  = []
test_acc   = []

for epoch in range(epochs):
    model.train()
    epoch_train_acc, epoch_train_loss = train(train_dl, model, loss_fn, opt)

    model.eval()
    epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)

    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)

    template = ('Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%, Test_loss:{:.3f}')
    print(template.format(epoch+1, epoch_train_acc*100, epoch_train_loss, epoch_test_acc*100, epoch_test_loss))

print('Done')
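A small follow-up sketch (my addition, not in the original notebook) to pull the best epoch out of the recorded history lists:

best_epoch = max(range(epochs), key=lambda i: test_acc[i])
print('Best test accuracy: {:.1f}% at epoch {}'.format(test_acc[best_epoch]*100, best_epoch+1))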

III. Visualizing the Results

import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")               # suppress warnings
plt.rcParams['font.sans-serif']    = ['SimHei'] # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False      # render minus signs correctly
plt.rcParams['figure.dpi']         = 100        # figure resolution

from datetime import datetime
current_time = datetime.now()  # get the current time

epochs_range = range(epochs)

plt.figure(figsize=(12, 3))

plt.subplot(1, 2, 1)
plt.plot(epochs_range, train_acc, label='Training Accuracy')
plt.plot(epochs_range, test_acc, label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.xlabel(current_time)  # include a timestamp when checking in; otherwise the code screenshot is invalid

plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_loss, label='Training Loss')
plt.plot(epochs_range, test_loss, label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()


IV. Summary

Compared with Inception v1, Inception v3 introduces a number of structural optimizations that further improve both accuracy and computational efficiency. The key changes include factorized convolutions (for example, replacing a 5×5 convolution with two stacked 3×3 convolutions) to reduce computational cost, Batch Normalization to speed up convergence and stabilize training, and auxiliary classifiers to strengthen gradient propagation. In addition, Inception v3 stacks more elaborate modules to increase the network's ability to represent image features. Although the architecture is more complex, it achieves excellent results on many image-recognition tasks while keeping resource consumption reasonable. Inception v3 is therefore a convolutional neural network that balances high accuracy with good efficiency, and it is widely used across computer-vision tasks.
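To make the factorization argument concrete, here is a minimal sketch (illustrative only; the channel width of 64 is an assumption) comparing the parameter count of one 5×5 convolution against two stacked 3×3 convolutions, which cover the same 5×5 receptive field:

import torch.nn as nn

c = 64  # assumed channel width, for illustration only
conv5x5    = nn.Conv2d(c, c, kernel_size=5, padding=2, bias=False)
conv3x3_x2 = nn.Sequential(
    nn.Conv2d(c, c, kernel_size=3, padding=1, bias=False),
    nn.Conv2d(c, c, kernel_size=3, padding=1, bias=False),
)

n_params = lambda m: sum(p.numel() for p in m.parameters())
print(n_params(conv5x5))     # 64*64*5*5   = 102400
print(n_params(conv3x3_x2))  # 2*64*64*3*3 =  73728, roughly 28% fewer parameters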
