
Deep Learning (6): PyTorch

This post writes the code based on the previous five lessons.

Before that, let's first go over a few important functions and parameters:

__init__: initialization method
in_dim: input-layer dimension
nn.Flatten: flattens the input into a one-dimensional vector
nn.BatchNorm1d: batch normalization
in_features: number of input features
in_channels: number of input channels
stride: stride of the convolution
padding: amount of padding
kernel_size: size of the convolution kernel
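To make the convolution-related parameters concrete, here is a minimal sketch I added (not code from the lessons) showing how in_channels, kernel_size, stride and padding determine the output shape of nn.Conv2d:

    import torch
    from torch import nn

    # 1 input channel, 16 output channels, 3x3 kernel, stride 2, padding 1
    conv = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3, stride=2, padding=1)
    x = torch.randn(8, 1, 28, 28)   # dummy batch of 8 single-channel 28x28 images
    y = conv(x)
    print(y.shape)                  # torch.Size([8, 16, 14, 14]); (28 + 2*1 - 3) // 2 + 1 = 14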

# Import required libraries
import torch
from torch import nn

# Hyperparameters
in_dim, n_hidden_1, n_hidden_2, out_dim = 28 * 28, 300, 100, 10

# Build the model by passing layers as positional arguments (layers are unnamed)
Seq_arg = nn.Sequential(
    nn.Flatten(),
    nn.Linear(in_dim, n_hidden_1),
    nn.BatchNorm1d(n_hidden_1),
    nn.ReLU(),
    nn.Linear(n_hidden_1, n_hidden_2),
    nn.BatchNorm1d(n_hidden_2),
    nn.ReLU(),
    nn.Linear(n_hidden_2, out_dim),
    nn.Softmax(dim=1)
)

# Print the model structure
print("nn.Sequential (positional-argument) model:")
print(Seq_arg)

Run result:

nn.Sequential (positional-argument) model:
Sequential(
  (0): Flatten(start_dim=1, end_dim=-1)
  (1): Linear(in_features=784, out_features=300, bias=True)
  (2): BatchNorm1d(300, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (3): ReLU()
  (4): Linear(in_features=300, out_features=100, bias=True)
  (5): BatchNorm1d(100, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (6): ReLU()
  (7): Linear(in_features=100, out_features=10, bias=True)
  (8): Softmax(dim=1)
)

# Import required libraries
import torch
from torch import nn
import torch.nn.functional as F

# Define the model class
class Model_dict(nn.Module):
    def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
        super(Model_dict, self).__init__()
        self.layers_dict = nn.ModuleDict({
            "flatten": nn.Flatten(),
            "linear1": nn.Linear(in_dim, n_hidden_1),
            "bn1": nn.BatchNorm1d(n_hidden_1),
            "relu": nn.ReLU(),
            "linear2": nn.Linear(n_hidden_1, n_hidden_2),
            "bn2": nn.BatchNorm1d(n_hidden_2),
            "out": nn.Linear(n_hidden_2, out_dim),
            "softmax": nn.Softmax(dim=1)
        })

    # Forward pass: call the layers from the dict in a custom order
    def forward(self, x):
        # "relu" appears twice: the same (stateless) ReLU module is simply reused
        layer_order = ["flatten", "linear1", "bn1", "relu", "linear2", "bn2", "relu", "out", "softmax"]
        for layer_name in layer_order:
            x = self.layers_dict[layer_name](x)
        return x

# Hyperparameters and instantiation
in_dim, n_hidden_1, n_hidden_2, out_dim = 28 * 28, 300, 100, 10
model_dict = Model_dict(in_dim, n_hidden_1, n_hidden_2, out_dim)
print("nn.Module + nn.ModuleDict container model:")
print(model_dict)

Run result:

nn.Module + nn.ModuleDict container model:
Model_dict(
  (layers_dict): ModuleDict(
    (flatten): Flatten(start_dim=1, end_dim=-1)
    (linear1): Linear(in_features=784, out_features=300, bias=True)
    (bn1): BatchNorm1d(300, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (relu): ReLU()
    (linear2): Linear(in_features=300, out_features=100, bias=True)
    (bn2): BatchNorm1d(100, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (out): Linear(in_features=100, out_features=10, bias=True)
    (softmax): Softmax(dim=1)
  )
)
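A small illustration I added of what nn.ModuleDict buys you: layers can be looked up by key, and everything stored in the dict is registered with the parent module, so its parameters appear in model_dict.parameters():

    print(model_dict.layers_dict["linear1"])               # Linear(in_features=784, out_features=300, bias=True)
    n_params = sum(p.numel() for p in model_dict.parameters())
    print(n_params)                                         # parameters of all stored layers are tracked automatically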

import torch
from torch import nn

# Hyperparameters (same as above; skip if already defined)
in_dim, n_hidden_1, n_hidden_2, out_dim = 28 * 28, 300, 100, 10

# Start from an empty Sequential container, then add named layers one by one
Seq_module = nn.Sequential()
Seq_module.add_module("flatten", nn.Flatten())                       # flatten layer, named "flatten"
Seq_module.add_module("linear1", nn.Linear(in_dim, n_hidden_1))      # 1st fully connected layer
Seq_module.add_module("bn1", nn.BatchNorm1d(n_hidden_1))             # 1st batch-norm layer
Seq_module.add_module("relu1", nn.ReLU())                            # 1st activation layer
Seq_module.add_module("linear2", nn.Linear(n_hidden_1, n_hidden_2))  # 2nd fully connected layer
Seq_module.add_module("bn2", nn.BatchNorm1d(n_hidden_2))             # 2nd batch-norm layer
Seq_module.add_module("relu2", nn.ReLU())                            # 2nd activation layer
Seq_module.add_module("out", nn.Linear(n_hidden_2, out_dim))         # output layer
Seq_module.add_module("softmax", nn.Softmax(dim=1))                  # probability normalization layer

print("\nnn.Sequential add_module model structure:")
print(Seq_module)

Run result:

nn.Sequential add_module model structure:
Sequential(
  (flatten): Flatten(start_dim=1, end_dim=-1)
  (linear1): Linear(in_features=784, out_features=300, bias=True)
  (bn1): BatchNorm1d(300, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (relu1): ReLU()
  (linear2): Linear(in_features=300, out_features=100, bias=True)
  (bn2): BatchNorm1d(100, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (relu2): ReLU()
  (out): Linear(in_features=100, out_features=10, bias=True)
  (softmax): Softmax(dim=1)
)
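Because add_module attaches each layer under an explicit name, the layers are also reachable as attributes and through named_children(); a short illustration of mine:

    print(Seq_module.linear1.weight.shape)    # torch.Size([300, 784])
    for name, layer in Seq_module.named_children():
        print(name, "->", layer.__class__.__name__)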

# Import required libraries
import torch
from torch import nn
from collections import OrderedDict  # for the ordered dictionary

# Hyperparameters (matched to the MNIST dataset)
in_dim, n_hidden_1, n_hidden_2, out_dim = 28 * 28, 300, 100, 10

# Store "layer name -> layer instance" pairs in an OrderedDict to guarantee order
Seq_ordered = nn.Sequential(OrderedDict([
    ("flatten", nn.Flatten()),
    ("linear1", nn.Linear(in_dim, n_hidden_1)),
    ("bn1", nn.BatchNorm1d(n_hidden_1)),
    ("relu1", nn.ReLU()),
    ("linear2", nn.Linear(n_hidden_1, n_hidden_2)),
    ("bn2", nn.BatchNorm1d(n_hidden_2)),
    ("relu2", nn.ReLU()),
    ("out", nn.Linear(n_hidden_2, out_dim)),
    ("softmax", nn.Softmax(dim=1))
]))

# Print the model structure
print("nn.Sequential OrderedDict model:")
print(Seq_ordered)

Run result:

nn.Sequential OrderedDict model:
Sequential(
  (flatten): Flatten(start_dim=1, end_dim=-1)
  (linear1): Linear(in_features=784, out_features=300, bias=True)
  (bn1): BatchNorm1d(300, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (relu1): ReLU()
  (linear2): Linear(in_features=300, out_features=100, bias=True)
  (bn2): BatchNorm1d(100, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (relu2): ReLU()
  (out): Linear(in_features=100, out_features=10, bias=True)
  (softmax): Softmax(dim=1)
)
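With the OrderedDict style the layers keep their names, but the container is still an ordinary nn.Sequential, so a layer can be addressed either by position or by name; a quick check of mine:

    print(Seq_ordered[0] is Seq_ordered.flatten)   # True: index 0 and the name "flatten" are the same module
    print(Seq_ordered.out)                         # Linear(in_features=100, out_features=10, bias=True)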

# Import required libraries
import torch
from torch import nn
import torch.nn.functional as F

# Define the model class
class Model_lst(nn.Module):
    def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
        super(Model_lst, self).__init__()
        # Store the layers as a list with nn.ModuleList
        self.layers = nn.ModuleList([
            nn.Flatten(),
            nn.Linear(in_dim, n_hidden_1),
            nn.BatchNorm1d(n_hidden_1),
            nn.ReLU(),
            nn.Linear(n_hidden_1, n_hidden_2),
            nn.BatchNorm1d(n_hidden_2),
            nn.ReLU(),
            nn.Linear(n_hidden_2, out_dim),
            nn.Softmax(dim=1)
        ])

    # Forward pass: iterate over the list
    def forward(self, x):
        for layer in self.layers:
            x = layer(x)
        return x

# Hyperparameters and instantiation
in_dim, n_hidden_1, n_hidden_2, out_dim = 28 * 28, 300, 100, 10
model_lst = Model_lst(in_dim, n_hidden_1, n_hidden_2, out_dim)
print("nn.Module + nn.ModuleList container model:")
print(model_lst)

Run result:

nn.Module + nn.ModuleList container model:
Model_lst(
  (layers): ModuleList(
    (0): Flatten(start_dim=1, end_dim=-1)
    (1): Linear(in_features=784, out_features=300, bias=True)
    (2): BatchNorm1d(300, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (3): ReLU()
    (4): Linear(in_features=300, out_features=100, bias=True)
    (5): BatchNorm1d(100, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (6): ReLU()
    (7): Linear(in_features=100, out_features=10, bias=True)
    (8): Softmax(dim=1)
  )
)
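The point of nn.ModuleList over a plain Python list (my own note, not from the lesson) is parameter registration: layers kept in a plain list are invisible to the parent module, while layers in a ModuleList are tracked. A minimal comparison:

    from torch import nn

    class PlainList(nn.Module):
        def __init__(self):
            super().__init__()
            self.layers = [nn.Linear(10, 10)]                  # plain list: parameters are NOT registered

    class WithModuleList(nn.Module):
        def __init__(self):
            super().__init__()
            self.layers = nn.ModuleList([nn.Linear(10, 10)])   # registered correctly

    print(len(list(PlainList().parameters())))       # 0
    print(len(list(WithModuleList().parameters())))  # 2 (weight and bias)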

# Import required libraries
import torch
from torch import nn
import torch.nn.functional as F

# Define the model class
class Model_lay(nn.Module):
    def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
        super(Model_lay, self).__init__()
        self.flatten = nn.Flatten()
        # Group related layers with nn.Sequential
        self.layer1 = nn.Sequential(nn.Linear(in_dim, n_hidden_1), nn.BatchNorm1d(n_hidden_1))
        self.layer2 = nn.Sequential(nn.Linear(n_hidden_1, n_hidden_2), nn.BatchNorm1d(n_hidden_2))
        self.out = nn.Sequential(nn.Linear(n_hidden_2, out_dim))

    # Forward-pass logic
    def forward(self, x):
        x = self.flatten(x)
        x = F.relu(self.layer1(x))
        x = F.relu(self.layer2(x))
        x = F.softmax(self.out(x), dim=1)
        return x

# Hyperparameters and instantiation
in_dim, n_hidden_1, n_hidden_2, out_dim = 28 * 28, 300, 100, 10
model_lay = Model_lay(in_dim, n_hidden_1, n_hidden_2, out_dim)
print("nn.Module + nn.Sequential container model:")
print(model_lay)

Run result:

nn.Module + nn.Sequential container model:
Model_lay(
  (flatten): Flatten(start_dim=1, end_dim=-1)
  (layer1): Sequential(
    (0): Linear(in_features=784, out_features=300, bias=True)
    (1): BatchNorm1d(300, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  )
  (layer2): Sequential(
    (0): Linear(in_features=300, out_features=100, bias=True)
    (1): BatchNorm1d(100, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  )
  (out): Sequential(
    (0): Linear(in_features=100, out_features=10, bias=True)
  )
)
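Note that in this version the activations (F.relu, F.softmax) exist only inside forward(), which is why they do not show up in the printed structure; the layers inside each group can still be reached by indexing the sub-Sequential. A small check of mine:

    print(model_lay.layer1[0])   # the Linear inside the first group
    print(model_lay.layer1[1])   # the BatchNorm1d inside the first group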

# Import required libraries
import torch
import torch.nn as nn
from torch.nn import functional as F

# Define the basic residual block (input and output shapes match, so the skip connection is direct)
class RestNetBasicBlock(nn.Module):
    def __init__(self, in_channels, out_channels, stride):
        super(RestNetBasicBlock, self).__init__()
        # Main path: two 3x3 convolutions, each followed by batch normalization
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=stride, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)

    # Forward pass: main path plus skip connection
    def forward(self, x):
        output = self.conv1(x)
        output = F.relu(self.bn1(output))
        output = self.conv2(output)
        output = self.bn2(output)
        return F.relu(x + output)  # the input is added directly to the main-path output

# Inspect the block structure (example: 64 input/output channels, stride=1)
basic_block = RestNetBasicBlock(64, 64, 1)
print("Residual block 1")
print(basic_block)

Run result:

Residual block 1
RestNetBasicBlock(
  (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
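To confirm that the basic block really is shape-preserving, here is a check I added (assuming a 56x56 feature map, roughly what ResNet-18 produces after its stem):

    x = torch.randn(1, 64, 56, 56)   # dummy feature map: 64 channels, 56x56
    y = basic_block(x)
    print(y.shape)                   # torch.Size([1, 64, 56, 56]): same shape, so the residual add is valid

The addition x + output only works because a 3x3 convolution with stride=1 and padding=1 keeps both the channel count and the spatial size; since both convolutions here use the same stride argument, this block is only meant to be used with stride=1.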

# Import required libraries
import torch
import torch.nn as nn
from torch.nn import functional as F

# Define the downsampling residual block (input and output shapes differ, so a 1x1 convolution adjusts the skip connection)
class RestNetDownBlock(nn.Module):
    def __init__(self, in_channels, out_channels, stride):
        super(RestNetDownBlock, self).__init__()
        # Main path: the first convolution downsamples, the second keeps the size
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride[0], padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=stride[1], padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)
        # Skip path: a 1x1 convolution adjusts the channel count and resolution
        self.extra = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride[0], padding=0),
            nn.BatchNorm2d(out_channels)
        )

    # Forward pass: adjusted skip connection plus main path
    def forward(self, x):
        extra_x = self.extra(x)  # preprocess the skip path
        output = self.conv1(x)
        out = F.relu(self.bn1(output))
        out = self.conv2(out)
        out = self.bn2(out)
        return F.relu(extra_x + out)

# Inspect the block structure (example: 64 -> 128 channels, stride=[2, 1] for downsampling)
down_block = RestNetDownBlock(64, 128, [2, 1])
print("Downsampling residual block structure:")
print(down_block)

Run result:

Downsampling residual block structure:
RestNetDownBlock(
  (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
  (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (extra): Sequential(
    (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2))
    (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  )
)
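And a matching check I added for the downsampling block: with stride=[2, 1] and 64 -> 128 channels, the main path halves the spatial size, while the 1x1 extra branch transforms the input so the two tensors can be added:

    x = torch.randn(1, 64, 56, 56)
    y = down_block(x)
    print(y.shape)   # torch.Size([1, 128, 28, 28]): channels doubled, spatial size halved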

import torch
from torch import nn
import torch.nn.functional as F

# Build the model by subclassing the base class nn.Module
class Model_Seq(nn.Module):
    def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
        super(Model_Seq, self).__init__()
        self.flatten = nn.Flatten()
        self.linear1 = nn.Linear(in_dim, n_hidden_1)
        self.bn1 = nn.BatchNorm1d(n_hidden_1)
        self.linear2 = nn.Linear(n_hidden_1, n_hidden_2)
        self.bn2 = nn.BatchNorm1d(n_hidden_2)
        self.out = nn.Linear(n_hidden_2, out_dim)

    def forward(self, x):
        x = self.flatten(x)
        x = self.linear1(x)
        x = self.bn1(x)
        x = F.relu(x)
        x = self.linear2(x)
        x = self.bn2(x)
        x = F.relu(x)
        x = self.out(x)
        x = F.softmax(x, dim=1)
        return x

# Assign the hyperparameters
in_dim, n_hidden_1, n_hidden_2, out_dim = 28 * 28, 300, 100, 10
model_seq = Model_Seq(in_dim, n_hidden_1, n_hidden_2, out_dim)
print(model_seq)

Run result:

Model_Seq(
  (flatten): Flatten(start_dim=1, end_dim=-1)
  (linear1): Linear(in_features=784, out_features=300, bias=True)
  (bn1): BatchNorm1d(300, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (linear2): Linear(in_features=300, out_features=100, bias=True)
  (bn2): BatchNorm1d(100, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (out): Linear(in_features=100, out_features=10, bias=True)
)
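Since Model_Seq already applies softmax in forward(), its output can be read directly as class probabilities; here is a minimal inference sketch I added (for training with nn.CrossEntropyLoss you would normally drop the softmax and feed raw logits to the loss instead):

    model_seq.eval()                      # switch BatchNorm to inference mode
    with torch.no_grad():
        x = torch.randn(4, 1, 28, 28)     # dummy batch of 4 MNIST-sized images
        probs = model_seq(x)
        preds = probs.argmax(dim=1)
    print(preds)                          # predicted class index (0-9) for each sample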

