PyTorch2 Python Deep Learning - Saving and Loading Models
Feng Ge's original PyTorch2 Python deep learning video tutorial:
https://www.bilibili.com/video/BV1eqxNzXEYc
Course Introduction

Building on the earlier Scikit-learn machine learning and TensorFlow 2 deep learning courses, this course continues with deep learning in PyTorch 2, so basic machine learning and deep learning concepts are not repeated here; please make sure you have completed the two previous courses first. This course covers the core of deep learning with PyTorch 2, including an introduction to the PyTorch 2 framework, environment setup, tensors, automatic differentiation, data loading and preprocessing, model training and optimization, as well as convolutional neural networks (CNN), recurrent neural networks (RNN), generative adversarial networks (GAN), and model saving and loading.
PyTorch2 Python Deep Learning - Saving and Loading Models
In PyTorch 2, saving and loading models is a routine operation. Typically, we use torch.save() to save the model's weights (the state_dict) and torch.load() to load them back. The following sections explain how to save and load models in PyTorch 2, with example code.
1. Saving and loading the model weights
Saving the model weights (state_dict)
# Save the model weights
torch.save(model.state_dict(), 'model_weights.pt')
The state_dict is the recommended way to save a model in PyTorch: it stores only the model's weights, not the model structure. After saving the weights, you rebuild the model at load time and then load the weights into it.
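For orientation, here is a minimal standalone sketch (separate from the Iris example below) of what a state_dict actually contains: an ordered dictionary mapping parameter names to weight tensors.
import torch
# A tiny model, purely for illustration
net = torch.nn.Sequential(
    torch.nn.Linear(4, 16),
    torch.nn.ReLU(),
    torch.nn.Linear(16, 3)
)
# state_dict() returns an OrderedDict of parameter name -> tensor
for name, tensor in net.state_dict().items():
    print(name, tuple(tensor.shape))
# Prints: 0.weight (16, 4), 0.bias (16,), 2.weight (3, 16), 2.bias (3,)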
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import torch
# Load the Iris dataset
iris = load_iris()
X = iris.data  # 150 samples, 4 features
y = iris.target  # 3 classes (Setosa, Versicolor, Virginica)
# Standardize the features
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# Split the data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.2, random_state=42)
# Convert the data to PyTorch tensors
X_train = torch.tensor(X_train, dtype=torch.float32)
y_train = torch.tensor(y_train, dtype=torch.long)
X_test = torch.tensor(X_test, dtype=torch.float32)
y_test = torch.tensor(y_test, dtype=torch.long)
# Create datasets and data loaders
train_dataset = torch.utils.data.TensorDataset(X_train, y_train)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
test_dataset = torch.utils.data.TensorDataset(X_test, y_test)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False)
# Hyperparameters
input_size = 4  # number of input features
hidden_size = 16  # number of hidden-layer units
output_size = 3  # number of output classes (Iris has 3 classes)
# Build the model
model = torch.nn.Sequential(
    torch.nn.Linear(input_size, hidden_size),  # input layer -> hidden layer
    torch.nn.ReLU(),
    torch.nn.Linear(hidden_size, output_size)  # hidden layer -> output layer (3 classes)
)
# Loss function and optimizer
criterion = torch.nn.CrossEntropyLoss()  # cross-entropy loss
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)  # optimizer
# Train the model
num_epochs = 100
for epoch in range(num_epochs):
    model.train()  # switch to training mode
    for inputs, labels in train_loader:
        # Forward pass
        outputs = model(inputs)
        loss = criterion(outputs, labels)  # compute the loss
        # Backward pass and optimization
        optimizer.zero_grad()  # clear gradients
        loss.backward()  # compute gradients
        optimizer.step()  # update parameters
    # Print training progress
    print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')
# Save the model weights
torch.save(model.state_dict(), 'model_weights.pt')
In the code above, we define a simple fully connected network and save the model's weights to the file model_weights.pt.
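If you want to confirm what was written to disk, one quick check (assuming the model_weights.pt file produced by the script above) is to load the file and inspect its keys:
# Inspect the saved file (continues from the script above)
saved_state = torch.load('model_weights.pt')
print(list(saved_state.keys()))  # e.g. ['0.weight', '0.bias', '2.weight', '2.bias']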
Loading the model weights (state_dict)
# Load the saved model weights
model.load_state_dict(torch.load('model_weights.pt'))
When loading, you first need to redefine a model with exactly the same structure as the one that was saved, and then use load_state_dict() to load the weights into it.
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import torch
# Load the Iris dataset
iris = load_iris()
X = iris.data  # 150 samples, 4 features
y = iris.target  # 3 classes (Setosa, Versicolor, Virginica)
# Standardize the features
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# Split the data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.2, random_state=42)
# Convert the data to PyTorch tensors
X_train = torch.tensor(X_train, dtype=torch.float32)
y_train = torch.tensor(y_train, dtype=torch.long)
X_test = torch.tensor(X_test, dtype=torch.float32)
y_test = torch.tensor(y_test, dtype=torch.long)
# Create datasets and data loaders
train_dataset = torch.utils.data.TensorDataset(X_train, y_train)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
test_dataset = torch.utils.data.TensorDataset(X_test, y_test)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False)
# Hyperparameters
input_size = 4  # number of input features
hidden_size = 16  # number of hidden-layer units
output_size = 3  # number of output classes (Iris has 3 classes)
# Rebuild the model with the same structure as the saved one
model = torch.nn.Sequential(
    torch.nn.Linear(input_size, hidden_size),  # input layer -> hidden layer
    torch.nn.ReLU(),
    torch.nn.Linear(hidden_size, output_size)  # hidden layer -> output layer (3 classes)
)
# Load the saved model weights
model.load_state_dict(torch.load('model_weights.pt'))
# Evaluate the model
model.eval()  # switch to evaluation mode
y_pred = []
y_true = []
with torch.no_grad():  # disable gradient computation
    for inputs, labels in test_loader:
        outputs = model(inputs)
        _, predicted = torch.max(outputs, 1)  # get the predicted class (index of the largest logit)
        y_pred.extend(predicted.numpy())
        y_true.extend(labels.numpy())
# Compute the accuracy
accuracy = accuracy_score(y_true, y_pred)
print(f'Accuracy: {accuracy * 100:.2f}%')
Note: calling model.eval() is an essential step; it switches layers such as dropout and batch normalization to inference mode, ensuring consistent results.
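The Iris model above contains neither dropout nor batch normalization, so eval() has no visible effect there; the following small standalone sketch illustrates the difference on a model that does use Dropout:
import torch
# A model with Dropout, only to illustrate train/eval behaviour
net = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.Dropout(p=0.5))
x = torch.ones(1, 4)
net.train()
print(net(x))  # dropout active: some activations are randomly zeroed, the rest are scaled up
net.eval()
print(net(x))  # dropout disabled: deterministic output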
2. Saving and loading the entire model (structure and weights)
Besides saving the state_dict, PyTorch also lets you save the entire model (structure plus weights). This approach is generally not recommended, because the saved file depends on the exact code that defined the model, which can cause portability problems.
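To make the portability issue concrete, here is a hedged sketch (the class name IrisNet is made up for illustration): torch.save() pickles the whole object, so torch.load() only succeeds if the loading environment can import the same class definition.
import torch
# Hypothetical custom module, used only to illustrate the coupling to source code
class IrisNet(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(4, 3)
    def forward(self, x):
        return self.fc(x)

torch.save(IrisNet(), 'iris_net.pt')  # pickles the object together with a reference to the IrisNet class
# In a different project, torch.load('iris_net.pt', weights_only=False) raises an error
# unless an identical IrisNet class is importable there; a state_dict avoids this coupling.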
# Save the entire model (structure and weights)
torch.save(model, 'whole_model.pt')
Complete code:
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import torch
# Load the Iris dataset
iris = load_iris()
X = iris.data  # 150 samples, 4 features
y = iris.target  # 3 classes (Setosa, Versicolor, Virginica)
# Standardize the features
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# Split the data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.2, random_state=42)
# Convert the data to PyTorch tensors
X_train = torch.tensor(X_train, dtype=torch.float32)
y_train = torch.tensor(y_train, dtype=torch.long)
X_test = torch.tensor(X_test, dtype=torch.float32)
y_test = torch.tensor(y_test, dtype=torch.long)
# Create datasets and data loaders
train_dataset = torch.utils.data.TensorDataset(X_train, y_train)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
test_dataset = torch.utils.data.TensorDataset(X_test, y_test)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False)
# Hyperparameters
input_size = 4  # number of input features
hidden_size = 16  # number of hidden-layer units
output_size = 3  # number of output classes (Iris has 3 classes)
# Build the model
model = torch.nn.Sequential(
    torch.nn.Linear(input_size, hidden_size),  # input layer -> hidden layer
    torch.nn.ReLU(),
    torch.nn.Linear(hidden_size, output_size)  # hidden layer -> output layer (3 classes)
)
# Loss function and optimizer
criterion = torch.nn.CrossEntropyLoss()  # cross-entropy loss
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)  # optimizer
# Train the model
num_epochs = 100
for epoch in range(num_epochs):
    model.train()  # switch to training mode
    for inputs, labels in train_loader:
        # Forward pass
        outputs = model(inputs)
        loss = criterion(outputs, labels)  # compute the loss
        # Backward pass and optimization
        optimizer.zero_grad()  # clear gradients
        loss.backward()  # compute gradients
        optimizer.step()  # update parameters
    # Print training progress
    print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')
# Save the entire model (structure and weights)
torch.save(model, 'whole_model.pt')
# Load the saved model (structure and weights)
model = torch.load('whole_model.pt', weights_only=False)
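A related practical point, not specific to this example: torch.load() accepts a map_location argument, which is useful when a model saved on a GPU machine has to be loaded where only a CPU is available. A minimal sketch, assuming the whole_model.pt file from above:
# Remap GPU tensors to the CPU while loading (illustrative)
model = torch.load('whole_model.pt', map_location=torch.device('cpu'), weights_only=False)
# map_location works the same way when loading a plain state_dict:
# state = torch.load('model_weights.pt', map_location='cpu')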
When loading the entire model, you can call torch.load() directly. Note that recent PyTorch releases (2.6+) default torch.load() to weights_only=True, so weights_only=False must be passed to unpickle a full model object:
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import torch
# Load the Iris dataset
iris = load_iris()
X = iris.data  # 150 samples, 4 features
y = iris.target  # 3 classes (Setosa, Versicolor, Virginica)
# Standardize the features
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# Split the data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.2, random_state=42)
# Convert the data to PyTorch tensors
X_train = torch.tensor(X_train, dtype=torch.float32)
y_train = torch.tensor(y_train, dtype=torch.long)
X_test = torch.tensor(X_test, dtype=torch.float32)
y_test = torch.tensor(y_test, dtype=torch.long)
# Create datasets and data loaders
train_dataset = torch.utils.data.TensorDataset(X_train, y_train)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
test_dataset = torch.utils.data.TensorDataset(X_test, y_test)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False)
# Load the saved model (structure and weights)
model = torch.load('whole_model.pt', weights_only=False)
# Evaluate the model
model.eval()  # switch to evaluation mode
y_pred = []
y_true = []
with torch.no_grad():  # disable gradient computation
    for inputs, labels in test_loader:
        outputs = model(inputs)
        _, predicted = torch.max(outputs, 1)  # get the predicted class (index of the largest logit)
        y_pred.extend(predicted.numpy())
        y_true.extend(labels.numpy())
# Compute the accuracy
accuracy = accuracy_score(y_true, y_pred)
print(f'Accuracy: {accuracy * 100:.2f}%')
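Beyond the two approaches shown above, a common pattern worth knowing is saving a training checkpoint that bundles the weights with the optimizer state and the current epoch so that training can be resumed later. This is a sketch under the assumption that model, optimizer, epoch and loss come from the training script above:
# Save a training checkpoint (the file name checkpoint.pt is arbitrary)
torch.save({
    'epoch': epoch,
    'model_state_dict': model.state_dict(),
    'optimizer_state_dict': optimizer.state_dict(),
    'loss': loss.item(),
}, 'checkpoint.pt')
# Resume later: rebuild the model and optimizer, then restore their states
checkpoint = torch.load('checkpoint.pt')
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
start_epoch = checkpoint['epoch'] + 1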
