Notes 9 (Torch)
Contents
- 1. Torch
1. Torch
Function | Description | Example |
---|---|---|
torch.tensor() torch.arange() | Create tensors | Scalar: torch.tensor(42); 1-D tensor: torch.tensor([1, 2, 3]); 2-D tensor: torch.tensor([[1, 2], [3, 4]]); 1-D evenly spaced tensor, signature: torch.arange(start=0, end, step=1, *, dtype=None, device=None, requires_grad=False), e.g. torch.arange(3) is tensor([0, 1, 2]) |
tensor.view() | Reshape a tensor | Reshape 1 row × 8 columns into 2 rows × 4 columns: torch.arange(1, 9).view(2, 4) |
torch.cat() | Concatenate tensors along a given dimension | torch.cat((torch.tensor([[1, 2], [3, 4]]), torch.tensor([[5, 6]])), dim=0) # tensor([[1, 2], [3, 4], [5, 6]]) |
Indexing and slicing | Same usage as NumPy arrays | |
tensor.t() | Transpose a tensor | torch.tensor([[1, 2, 3], [4, 5, 6]]).t() # tensor([[1, 4], [2, 5], [3, 6]]) |
torch.mm() | Matrix multiplication | torch.mm(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[5, 6], [7, 8]])) # tensor([[19, 22], [43, 50]]) |
torch.mul() | Element-wise multiplication | torch.mul(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[5, 6], [7, 8]])) # tensor([[ 5, 12], [21, 32]]) |
torch.sum() | Sum of all elements | torch.sum(torch.tensor([[1, 2], [3, 4]])) # tensor(10) |
import torch
import torch.nn as nn
import torch.optim as optim

# torch.tensor()
print(torch.tensor(42))  # create a scalar (0-D tensor): tensor(42)
print(torch.tensor([1, 2, 3]))  # create a 1-D tensor: tensor([1, 2, 3])
print(torch.tensor([[1, 2], [3, 4]]))  # create a 2-D tensor: tensor([[1, 2], [3, 4]])

# torch.arange(): create a 1-D evenly spaced (arithmetic-sequence) tensor
print(torch.arange(1, 5))  # tensor([1, 2, 3, 4])
print(torch.arange(3))  # tensor([0, 1, 2])
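# Quick extra illustration (values chosen arbitrarily): torch.tensor() and torch.arange()
# also accept the dtype / device / requires_grad keyword arguments listed in the arange
# signature in the table above; device="cuda" would place the tensor on a GPU if available.
print(torch.arange(3, dtype=torch.float32))          # tensor([0., 1., 2.])
print(torch.tensor([1.0, 2.0], requires_grad=True))  # tensor([1., 2.], requires_grad=True)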

# tensor.view(): reshape
tensor1 = torch.arange(1, 9)  # tensor1 = tensor([1, 2, 3, 4, 5, 6, 7, 8])
print(tensor1.view(2, 4))  # or tensor1.view(-1, 4), tensor1.view(2, -1): tensor([[1, 2, 3, 4], [5, 6, 7, 8]])

# torch.cat(): concatenate
print(torch.cat((torch.tensor([[1, 2], [3, 4]]), torch.tensor([[5, 6]])), dim=0))
# The line above concatenates along dim 0 (dim 1 is unchanged, e.g. the row [1, 2] is kept intact): tensor([[1, 2], [3, 4], [5, 6]])
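# For comparison, dim=1 concatenates along columns, so the row counts must match
# (the tensors below are illustrative):
print(torch.cat((torch.tensor([[1, 2], [3, 4]]), torch.tensor([[5], [6]])), dim=1))
# tensor([[1, 2, 5], [3, 4, 6]])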

# Indexing and slicing
tensor1 = torch.tensor([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]])
print(tensor1[0, :])      # first row: tensor([1, 2, 3, 4, 5, 6])
print(tensor1[0, 0:3])    # first row, indices 0 up to (but not including) 3: tensor([1, 2, 3])
print(tensor1[0, 0:3:2])  # first row, indices 0 up to (but not including) 3, step 2: tensor([1, 3])
print(tensor1[:, 0])      # first column: tensor([1, 7])
print(tensor1[1:, 1:])    # sub-tensor: tensor([[ 8,  9, 10, 11, 12]])

# tensor.t(): transpose
print(torch.tensor([[1, 2, 3], [4, 5, 6]]).t())  # tensor([[1, 4], [2, 5], [3, 6]])

# torch.mm(): matrix multiplication
print(torch.mm(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[5, 6], [7, 8]])))  # tensor([[19, 22], [43, 50]])

# torch.mul(): element-wise multiplication
print(torch.mul(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[5, 6], [7, 8]])))  # tensor([[ 5, 12], [21, 32]])

# torch.sum(): sum
print(torch.sum(torch.tensor([[1, 2], [3, 4]])))  # tensor(10)

# torch.mean(): mean
print(torch.mean(torch.tensor([[1.0, 2.0], [3.0, 4.0]])))  # tensor(2.5000)

# torch.std(): standard deviation (unbiased by default, i.e. divides by n - 1)
print(torch.std(torch.tensor([[1.0, 2.0], [3.0, 4.0]])))  # tensor(1.2910)

# torch.max() / torch.min(): maximum / minimum values and their indices
print(torch.max(torch.tensor([[1.0, 2.0], [3.0, 4.0]]))) # tensor(4.)
print(torch.max(torch.tensor([[1.0, 2.0], [3.0, 4.0]]), dim=1)) # torch.return_types.max( values=tensor([2., 4.]), indices=tensor([1, 1]))
print(torch.min(torch.tensor([[1.0, 2.0], [3.0, 4.0]])))  # tensor(1.)

# torch.abs(): absolute value
print(torch.abs(torch.tensor([[-1, 2], [-3, 4]])))  # tensor([[1, 2], [3, 4]])

# torch.exp(): exponential, i.e. e**x
print(torch.exp(torch.tensor([[1.0, 2.0], [3.0, 4.0]])))  # tensor([[ 2.7183,  7.3891], [20.0855, 54.5981]])

# torch.log(): natural logarithm, e.g. ln(2) ≈ 0.6931
print(torch.log(torch.tensor([[1.0, 2.0], [3.0, 4.0]])))  # tensor([[0.0000, 0.6931], [1.0986, 1.3863]])

# torch.floor() / torch.ceil(): round down (floor) / round up (ceil)
print(torch.floor(torch.tensor([[1.2, 2.8], [3.5, 4.1]]))) # tensor([[1., 2.], [3., 4.]])
print(torch.ceil(torch.tensor([[1.2, 2.8], [3.5, 4.1]])))  # tensor([[2., 3.], [4., 5.]])

# nn.Linear(x, y): a linear layer with x input features and y output features;
# the weight matrix has shape [y, x] (x * y weight parameters) and the bias has y parameters
layer1 = nn.Linear(3, 1)  # define a linear layer
print(f"layer1\tweight W: {layer1.weight.shape}\tbias b: {layer1.bias.shape}")
# prints: layer1  weight W: torch.Size([1, 3])  bias b: torch.Size([1])
layer2 = nn.Linear(3, 2)  # define a linear layer
print(f"layer2\tweight W: {layer2.weight.shape}\tbias b: {layer2.bias.shape}")
# prints: layer2  weight W: torch.Size([2, 3])  bias b: torch.Size([2])
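# A linear layer computes y = x @ W.T + b: feeding a batch through layer1 maps
# 3 input features to 1 output feature (the batch size of 5 below is an illustrative choice).
x = torch.randn(5, 3)
print(layer1(x).shape)  # torch.Size([5, 1])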

# optimizer.zero_grad(): clears the gradients of the parameters tracked by the optimizer
# (i.e. the parameters registered via model.parameters())
torch.manual_seed(77)  # set the random seed (77 can be replaced with any other number)
model = nn.Linear(3, 1)  # define the model: a single linear layer (3 input features, 1 output)
optimizer = optim.SGD(model.parameters(), lr=0.01)  # define the optimizer: stochastic gradient descent (SGD)
inputs = torch.randn(10, 3)  # simulated input data: batch size 10, feature dimension 3 (a 10 x 3 tensor)
labels = torch.randn(10, 1)  # corresponding labels
for epoch in range(2):  # training loop
    optimizer.zero_grad()  # 1. clear gradients
    outputs = model(inputs)  # 2. forward pass
    loss = nn.MSELoss()(outputs, labels)  # compute the loss
    print(f"epoch={epoch} weight.grad before backward: {model.weight.grad}")
    print(f"epoch={epoch} bias.grad before backward: {model.bias.grad}")
    loss.backward()  # 3. backward pass, compute gradients
    print(f"epoch={epoch} weight.grad after backward: {model.weight.grad}")
    print(f"epoch={epoch} bias.grad after backward: {model.bias.grad}")
    optimizer.step()  # 4. update the parameters
# epoch=0 weight.grad before backward: None
# epoch=0 bias.grad before backward: None
# epoch=0 weight.grad after backward: tensor([[-1.2573, -0.0045, -0.6926]])
# epoch=0 bias.grad after backward: tensor([0.2520])
# epoch=1 weight.grad before backward: tensor([[0., 0., 0.]])
# epoch=1 bias.grad before backward: tensor([0.])
# epoch=1 weight.grad after backward: tensor([[-1.2206, -0.0055, -0.6704]])
# epoch=1 bias.grad after backward: tensor([0.2330])
# If optimizer.zero_grad() is commented out, the gradients accumulate; compare the bias gradients below: 0.4849 ≈ 0.2520 + 0.2330
# epoch=0 weight.grad before backward: None
# epoch=0 bias.grad before backward: None
# epoch=0 weight.grad after backward: tensor([[-1.2573, -0.0045, -0.6926]])
# epoch=0 bias.grad after backward: tensor([0.2520])
# epoch=1 weight.grad before backward: tensor([[-1.2573, -0.0045, -0.6926]])
# epoch=1 bias.grad before backward: tensor([0.2520])
# epoch=1 weight.grad after backward: tensor([[-2.4779, -0.0100, -1.3630]])
# epoch=1 bias.grad after backward: tensor([0.4849])
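# For plain SGD (no momentum), optimizer.step() updates each tracked parameter as
# p <- p - lr * p.grad; a rough check with illustrative values (this layer and lr=0.1
# are arbitrary, not part of the notes above):
layer = nn.Linear(2, 1)
opt = optim.SGD(layer.parameters(), lr=0.1)
layer(torch.ones(1, 2)).sum().backward()
w_before = layer.weight.detach().clone()
opt.step()
print(torch.allclose(layer.weight, w_before - 0.1 * layer.weight.grad))  # True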

# torch.nn.utils.clip_grad_norm_(): gradient clipping
# ...
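# A minimal sketch of typical usage (assumptions: max_norm=1.0 is an arbitrary choice, and
# model / inputs / labels / optimizer are reused from the zero_grad example above):
# clip_grad_norm_ rescales the gradients of the given parameters so that their combined
# L2 norm is at most max_norm; it is called after loss.backward() and before optimizer.step().
optimizer.zero_grad()
loss = nn.MSELoss()(model(inputs), labels)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
optimizer.step()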