Machine Learning: Logistic Regression
Logistic regression applies a nonlinear transformation on top of a linear combination of the input.
y = kx + b
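Concretely, the nonlinear transform is the sigmoid function applied to the linear output, which squashes it into (0, 1) so the result can be read as a probability:

ŷ = sigmoid(kx + b) = 1 / (1 + e^(-(kx + b)))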
torch.nn.Linear(in_features, out_features)  # linear layer: computes kx + b
torch.sigmoid(input)                        # sigmoid activation: maps any real number into (0, 1)
torch.nn.BCELoss()                          # binary cross-entropy loss
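A minimal sketch of how these three pieces fit together; the tensors and their values here are made up purely for illustration:

import torch

linear = torch.nn.Linear(1, 1)         # linear layer: y = kx + b
criterion = torch.nn.BCELoss()         # binary cross-entropy loss

x = torch.tensor([[0.5], [-1.0]])      # two dummy samples
target = torch.tensor([[1.0], [0.0]])  # dummy binary labels

prob = torch.sigmoid(linear(x))        # map the linear output into (0, 1)
loss = criterion(prob, target)         # compare probabilities against labels
print(loss.item())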
import numpy as np
import torch
import matplotlib.pyplot as plt

x = np.linspace(-5, 5, 20, dtype=np.float32)  # 20 evenly spaced points in [-5, 5]
_b = 1 / (1 + np.exp(-x))                     # apply the sigmoid transform to get _b
y = np.random.normal(_b, 0.005)               # add Gaussian noise with std 0.005 to get y
x = np.float32(x.reshape(-1, 1))
y = np.float32(y.reshape(-1, 1))
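After the reshape, both arrays hold one sample per row, which is the layout torch.nn.Linear expects; a quick check (illustrative only):

print(x.shape, x.dtype)  # (20, 1) float32
print(y.shape, y.dtype)  # (20, 1) float32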
class LogicRegressionModel(torch.nn.Module):
    def __init__(self, input_dim, output_dim):
        super(LogicRegressionModel, self).__init__()
        self.linear = torch.nn.Linear(input_dim, output_dim)

    def forward(self, x):
        out = torch.sigmoid(self.linear(x))  # squash the linear output into (0, 1)
        return out

The sigmoid function maps any real number into the interval (0, 1). In a neural network, an activation function introduces nonlinearity, which lets the network learn and represent complex patterns. For a logistic regression model, sigmoid converts the linear layer's output into a probability, i.e. how likely the input is to belong to a given class. Here, torch.sigmoid(self.linear(x)) passes the linear output through sigmoid to produce out, a value between 0 and 1 that can be interpreted as a probability prediction.
input_dim = 1
output_dim = 1
model = LogicRegressionModel(input_dim, output_dim)
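As a quick sanity check (the random input is purely illustrative), every output of the model, even before training, should already lie in (0, 1):

with torch.no_grad():
    print(model(torch.randn(3, 1)))  # three random inputs -> three values in (0, 1)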
criterion = torch.nn.BCELoss()

In a binary classification problem, we want the model to output a probability that the input sample belongs to the positive class. Binary cross-entropy loss measures the gap between the predicted probability and the true label (usually 0 or 1, denoting the negative and positive class). The closer the predicted probability is to the true label, the smaller the loss; the further away, the larger the loss.
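Concretely, for a predicted probability p and label y, the loss is -(y·log p + (1 - y)·log(1 - p)), averaged over the batch. A tiny numeric check against torch.nn.BCELoss (the values are made up):

import torch

p = torch.tensor([0.9, 0.2])  # predicted probabilities
t = torch.tensor([1.0, 0.0])  # true labels
manual = -(t * torch.log(p) + (1 - t) * torch.log(1 - p)).mean()
print(manual.item())                    # ~0.164
print(torch.nn.BCELoss()(p, t).item())  # matches the manual value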
learning_rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
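With vanilla SGD (no momentum), each optimizer.step() simply applies w ← w - lr · ∂loss/∂w to every parameter; conceptually it is equivalent to this manual update (a sketch, not how you would normally write it):

with torch.no_grad():
    for param in model.parameters():
        if param.grad is not None:
            param -= learning_rate * param.grad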
Putting it all together, the complete program:

import numpy as np
import torch
import matplotlib.pyplot as plt

x = np.linspace(-5, 5, 20, dtype=np.float32)  # 20 evenly spaced points in [-5, 5]
_b = 1 / (1 + np.exp(-x))                     # sigmoid transform
y = np.random.normal(_b, 0.005)               # add Gaussian noise with std 0.005
x = np.float32(x.reshape(-1, 1))
y = np.float32(y.reshape(-1, 1))

class LogicRegressionModel(torch.nn.Module):
    def __init__(self, input_dim, output_dim):
        super(LogicRegressionModel, self).__init__()
        self.linear = torch.nn.Linear(input_dim, output_dim)

    def forward(self, x):
        out = torch.sigmoid(self.linear(x))
        return out

input_dim = 1
output_dim = 1
model = LogicRegressionModel(input_dim, output_dim)
criterion = torch.nn.BCELoss()
learning_rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

for epoch in range(100):
    inputs = torch.from_numpy(x)
    labels = torch.from_numpy(y)
    optimizer.zero_grad()              # clear gradients from the previous step
    outputs = model(inputs)            # forward pass
    loss = criterion(outputs, labels)  # binary cross-entropy
    loss.backward()                    # backpropagate
    optimizer.step()                   # update the parameters
    print('epoch {}, loss {}'.format(epoch + 1, loss.item()))

# Plot the results
predicted_y = model(torch.from_numpy(x)).detach().numpy()
print("labels y", y)
print("predicted y", predicted_y)
plt.clf()
plt.plot(x, y, 'go', label='True data', alpha=0.5)
plt.plot(x, predicted_y, '--', label='Predictions', alpha=0.5)
plt.legend(loc='best')
plt.show()
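Once trained, the probability outputs can be turned into hard 0/1 class predictions with a threshold; 0.5 is the usual default, an assumption here since the original only plots the probabilities:

with torch.no_grad():
    probs = model(torch.from_numpy(x))
    classes = (probs > 0.5).float()  # 1 if predicted probability > 0.5, else 0
    print(classes.squeeze())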