南昌电商网站设计网页源代码怎么查找部分内容
代码
# 2-10交叉熵代价函数的逻辑回归模型
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt# 参数设置
iterations = 1000 # number of gradient-descent iterations
learning_rate = 0.1 # learning rate
m_train = 250 # number of training samples
# Read the drunk-driving (alcohol) detection dataset
df = pd.read_csv('alcohol_dataset.csv')
data=np.array(df)
m_all = np.shape(data)[0] # total number of samples
d =np.shape(data)[1] - 1 # input-feature dimensionality (last column is the label)
m_test = m_all - m_train # number of test samples
# Build an RNG with a fixed seed and shuffle the samples so the
# train/test split below is random but reproducible
rng=np.random.default_rng(1)
rng.shuffle(data)
# Standardize the input features using TRAINING-set statistics only
mean=np.mean(data[0:m_train, 0:d], axis=0) # per-feature mean of the training inputs
std=np.std(data[0:m_train, 0:d], axis=0, ddof=1) # per-feature sample std (ddof=1) of the training inputs
data[:, 0:d]=(data[:, 0:d]-mean)/std # apply to ALL samples so train and test share one scale
# Split the shuffled data into train/test sets.
# Inputs are stored column-wise: X has shape (d, m); labels are row vectors (1, m).
X_train = data[0:m_train, 0:d].T # training inputs, shape (d, m_train)
X_test = data[m_train:, 0:d].T # test inputs, shape (d, m_test)
Y_train = data[0:m_train, d].reshape((1, -1)) # training labels, shape (1, m_train)
Y_test = data[m_train:, d].reshape((1, -1)) # test labels, shape (1, m_test)
# Initialize the model parameters
w = np.zeros((d, 1)) # weights, shape (d, 1) — the former .reshape((-1, 1)) was a no-op
b = 0 # bias (scalar; becomes a (1, 1) array after the first update)
v = np.ones((1, m_train)) # all-ones row vector used to broadcast the bias over samples
costs_saved = [] # cost value recorded at each iteration
# Training: batch gradient descent on the cross-entropy cost.
# NOTE(review): in the original source the entire loop body was collapsed
# onto one line behind a '#' (a SyntaxError); reconstructed from the
# embedded statements and their inline comments.
for i in range(iterations):
    # Forward pass: linear score, then logistic sigmoid
    z = np.dot(w.T, X_train) + b * v
    Y_hat = 1. / (1 + np.exp(-z))
    e = Y_hat - Y_train  # prediction error
    # Gradient-descent updates for bias and weights
    b = b - learning_rate * np.dot(v, e.T) / m_train
    w = w - learning_rate * np.dot(X_train, e.T) / m_train
    # Record the cross-entropy cost for this iteration
    Y_1_hat = 1 - Y_hat
    Y_1 = 1 - Y_train
    costs = -(np.dot(np.log(Y_hat), Y_train.T) + np.dot(np.log(Y_1_hat), Y_1.T)) / m_train
    costs_saved.append(costs.item(0))
# Print the final weights and bias
print('Weights=', np.array2string(np.squeeze(w, axis=1), precision=3))
print(f'Bias={b.item(0):.3f}')
# Plot the cost history over iterations
plt.plot(range(1, np.size(costs_saved) + 1), costs_saved, 'r-o', linewidth=2, markersize=5)
plt.ylabel('costs')
plt.xlabel('iterations')
plt.title('learning rate=' + str(learning_rate))
plt.show()
# Classification errors on the training set
# (decision rule: z >= 0, equivalent to sigmoid(z) >= 0.5; result is boolean)
Y_train_hat = (np.dot(w.T, X_train) + b * v) >= 0
errors_train=np.sum(np.abs(Y_train_hat-Y_train))  # boolean preds minus 0/1 labels -> count of mismatches
print('Trainset Predicted errors=', errors_train.astype(int))
# Classification errors on the test set
# (bare b broadcasts over the (1, m_test) row, so no ones vector is needed here)
Y_test_hat=(np.dot(w.T,X_test)+b)>=0
errors_test=np.sum(np.abs(Y_test_hat-Y_test))
print('Testset Predicted errors=', errors_test.astype(int))
# Confusion matrix on the training set
tp=np.sum(np.logical_and(Y_train==1,Y_train_hat==1)) # TP: true positives
fp=np.sum(np.logical_and(Y_train==0,Y_train_hat==1)) # FP: false positives
tn=np.sum(np.logical_and(Y_train==0,Y_train_hat==0)) # TN: true negatives
fn=np.sum(np.logical_and(Y_train==1,Y_train_hat==0)) # FN: false negatives
print(f'Trainset TP={tp},FP={fp},TN={tn},FN={fn}')
# Recall, precision and F1 score on the training set
recall=tp/(tp+fn)
precision=tp/(tp+fp)
f1=2*recall*precision/(recall+precision)
# Fixed user-facing typo: 'presision' -> 'precision'
print(f'Trainset recall={recall:.3},precision={precision:.3},F1={f1:.3}')
# Confusion matrix on the test set
tp_test=np.sum(np.logical_and(Y_test==1, Y_test_hat==1)) # TP
fp_test=np.sum(np.logical_and(Y_test==0, Y_test_hat==1)) # FP
tn_test=np.sum(np.logical_and(Y_test==0, Y_test_hat==0)) # TN
fn_test=np.sum(np.logical_and(Y_test==1, Y_test_hat==0)) # FN
# Fixed user-facing typo: 'Trestset' -> 'Testset'
print(f'Testset TP={tp_test},FP={fp_test},TN={tn_test},FN={fn_test}')
# Recall, precision and F1 score on the test set
recall_test=tp_test/(tp_test+fn_test)
precision_test=tp_test/(tp_test+fp_test)
f1_test=2*recall_test*precision_test/(recall_test+precision_test)
# Fixed wrong label ('Trainset' -> 'Testset') and 'presision' typo
print(f'Testset recall={recall_test:.3},precision={precision_test:.3},F1={f1_test:.3}')
结果图

