Parameters and notes for my LGB model
For many beginners, setting LightGBM's parameters is a real challenge. Below is a set of settings I have found effective in my own work, reflecting how these models are commonly used in front-line credit risk control. Some background is needed to read it smoothly; interested readers are welcome to use it as a reference, and I look forward to your feedback.
import lightgbm as lgb
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, roc_auc_score, roc_curve

model_data_lgb = data6[(data6['mob3m2fenmu_flag']>=1)& (data6['confirm_month']<='2024-04')]
oot_data_lgb = data6[(data6['mob3m2fenmu_flag']>=1)& (data6['confirm_month']>='2024-05')]
data_x= model_data_lgb[x_list]
x_train,x_test,y_train,y_test=train_test_split(data_x,model_data_lgb['mob3m1fenzi_flag'],test_size=0.3,random_state=0)
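Risk labels such as mob3m1fenzi_flag are usually heavily imbalanced, so it can help to preserve the bad rate across the split. A minimal variant of the line above, assuming the same variables and using train_test_split's stratify option:

x_train, x_test, y_train, y_test = train_test_split(
    data_x, model_data_lgb['mob3m1fenzi_flag'],
    test_size=0.3, random_state=0,
    stratify=model_data_lgb['mob3m1fenzi_flag'])  # keep the bad rate equal in train and test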
Defining the LightGBM function and the parameter scheme
# Define the LGB training function
def LGB_test(train_x, train_y, test_x, test_y):
    from multiprocessing import cpu_count  # cpu_count() returns the number of CPUs; multiprocessing is Python's multi-process package
    clf = lgb.LGBMClassifier(        # use LGBMClassifier for classification, LGBMRegressor for regression
        boosting_type='gbdt',        # gradient boosted decision trees
        num_leaves=31,               # number of leaves per tree
        reg_alpha=0.1,               # L1 penalty coefficient; larger values are more conservative and less prone to overfitting (default 0); named reg_alpha in the scikit-learn API
        reg_lambda=1,                # L2 penalty coefficient; larger values are more conservative and less prone to overfitting (LightGBM's default is 0); named reg_lambda in the scikit-learn API
        max_depth=3,                 # tree depth; deeper trees are more complex and overfit more easily. LightGBM's default -1 means no limit; worth tuning, usually in 3 to 10
        n_estimators=800,            # number of boosting rounds, i.e. the number of trees fit: gbdt keeps fitting new weak learners to the gradient until this count is reached.
                                     # Too small underfits, too large overfits, so pick a moderate value. Default is 100
        max_features=70,             # intended to cap the features considered per split (note: not a native LightGBM parameter, so it has no effect here)
        objective='binary',          # loss function; 'binary' is binary classification and outputs probabilities rather than class labels
        subsample=0.8,               # row sampling fraction per tree, in (0, 1]; default 1. Smaller values are more conservative and curb overfitting, but too small a value underfits
        colsample_bytree=0.75,       # column sampling fraction per tree, in (0, 1]; default 1
        subsample_freq=1,            # bagging frequency: 0 disables bagging, k performs bagging every k iterations (the sampling fraction must also be set for bagging to kick in)
        learning_rate=0.04,          # step size applied to each boosting update; LightGBM's default is 0.1, set to 0.04 here
        min_child_weight=50,         # minimum sum of hessians required in a leaf; too small a value lets single samples form their own leaves, which overfits easily
        random_state=None,           # purely random; omitting the argument is equivalent to random_state=None
        n_jobs=cpu_count() - 1,      # use all but one of the system's CPUs
        # num_iterations=800         # alias of n_estimators; passing both is redundant, so it is omitted
    )
    clf.fit(train_x, train_y,
            eval_set=[(train_x, train_y), (test_x, test_y)],
            eval_metric='auc',
            callbacks=[lgb.early_stopping(100)])  # lightgbm < 4.0 used the fit keyword early_stopping_rounds=100
    print(clf.n_features_)
    return clf, clf.best_score_['valid_1']['auc']  # best AUC on the eval set ('valid_1' is the second entry in eval_set)

import time
import numpy as np
start = time.time()
model,auc = LGB_test(x_train[x_list],y_train,x_test[x_list],y_test)
end = time.time()
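The values above are a starting point rather than a final answer. If you want to search around them, here is a minimal random-search sketch; the parameter ranges are my own illustration, not part of the original scheme:

from sklearn.model_selection import RandomizedSearchCV

param_dist = {  # illustrative ranges around the settings used above
    'num_leaves': [15, 31, 63],
    'max_depth': [3, 5, 7, 10],
    'learning_rate': [0.02, 0.04, 0.08],
    'subsample': [0.7, 0.8, 0.9],
    'colsample_bytree': [0.6, 0.75, 0.9],
    'min_child_weight': [20, 50, 100],
}
search = RandomizedSearchCV(
    lgb.LGBMClassifier(objective='binary', n_estimators=800),
    param_distributions=param_dist,
    n_iter=20, scoring='roc_auc', cv=3, random_state=0, n_jobs=-1)
search.fit(x_train[x_list], y_train)
print(search.best_params_, search.best_score_)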
# Put the feature importances into a DataFrame
import pandas as pd
feature = pd.DataFrame({'name': model.booster_.feature_name(),
                        'importance': model.feature_importances_}).sort_values(by=['importance'], ascending=False)
KS and AUC on the training, test and validation sets
# Predicted probabilities on train and test
y_pred_train_lgb = model.predict_proba(x_train[x_list])[:, 1]
y_pred_test_lgb = model.predict_proba(x_test[x_list])[:, 1]

# ROC curves, KS (max |TPR - FPR|) and AUC
train_fpr_lgb, train_tpr_lgb, _ = roc_curve(y_train, y_pred_train_lgb)
test_fpr_lgb, test_tpr_lgb, _ = roc_curve(y_test, y_pred_test_lgb)
train_ks = abs(train_fpr_lgb - train_tpr_lgb).max()
test_ks = abs(test_fpr_lgb - test_tpr_lgb).max()
train_auc = metrics.auc(train_fpr_lgb, train_tpr_lgb)
test_auc = metrics.auc(test_fpr_lgb, test_tpr_lgb)

# feature_lst[str(rk)] = feature[feature.importance>=20].name  # leftover from a fold loop; feature_lst and rk are not defined here

print('train_ks: ', train_ks)
print('test_ks: ', test_ks)
print('train_auc: ', train_auc)
print('test_auc: ', test_auc)
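The cross-validation section below imports ks_2samp but never uses it; it offers an equivalent way to get the same KS statistic, as the two-sample Kolmogorov-Smirnov distance between the score distributions of bads and goods. A minimal sketch on the test set, assuming y_test is a 0/1 series:

from scipy.stats import ks_2samp

test_ks_alt = ks_2samp(y_pred_test_lgb[y_test == 1],            # scores of the bads
                       y_pred_test_lgb[y_test == 0]).statistic  # scores of the goods
print('test_ks (ks_2samp): ', test_ks_alt)  # matches max |TPR - FPR| above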
# Features with importance >= 30 (feature was computed above)
feature[feature.importance >= 30]
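For a quick visual check of the same ranking, lightgbm's built-in plotting helper works on the fitted model (requires matplotlib; a minimal sketch):

import matplotlib.pyplot as plt

lgb.plot_importance(model, max_num_features=30)  # top 30 features by split count
plt.tight_layout()
plt.show()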
My 5-fold cross-validation
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
from sklearn.metrics import roc_auc_score, roc_curve
import lightgbm as lgb
from scipy.stats import ks_2samp
Features and labels
X = data6[modellist_sim]
y = data6['mob3m2fenzi_flag']
OOT split
# Suppose we split the data into train and test at some time point (e.g. the last month of data as the test set)
#train_cutoff = data['timestamp'].iloc[-int(0.2 * len(data))].to_pydatetime()
#train_data = data[data['timestamp'] < train_cutoff]
#oot_data = data[data['timestamp'] >= train_cutoff]
train_data= data6[(data6['mob3m2fenmu_flag']>=1)& (data6['confirm_month']>='2023-03')& (data6['confirm_month']<='2024-04')]
oot_data=data6[(data6['mob3m2fenmu_flag']>=1)& (data6['confirm_month']>'2024-04')]
X_train, y_train = train_data[modellist_sim], train_data['mob3m2fenzi_flag']
X_oot, y_oot = oot_data[modellist_sim], oot_data['mob3m2fenzi_flag']

params_old = {
    'boosting_type': 'gbdt',
    'num_leaves': 31,
    'reg_alpha': 0.1,
    'reg_lambda': 1,
    'max_depth': 3,
    'n_estimators': 800,       # alias of num_boost_round under lgb.train
    'max_features': 70,        # not a native LightGBM parameter; it has no effect
    'objective': 'binary',
    'subsample': 0.8,
    'colsample_bytree': 0.75,
    'subsample_freq': 1,
    'learning_rate': 0.04,
    'min_child_weight': 50,
    'num_iterations': 800,     # duplicates n_estimators; keep only one of the two aliases
}
Run the 5-fold cross-validation on the training set
kf = KFold(n_splits=5, shuffle=False)
oot_auc_scores, oot_ks_scores, psi_scores = [], [], []
train_auc_scores, train_ks_scores = [], []

for train_index, val_index in kf.split(X_train):
    X_train_fold, y_train_fold = X_train.iloc[train_index], y_train.iloc[train_index]
    X_val_fold, y_val_fold = X_train.iloc[val_index], y_train.iloc[val_index]

    # Build the LightGBM datasets for this fold
    train_data_fold = lgb.Dataset(X_train_fold, label=y_train_fold)
    val_data_fold = lgb.Dataset(X_val_fold, label=y_val_fold, reference=train_data_fold)

    # A second parameter dict was defined here but never used; training runs on params_old from above
    # params = {'boosting_type': 'gbdt', 'objective': 'binary', 'metric': 'binary_logloss',
    #           'num_leaves': 31, 'learning_rate': 0.05, 'feature_fraction': 0.9}

    # Train the model
    gbm = lgb.train(params_old,
                    train_data_fold,
                    num_boost_round=100,
                    valid_sets=[train_data_fold, val_data_fold],
                    callbacks=[lgb.early_stopping(10)])  # lightgbm < 4.0 used early_stopping_rounds=10

    # Score the OOT sample with this fold's best iteration
    y_oot_pred_proba = gbm.predict(X_oot, num_iteration=gbm.best_iteration)

    # Compute AUC and KS on the OOT set
    oot_auc = roc_auc_score(y_oot, y_oot_pred_proba)
    oot_auc_scores.append(oot_auc)
    fpr_oot, tpr_oot, _ = roc_curve(y_oot, y_oot_pred_proba)
    ks_oot = max(tpr_oot - fpr_oot)
    oot_ks_scores.append(ks_oot)
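psi_scores is initialized above but never filled. A minimal PSI sketch under the common 10-bin convention (the psi helper is my own illustration, not from the original), plus the averaging of the per-fold OOT metrics:

def psi(expected, actual, bins=10):
    # Population Stability Index between two score distributions
    cuts = np.percentile(expected, np.linspace(0, 100, bins + 1))
    cuts[0], cuts[-1] = -np.inf, np.inf                      # open-ended outer bins
    e = np.histogram(expected, cuts)[0] / len(expected)
    a = np.histogram(actual, cuts)[0] / len(actual)
    e, a = np.clip(e, 1e-6, None), np.clip(a, 1e-6, None)    # avoid log(0)
    return np.sum((a - e) * np.log(a / e))

# Inside the loop one could append, for example:
# psi_scores.append(psi(gbm.predict(X_val_fold, num_iteration=gbm.best_iteration), y_oot_pred_proba))

print('mean OOT AUC:', np.mean(oot_auc_scores))
print('mean OOT KS :', np.mean(oot_ks_scores))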