Using the LightGBM Model and Other Models in Python

相信光的奥特王小懒, posted on 2022/04/28 17:02:56
[Abstract] We often need to process and analyze data, and the analysis requires choosing among different models.
import pandas as pd
import numpy
import warnings
from sklearn.preprocessing import scale
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from xgboost.sklearn import XGBClassifier
import lightgbm as lgb

1. Splitting X and Y

X is the feature data (the features), and Y is the target, i.e., whether the loan is overdue: 1 means overdue, 0 means not overdue.

2. Separating the feature values and the label values

wxl_y = df['target']                 # df is your DataFrame; wxl_y is the label column
wxl_X = df.drop(columns=['target'])  # everything else is a feature
wxl_X = scale(wxl_X, axis=0)         # standardize the features (zero mean, unit variance)
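Before modeling, it is also worth checking how unbalanced the target is, since the is_unbalance flag used in the LightGBM parameters below depends on it. A minimal sketch, assuming wxl_y is the pandas Series taken from df['target'] above:

# Check the label distribution: share of overdue (1) vs. non-overdue (0)
print(wxl_y.value_counts(normalize=True))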

3. Splitting the full dataset into a training set and a test set

# required import
from sklearn.model_selection import train_test_split
# split into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(wxl_X, wxl_y, test_size=0.2)
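For an imbalanced overdue label it can also help to keep the class ratio the same in both splits and to fix the random seed for reproducibility. A minimal sketch, assuming the wxl_X and wxl_y arrays from step 2 (the stratify and random_state settings are illustrative, not from the original post):

# Stratified, reproducible split (illustrative settings)
X_train, X_test, y_train, y_test = train_test_split(
    wxl_X, wxl_y, test_size=0.2, stratify=wxl_y, random_state=2018)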

4. Making predictions with the LightGBM model and evaluating the results

import lightgbm as lgb

lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference = lgb_train)
# LightGBM parameter settings; tune them to your own needs
params = {
    'task':'train',
    'boosting_type':'gbdt',
    'objective':'binary',
    'metric':{'l2','auc','binary_logloss'},
    'num_leaves':40,
    'learning_rate':0.05,
    'feature_fraction':0.9,
    'bagging_fraction':0.8,
    'bagging_freq':5,
    'verbose':0,
    'is_unbalance':True
      
}
# training settings
gbm = lgb.train(params,lgb_train,num_boost_round=1000,valid_sets=lgb_eval,early_stopping_rounds=100)
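Note that in LightGBM 4.x the early_stopping_rounds keyword of lgb.train was removed; on a recent version, early stopping is passed through callbacks instead. A rough equivalent of the call above, shown as a sketch:

# Equivalent training call for LightGBM >= 4.0, where early stopping
# and evaluation logging are configured via callbacks
gbm = lgb.train(
    params,
    lgb_train,
    num_boost_round=1000,
    valid_sets=[lgb_eval],
    callbacks=[lgb.early_stopping(stopping_rounds=100),
               lgb.log_evaluation(period=100)],
)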



# model prediction
lgb_pre = gbm.predict(X_test)  # the input must have the same format as the training data


# evaluate the result
from sklearn.metrics import roc_auc_score
auc_score = roc_auc_score(y_test, lgb_pre)
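For the binary objective, gbm.predict returns probabilities, which is exactly what roc_auc_score expects. If you also want threshold-based metrics such as accuracy or F1, the probabilities first have to be turned into 0/1 labels; a minimal sketch using the usual 0.5 cut-off (the threshold is an assumption, not from the original post):

from sklearn.metrics import accuracy_score, f1_score

# Convert predicted probabilities into hard labels at a 0.5 threshold
lgb_pre_label = (lgb_pre >= 0.5).astype(int)
print("Accuracy:", accuracy_score(y_test, lgb_pre_label))
print("F1:", f1_score(y_test, lgb_pre_label))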


# save the model
gbm.save_model('whx19961212.txt')
# load the model
import lightgbm as lgb
gbm = lgb.Booster(model_file = 'whx19961212.txt')
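The reloaded Booster is used the same way as the trained one; a short usage sketch, assuming X_test and y_test from above are still available:

# Predict with the reloaded model; the output is again one probability per sample
loaded_pre = gbm.predict(X_test)
print(roc_auc_score(y_test, loaded_pre))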

5. Building various other models

lr = LogisticRegression(random_state=2018,tol=1e-6)  # logistic regression

tree = DecisionTreeClassifier(random_state=2018)  # decision tree

svm = SVC(probability=True,random_state=2018,tol=1e-6)  # SVM

forest=RandomForestClassifier(n_estimators=100,random_state=2018)  # random forest

Gbdt=GradientBoostingClassifier(random_state=2018)  # GBDT

Xgbc=XGBClassifier(random_state=2018)  # XGBoost

gbm=lgb.LGBMClassifier(random_state=2018)  # LightGBM
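All of these estimators follow the scikit-learn fit/predict interface, so each one can also be trained on the split from step 3 and evaluated directly; a minimal sketch for the random forest (the same pattern applies to the others):

# Fit one of the models on the train/test split from step 3
forest.fit(X_train, y_train)
forest_pre = forest.predict_proba(X_test)[:, 1]  # probability of class 1 (overdue)
print("RandomForest AUC:", roc_auc_score(y_test, forest_pre))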

6. Building a scoring function for all the models

def muti_score(model):
    warnings.filterwarnings('ignore')
    accuracy = cross_val_score(model, wxl_X, wxl_y, scoring='accuracy', cv=5)
    precision = cross_val_score(model, wxl_X, wxl_y, scoring='precision', cv=5)
    recall = cross_val_score(model, wxl_X, wxl_y, scoring='recall', cv=5)
    f1_score = cross_val_score(model, wxl_X, wxl_y, scoring='f1', cv=5)
    auc = cross_val_score(model, wxl_X, wxl_y, scoring='roc_auc', cv=5)
    print("准确率:",accuracy.mean())
    print("精确率:",precision.mean())
    print("召回率:",recall.mean())
    print("F1_score:",f1_score.mean())
    print("AUC:",auc.mean())
    
model_name=["lr","tree","svm","forest","Gbdt","Xgbc","gbm"]
for name in model_name:
    model=eval(name)
    print(name)
    muti_score(model)
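The eval(name) lookup works here because the model variables live in the local namespace, but a plain dictionary is a safer way to iterate over the models; a small alternative sketch:

# Dictionary-based iteration avoids eval()
models = {"lr": lr, "tree": tree, "svm": svm, "forest": forest,
          "Gbdt": Gbdt, "Xgbc": Xgbc, "gbm": gbm}
for name, model in models.items():
    print(name)
    muti_score(model)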