Improving the prediction accuracy of a machine learning model in Python

Problem description

We are currently implementing an ML model in Python for a local company to predict credit scores in the 0-999 range. Eleven independent variables (credit history and payment behaviour) and one dependent variable (the credit score) were extracted from the database. The client has stated that, to be useful, the production model must have an MAE below 100 points. The problem is that we have already tried several algorithms for this regression, but our models do not generalize well to unseen data. So far the best-performing algorithm seems to be Random Forest, but its MAE on the test data is still above the acceptable value. Here is our code:

import numpy as np
from sklearn.preprocessing import RobustScaler
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.ensemble import RandomForestRegressor
from sklearn import metrics
from keras.layers import Dense
from keras.models import Sequential

# Linear Model
def GetLinearModel(X,y):
    model = LinearRegression()
    model.fit(X,y)
    return model   

# Ridge Regression
def GetRidge(X,y):
    model = Ridge(alpha=0.01)
    model.fit(X,y)   # fit on the function arguments, not the globals
    return model

# LASSO Regression
def GetLASSO(X,y):
    model = Lasso(alpha=0.01)
    model.fit(X,y)
    return model

# ElasticNet Regression
def GetElasticNet(X,y):
    model = ElasticNet(alpha=0.01)
    model.fit(X,y)
    return model

# Random Forest
def GetRandomForest(X,y):
    model = RandomForestRegressor(n_estimators=32,random_state=0)
    model.fit(X,y)
    return model

# Neural Networks
def GetNeuralNetworks(X,y):
    model = Sequential()
    model.add(Dense(32,activation = 'relu',input_dim = 11))
    model.add(Dense(units = 32,activation = 'relu'))
    model.add(Dense(units = 32,activation = 'relu'))
    model.add(Dense(units = 1))
    model.compile(optimizer = 'adam',loss = 'mean_absolute_error')
    model.fit(X,y,batch_size = 100,epochs = 500,verbose=0)
    return model

# Train data: one row per customer, the 11 features followed by the credit
# score in the last column. (The literal array values were garbled when the
# post was extracted and could not be recovered, so the rows are elided.)
train_set = np.array([
    # ... rows elided: garbled in the original post ...
])

# Test data: same layout as the train data.
test_set = np.array([
    # ... rows elided: garbled in the original post ...
])

# split datasets into independent and dependent variables
X_train,y_train = train_set[:,:-1],train_set[:,-1]    
X_test,y_test = test_set[:,:-1],test_set[:,-1]

# feature scaling
sc = RobustScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.fit_transform(X_test)

# Linear model
reg = GetLinearModel(X_train,y_train)
y_pred = reg.predict(X_test)
mae = metrics.mean_absolute_error(y_test,y_pred)
print("%15s: %10f" % ("Linear",mae))

# Ridge Regression
reg = GetRidge(X_train,y_train)
y_pred = reg.predict(X_test)
mae = metrics.mean_absolute_error(y_test,y_pred)
print("%15s: %10f" % ("Ridge",mae))

# LASSO Regression
reg = GetLASSO(X_train,y_train)
y_pred = reg.predict(X_test)
mae = metrics.mean_absolute_error(y_test,y_pred)
print("%15s: %10f" % ("LASSO",mae))

# ElasticNet Regression
reg = GetElasticNet(X_train,y_train)
y_pred = reg.predict(X_test)
mae = metrics.mean_absolute_error(y_test,y_pred)
print("%15s: %10f" % ("ElasticNet",mae))

# Random Forest
reg = GetRandomForest(X_train,y_train)
y_pred = reg.predict(X_test)
mae = metrics.mean_absolute_error(y_test,y_pred)
print("%15s: %10f" % ("Random Forest",mae))

# Neural networks
reg = GetNeuralNetworks(X_train,y_train)
y_pred = reg.predict(X_test)
mae = metrics.mean_absolute_error(y_test,y_pred)
print("%15s: %10f" % ("Neural Networks",mae))

Output

         Linear: 141.265089
          Ridge: 141.267797
          LASSO: 141.274700
     ElasticNet: 141.413544
  Random Forest: 102.701562
WARNING:tensorflow:11 out of the last 11 calls to <function Model.make_predict_function.<locals>.predict_function at 0x00000229766694C0> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.
Neural Networks: 122.301840

Any help on how to improve the accuracy of the models would be greatly appreciated.

Kind regards.

Solution

I am using the dataset you provided in the example. I also created training, validation, and test datasets to avoid the data leakage that @Prayson W. Daniel mentioned.

For neural networks, you need to make sure that both the labels and the features are scaled; you can use StandardScaler for that. You also need to make sure that the features and the labels are 2-D. In your example, your labels are a 1-D array.

Use the following code to extract the labels as a 2-D array:

Train_labels = train_set[:,[-1]]   # indexing with [-1] keeps the column axis, so the result is 2-D

You can use StandardScaler to normalize the data; make sure that both the features and the labels are normalized.
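A minimal sketch of that scaling step (the variable names below are illustrative, not taken from the answer's code):

from sklearn.preprocessing import StandardScaler

# Separate scalers for features and labels, so predictions can later be
# mapped back to the 0-999 score range with inverse_transform.
x_scaler = StandardScaler()
y_scaler = StandardScaler()

X_train_scaled = x_scaler.fit_transform(X_train)
Y_train_scaled = y_scaler.fit_transform(train_set[:,[-1]])  # 2-D labels
X_test_scaled = x_scaler.transform(X_test)                  # reuse the train statistics

# After training: y_scaler.inverse_transform(model.predict(X_test_scaled))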

Now, once you build the ANN, you need to make sure your network sees enough data. Since your training and test sets are very small, you can use K-fold cross-validation. I am not using K-fold here; I am just creating the model:

from tensorflow import keras as K
from tensorflow.keras import regularizers

def build_model():
    Model = K.models.Sequential()
    Model.add(K.layers.Dense(units=21, activation='relu',
                             kernel_regularizer=regularizers.l2(0.001),
                             input_dim=11))
    Model.add(K.layers.Dropout(0.2))
    Model.add(K.layers.Dense(21, kernel_regularizer=regularizers.l2(0.001)))
    Model.add(K.layers.Dropout(0.2))
    Model.add(K.layers.Dense(21, activation='relu'))
    Model.add(K.layers.Dense(1))

    # Compile the model
    Optimizer = K.optimizers.Nadam()
    Model.compile(optimizer=Optimizer, loss='mae', metrics=[r2_keras_custom])
    return Model


model = build_model()
history = model.fit(x=X_train, y=Y_train, epochs=200, batch_size=29,
                    validation_data=(X_test, Y_test))

I am using R² as a custom metric; you can also create your own. Here I am using R² = 1 - RSS/TSS.
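The answer does not show the metric's code; a minimal sketch of such a 1 - RSS/TSS metric, assuming the Keras backend API, could look like this:

import tensorflow.keras.backend as Kb

def r2_keras_custom(y_true, y_pred):
    # R^2 = 1 - RSS/TSS, with epsilon() guarding against division by zero
    rss = Kb.sum(Kb.square(y_true - y_pred))
    tss = Kb.sum(Kb.square(y_true - Kb.mean(y_true)))
    return 1 - rss / (tss + Kb.epsilon())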

import matplotlib.pyplot as plt

plt.plot(history.history['val_r2_keras_custom'])
plt.plot(history.history['r2_keras_custom'])
plt.legend(['Test_score','Train_score'])
plt.show()

(Image: train and test R² curves over the training epochs.)

(Image: final scores.)

Hope this helps; others are welcome to correct me.


If that is the whole dataset, it is very small. One option to consider is cross-validation instead of splitting the data into training and validation (a.k.a. test) sets. Cross-validation is a method suited to small datasets: all of the data is used for both training and validation, while still protecting against overfitting. A minimal sketch is shown below.
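A minimal sketch of that scheme with scikit-learn (assuming X and y hold all rows of the combined dataset):

from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestRegressor

# 5-fold CV: each fold serves once as validation, the rest as training
scores = cross_val_score(RandomForestRegressor(n_estimators=32, random_state=0),
                         X, y, cv=5, scoring='neg_mean_absolute_error')
print("MAE: %.2f +/- %.2f" % (-scores.mean(), scores.std()))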


You can perform hyperparameter tuning and cross-validation for each model.

This class can help you do that: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html

GridSearchCV is also compatible with Keras models. For that, you can have a look at: https://machinelearningmastery.com/grid-search-hyperparameters-deep-learning-models-python-keras/
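For instance, a minimal GridSearchCV sketch for the random forest (the grid values below are illustrative, not tuned):

from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestRegressor

param_grid = {'n_estimators': [8, 16, 32, 64, 128],
              'max_depth': [None, 5, 11, 20]}
search = GridSearchCV(RandomForestRegressor(random_state=0), param_grid,
                      cv=5, scoring='neg_mean_absolute_error')
search.fit(X_train, y_train)
print(search.best_params_, "MAE: %.2f" % -search.best_score_)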


Personally, I think the small number of records in the training dataset calls for a small number of base estimators in the ensemble. Checking your code: I had not used RobustScaler before, but I would call transform on the test dataset rather than fit_transform, so that the test data is scaled with the statistics learned from the training data (see the two-line sketch below).
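In other words, under the usual scikit-learn scaler usage:

sc = RobustScaler()
X_train = sc.fit_transform(X_train)  # fit the scaler on the training data only
X_test = sc.transform(X_test)        # apply the same statistics to the test data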

Back to your code: Random Forest appears to give the highest accuracy. Better performance can be obtained by tuning some of its parameters, including the number of estimators and max_depth. Beyond that, as recommended in the other answers/comments, hyperparameter tuning of the algorithms is what is needed here.

# -*- coding: utf-8 -*-
"""
Created on Wed Jan  6 20:50:44 2021

@author: AliHaidar
"""

import numpy as np
from sklearn.preprocessing import RobustScaler
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.ensemble import RandomForestRegressor,GradientBoostingRegressor,AdaBoostRegressor
from sklearn import metrics

from xgboost import XGBRegressor


# Linear Model
def GetLinearModel(X,y):
    model = LinearRegression()
    model.fit(X,y)
    return model   

# Ridge Regression
def GetRidge(X,y):
    model = Ridge(alpha=0.01)
    model.fit(X,y)   # fit on the function arguments, not the globals
    return model

# LASSO Regression
def GetLASSO(X,y):
    model = Lasso(alpha=0.01)
    model.fit(X,y)
    return model

# ElasticNet Regression
def GetElasticNet(X,y):
    model = ElasticNet(alpha=0.01)
    model.fit(X,y)
    return model

# Random Forest
def GetRandomForest(X,y):
    model = RandomForestRegressor(n_estimators=4,random_state=0,max_depth=11)  # fewer trees, limited depth for the small dataset
    model.fit(X,y)
    return model


# Train data: identical to the (garbled) array in the question, elided here.
train_set = np.array([
    # ... rows elided ...
])

# Test data: identical to the (garbled) array in the question, elided here.
test_set = np.array([
    # ... rows elided ...
])

# split datasets into independent and dependent variables
X_train,y_train = train_set[:,:-1],train_set[:,-1]    
X_test,y_test = test_set[:,:-1],test_set[:,-1]

# feature scaling
sc = RobustScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)   # transform only: reuse the training statistics

# Linear model
reg = GetLinearModel(X_train,y_train)
y_pred = reg.predict(X_test)
mae = metrics.mean_absolute_error(y_test,y_pred)
print("%15s: %10f" % ("Linear",mae))

# Ridge Regression
reg = GetRidge(X_train,y_train)
y_pred = reg.predict(X_test)
mae = metrics.mean_absolute_error(y_test,y_pred)
print("%15s: %10f" % ("Ridge",mae))

# LASSO Regression
reg = GetLASSO(X_train,y_train)
y_pred = reg.predict(X_test)
mae = metrics.mean_absolute_error(y_test,y_pred)
print("%15s: %10f" % ("LASSO",mae))

# ElasticNet Regression
reg = GetElasticNet(X_train,y_train)
y_pred = reg.predict(X_test)
mae = metrics.mean_absolute_error(y_test,y_pred)
print("%15s: %10f" % ("ElasticNet",mae))

# Random Forest
reg = GetRandomForest(X_train,y_train)
y_pred = reg.predict(X_test)
mae = metrics.mean_absolute_error(y_test,y_pred)
print("%15s: %10f" % ("Random Forest",mae))


Output:

         Linear: 141.265089
          Ridge: 141.267797
          LASSO: 141.274700
     ElasticNet: 141.413544
  Random Forest:  90.776332