问题描述
我正在运行此模型以集成神经网络和随机森林。
我得到了随机森林模型的准确性。但是,此后我看到一个错误消息。
ValueError: Filler values must be provided when X has more than 2 training features.（ValueError：当 X 的训练特征多于两个时，必须提供填充值。）
import itertools
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from mlxtend.classifier import StackingClassifier
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,Dropout,Activation
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import Flatten,Conv2D,MaxPooling2D
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score,train_test_split
from mlxtend.plotting import plot_learning_curves
from mlxtend.plotting import plot_decision_regions
import pickle
import numpy as np
from keras.utils import to_categorical
from keras import backend as K
from sklearn.metrics import accuracy_score
# Load the pre-pickled list of (feature, label) pairs.
# NOTE(review): pickle.load can execute arbitrary code — only load
# data.pickle from a trusted source.
with open('data.pickle', 'rb') as pick:  # `with` guarantees the file closes
    data = pickle.load(pick)

epochs = 23
batch_size = 15

# Split the (feature, label) pairs into parallel arrays.
features = [feature for feature, _ in data]
labels = [label for _, label in data]

X = np.array(features)
y = np.array(labels)
def swish(x):
    """Swish activation function: x * sigmoid(x)."""
    return x * K.sigmoid(x)
def build_ann():
    """Build and compile the CNN used as the Keras base classifier.

    Returns a compiled ``Sequential`` model that expects grayscale
    224x224x1 inputs and emits 2 sigmoid outputs (binary task,
    one-hot labels assumed — TODO confirm label encoding).
    """
    # BUG FIX: the original had an extra closing parenthesis on three
    # Conv2D lines, e.g. `CNN.add(Conv2D(32,3)))` — a SyntaxError.
    cnn = Sequential()
    cnn.add(Conv2D(32, kernel_size=(3, 3), input_shape=(224, 224, 1)))
    cnn.add(Activation(swish))
    cnn.add(Conv2D(32, 3))
    cnn.add(Activation(swish))
    cnn.add(MaxPooling2D(pool_size=(2, 2)))
    cnn.add(Conv2D(64, 3))
    cnn.add(Activation(swish))
    cnn.add(Conv2D(64, 3))
    cnn.add(Activation(swish))
    cnn.add(MaxPooling2D(pool_size=(2, 2)))
    # Turn the feature maps into a flat vector.
    cnn.add(Flatten())
    # Fully connected head.
    cnn.add(Dense(512))
    cnn.add(Activation(swish))
    cnn.add(Dense(2))
    cnn.add(Activation('sigmoid'))
    cnn.compile(optimizer='adam', loss='binary_crossentropy',
                metrics=['accuracy'])
    return cnn
clf1 = RandomForestClassifier(n_estimators=100, n_jobs=-1, criterion='gini')
# BUG FIX: the KerasClassifier keyword is `epochs`; the obsolete
# `nb_epoch` spelling is not accepted, so the original either raised or
# silently trained with the default epoch count.
clf2 = KerasClassifier(build_fn=build_ann, epochs=epochs,
                       batch_size=batch_size, verbose=0)
lr = LogisticRegression()
# BUG FIX: the keyword is lowercase `meta_classifier`; `Meta_classifier`
# raises TypeError: unexpected keyword argument.
sclf = StackingClassifier(classifiers=[clf1, clf2], meta_classifier=lr)
label = ['Random Forest', 'KerasNet', 'Stacking Classifier']
clf_list = [clf1, clf2, sclf]

fig = plt.figure(figsize=(10, 8))
gs = gridspec.GridSpec(2, 2)
grid = itertools.product([0, 1], repeat=2)

clf_cv_mean = []
clf_cv_std = []
# BUG FIX: the original wrote `for clf, label, Grd in zip(clf_list, grid)`,
# which unpacks 2-tuples into three names (ValueError) and shadows the
# `label` list.  Zip the labels list in, and use a distinct loop name.
for clf, lab, grd in zip(clf_list, label, grid):
    scores = cross_val_score(clf, X, y, cv=3, scoring='accuracy')
    print("Accuracy: %.2f (+/- %.2f) [%s]" % (scores.mean(), scores.std(), lab))
    clf_cv_mean.append(scores.mean())
    clf_cv_std.append(scores.std())

    clf.fit(X, y)
    ax = plt.subplot(gs[grd[0], grd[1]])
    # BUG FIX (the reported ValueError): plot_decision_regions draws a 2-D
    # decision surface, so when X has more than two features it requires
    # `feature_index` plus `filler_feature_values` for every remaining
    # feature.  Plot the first two features and hold the rest at their means.
    # NOTE(review): the CNN branch was fit on 4-D image input, so predicting
    # on the 2-D+filler grid may still mismatch its input shape — confirm
    # whether decision-region plots are meaningful for the image model.
    X2 = X.reshape(X.shape[0], -1)
    if X2.shape[1] > 2:
        filler = {i: X2[:, i].mean() for i in range(2, X2.shape[1])}
        fig = plot_decision_regions(X=X2, y=y, clf=clf,
                                    feature_index=(0, 1),
                                    filler_feature_values=filler)
    else:
        fig = plot_decision_regions(X=X2, y=y, clf=clf)
    plt.title(lab)
plt.show()
# Plot mean CV accuracy (with std-dev error bars) for each classifier.
plt.figure()
# BUG FIX: the original used range(4) for 3 classifiers, which makes
# errorbar raise a shape-mismatch error; size the x-axis from the data.
(_, caps, _) = plt.errorbar(range(len(clf_cv_mean)), clf_cv_mean,
                            yerr=clf_cv_std, c='blue', fmt='-o', capsize=5)
for cap in caps:
    cap.set_markeredgewidth(1)
plt.xticks(range(3), ['RF', 'KN', 'Stacking'])
plt.ylabel('Accuracy')
plt.xlabel('Classifier')
plt.title('Stacking Ensemble')
plt.show()
# Plot learning curves for the stacking ensemble.
# BUG FIX: train_test_split was called without `y`, so it returned only two
# arrays and the 4-way unpacking raised a ValueError.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42)
plt.figure()
# BUG FIX: plot_learning_curves takes (X_train, y_train, X_test, y_test, clf);
# the original passed y_test in the y_train slot and omitted the test split.
plot_learning_curves(X_train, y_train, X_test, y_test, sclf,
                     print_model=False, style='ggplot')
plt.show()
有人可以帮助我吗?我尝试了使用KerasClassifier进行组装的许多方法,但未成功。
解决方法
暂无找到可以解决该程序问题的有效方法,小编努力寻找整理中!
如果你已经找到好的解决方法,欢迎将解决方案带上本链接一起发送给小编。
小编邮箱:dio#foxmail.com (将#修改为@)