Input 0 of layer sequential_10 is incompatible with the layer: expected min_ndim=4, found ndim=2

Problem description

Before reshaping the xtraindata and xtestdata, I got the error: "Input 0 of layer sequential_10 is incompatible with the layer: expected min_ndim=4, found ndim=2." After reshaping xtraindata and xtestdata to (1400, 24, 1) and (600, 1), I then get this error: "Incompatible shapes: [32,1] vs. [32,6,1] [[node mean_squared_error/SquaredDifference (defined at C:\Users\User\Documents\car_person.py:188)]] [Op:__inference_test_function_7945]

Function call stack: test_function" I cannot get the evaluate function to work with the model I created. What should I do to make the test data compatible with the model?
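For context, the first error just means that Conv2D layers expect 4-D input of shape (batch, height, width, channels). A minimal sketch, assuming the flattened 576-feature rows produced from the 24x24 grayscale images in the code below (the variable names here are illustrative, not from the original script):

import numpy as np

flat = np.zeros((1400, 576))          # stand-in for the flattened training features
images = flat.reshape(-1, 24, 24, 1)  # 4-D: (batch, height, width, channels)
print(images.shape)                   # (1400, 24, 24, 1) -> satisfies min_ndim=4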

import numpy as np
import matplotlib.pyplot as plt
import os
import time
import cv2
import pandas as pd
import tensorflow as tf
import itertools as it
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
  try:
    tf.config.experimental.set_virtual_device_configuration(gpus[0],[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4096)])
  except RuntimeError as e:
    print(e)


#gpu_options=K.tf.GPUOptions(per_process_gpu_memory_fraction=0.35)

path = "C:/Users/User/Desktop/tunel_data"
training_data=[]

def create_training_data(training_data,path):
    categories = ["tunel_data_other","tunel_data_car"]
    for category in categories:
        path=os.path.join(path,category)
        for img in os.listdir(path):
            print(img)
            if category=="tunel_data_other":
                class_num= 0
                #image=Image.open(img)
                #new_image = image.resize((50,50))
                #new_image.save('car'+img.index())
                #try:
                image_array = cv2.imread(os.path.join(path,img),cv2.IMREAD_GRAYSCALE)/255
                new_array = cv2.resize(image_array,(24,24))
                print(new_array.shape)
                training_data.append([new_array,class_num])
                #except:
                    #pass
            elif category=="tunel_data_car":
                class_num = 1
                #image=Image.open(img)
                #new_image = image.resize((50,50))
                #new_image.save('person'+img.index())
                #try:
                image_array = cv2.imread(os.path.join(path,img),cv2.IMREAD_GRAYSCALE)/255
                new_array = cv2.resize(image_array,(24,24))
                print(new_array.shape)
                training_data.append([new_array,class_num])
                #except:
                    #pass
        path = "C:/Users/User/Desktop/tunel_data"
    return training_data

create_training_data(training_data,path)

x=[]
y=[]

for i in range(len(training_data)):
    x.append(training_data[i][0])
    y.append(training_data[i][1])
#print(x)
#print(y)
     
x = np.array(x).reshape(2000,576)
"""
principle_features = PCA(n_components=250)
feature = principle_features.fit_transform(x)
"""
feature = x
label = y

feature_df = pd.DataFrame(feature)

#df = DataFrame (People_List,columns=['First_Name','Last_Name','Age'])

label_df = pd.DataFrame(label)


data = pd.concat([feature_df,label_df],axis=1).to_csv('complete.csv')


data = pd.read_csv("complete.csv")

data = data.sample(frac=1).reset_index(drop=True)

print(data)

x_test,x_train,y_test,y_train = train_test_split(x,y,test_size=0.7,random_state=65)
xtraindata=pd.DataFrame(data=x_train[:,:])
xtestdata=pd.DataFrame(data=x_test[:,:])
print(xtraindata)

ytraindata=pd.DataFrame(data=y_train[:])
ytestdata=pd.DataFrame(data=y_test[:])
print(ytraindata)

xtraindata = np.asarray(xtraindata)
ytraindata = np.asarray(ytraindata)
xtestdata = np.asarray(xtestdata)
ytestdata = np.asarray(ytestdata)
x=np.asarray(x)
y=np.asarray(y)


xtraindata = xtraindata.reshape(1400,1)
xtestdata = xtestdata.reshape(600,1)

activation = ["tanh","relu","sigmoid","softmax"]
input_size1 = range(10)
input_size2 = range(10)
k_scores = []
in_size = []

possible = list(it.permutations(activation,4))

for c in possible:
    for i in input_size1:
        for a in input_size2:
            model = tf.keras.Sequential([
                tf.keras.layers.Conv2D(256, kernel_size=(3,3), padding='same', activation='relu'),
                tf.keras.layers.MaxPooling2D(pool_size=(2,2)),
                tf.keras.layers.Conv2D(512, kernel_size=(3,3), padding='same', activation='relu'),
                tf.keras.layers.Dense(250, activation=c[0]),
                tf.keras.layers.Dense(i, activation=c[1]),
                tf.keras.layers.Dense(a, activation=c[2]),
                tf.keras.layers.Dense(1, activation=c[3])])
            model.compile(optimizer='sgd',loss='mse')
            val_loss = model.evaluate(xtestdata,ytestdata,verbose=1)
            k_scores.append(val_loss)
            in_size.append([i,a])
            
print(k_scores)
print("Best activation functions for each layer:",possible[(k_scores.index((min(k_scores)))) % len(possible)],"/n Best input sizes:","840",in_size[k_scores.index((min(k_scores)))][0],in_size[k_scores.index((min(k_scores)))][1],"1")

model = tf.keras.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(250,activation=possible[(k_scores.index((min(k_scores)))) % len(possible)][0]))
model.add(tf.keras.layers.Dense(in_size[k_scores.index((min(k_scores)))][0],activation=possible[(k_scores.index((min(k_scores)))) % len(possible)][1]))
model.add(tf.keras.layers.Dense(in_size[k_scores.index((min(k_scores)))][1],activation=possible[(k_scores.index((min(k_scores)))) % len(possible)][2]))
model.add(tf.keras.layers.Dense(1,activation=possible[(k_scores.index((min(k_scores)))) % len(possible)][3]))
model.compile(optimizer="adam",loss="binary_crossentropy",metrics=["accuracy","mse"])
model.fit(x,batch_size=16,epochs=5)
predictions = model.predict([x_test])
print(predictions)
print(predictions.shape)

Solution

The output sizes do not match: you want an output of size (32, 1), but the model outputs (32, 6, 1).

It might be good to insert a Flatten() between MaxPooling2D and Dense().
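A minimal sketch of that change, assuming 24x24 single-channel inputs and example activations (not necessarily the ones your grid search would pick):

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(256, kernel_size=(3, 3), padding='same', activation='relu',
                           input_shape=(24, 24, 1)),
    tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
    tf.keras.layers.Flatten(),                      # collapses (12, 12, 256) to a vector
    tf.keras.layers.Dense(250, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')  # output shape (batch, 1)
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])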

Also, a hint: the .evaluate method only works on a trained model. You should call .fit first.
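A minimal sketch of the order of operations, assuming xtraindata and xtestdata still hold the flattened 576-feature rows and the model above is used (xtrain4d/xtest4d are illustrative names):

xtrain4d = xtraindata.reshape(-1, 24, 24, 1)               # 1400 samples
xtest4d = xtestdata.reshape(-1, 24, 24, 1)                 # 600 samples
model.fit(xtrain4d, ytraindata, batch_size=16, epochs=5)   # train first
val_loss = model.evaluate(xtest4d, ytestdata, verbose=1)   # then evaluate
predictions = model.predict(xtest4d)                       # shape (600, 1)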
