Keras中带有文本和图像的混合模型

问题描述

我正在处理具有图像和文本的数据集。我需要创建将文本和图像作为输入的混合模型,以将图像分为两类。我的数据集看起来像所附的图像。

enter image description here

我遇到 Data cardinality is ambiguous 错误,不知道该如何解决。下面是我的代码:

import tensorflow as tf
import pandas as pd
import numpy as np

base_dir = "D:/Dataset/xxxx/datasets/xxx/xx/xxxxx/"

import os

# The three dataset splits are json-lines files under the dataset root.
train_dir = os.path.join(base_dir, "trin.jsonl")
test_dir = os.path.join(base_dir, "tst.jsonl")
dev_dir = os.path.join(base_dir, "dv.jsonl")

# Read each split (one JSON record per line) and key the rows by sample id,
# so later lookups/reindexing can be done by id.
df_train = pd.read_json(train_dir, lines=True).set_index('id')
df_test = pd.read_json(test_dir, lines=True).set_index('id')
df_dev = pd.read_json(dev_dir, lines=True).set_index('id')

from tensorflow.keras import optimizers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import re
import spacy
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

# spaCy English model; used by spacy_tokenizer for lemmatisation and
# stop-word/whitespace filtering.
nlp = spacy.load('en_core_web_md')

# Only rescaling is applied: pixel values go from [0, 255] to [0, 1].
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

# Replace the integer labels with class-name strings so they can be used
# as y_col by flow_from_dataframe below.
label_map = {1:"Hate",0:"No_Hate"}
df_dev['label']=df_dev['label'].map(label_map)
df_train['label']=df_train['label'].map(label_map)

# NOTE(review): `img_path` is never defined in this file — it must be the
# directory containing the image files, otherwise this raises a NameError.
# shuffle=False keeps the generator's file order fixed so the text rows can
# be aligned to it later via reindex.
train_generator = train_datagen.flow_from_dataframe(dataframe=df_train,directory=img_path,x_col="img",y_col="label",target_size=(224,224),batch_size=20,class_mode="binary",shuffle=False)

def spacy_tokenizer(sentence):
    """Normalise a sentence for the text model.

    Strips every non-alphanumeric character, then drops whitespace tokens,
    stop words and single-character tokens, lemmatising what remains.
    Returns the kept lemmas joined by single spaces.
    """
    cleaned = re.sub(r"[^a-zA-Z0-9]+", " ", sentence)
    kept = []
    for token in nlp(cleaned):
        if token.is_space or token.is_stop or len(token) == 1:
            continue
        kept.append(token.lemma_)
    return ' '.join(kept)
    
# Recover the integer sample id from each generator filename
# ("subdir/12345.png" -> 12345) so the text rows can be re-ordered to match
# the image order. NOTE(review): assumes each path has exactly one '/' and a
# 4-character extension — confirm against the actual dataset layout.
image_files = pd.Series(train_generator.filenames)
image_files = image_files.str.split('/', expand=True)[1].str[:-4]
image_files = list(map(int, image_files))

# Align the text dataframe with the (unshuffled) image generator order.
df_sorted = df_train.reindex(image_files)
df_sorted.head(1)

# BUG(review): this pulls ONE batch (batch_size=20 images) while the padded
# text below covers the whole dataset — this mismatch is exactly the
# "Data cardinality is ambiguous: x sizes: 8500,20" error raised later by
# fit(). All images must be materialised (or the model trained from a joint
# generator) so both inputs share the same first dimension.
images, labels = next(train_generator)

# Maximum token length per sample; shorter sequences are padded, longer ones
# truncated. BUG FIX: `maxlen` was used here and in create_nlp() but never
# defined (NameError) — adjust the value to the corpus if needed.
maxlen = 100

tokenizer = Tokenizer(num_words=10000)

# Build the vocabulary from the cleaned text and convert every sample into a
# fixed-length integer sequence.
tokenizer.fit_on_texts(df_sorted['new_text'].values)
sequences = tokenizer.texts_to_sequences(df_sorted['new_text'].values)
train_padd = pad_sequences(sequences, maxlen=maxlen, padding='post', truncating='post')

from tensorflow.keras.models import Model
from tensorflow.keras import layers
from tensorflow.keras import models
from tensorflow.keras.layers import Embedding,Flatten,Dense
# BUG FIX: the class is spelled `BatchNormalization`; `Batchnormalization`
# raises ImportError.
from tensorflow.keras.layers import Dense,LSTM,Embedding,Dropout,SpatialDropout1D,Conv1D,MaxPooling1D,GRU,BatchNormalization
from tensorflow.keras.layers import Input,Bidirectional,GlobalAveragePooling1D,GlobalMaxPooling1D,concatenate,LeakyReLU

def create_nlp():
    """Build the text branch of the multimodal model.

    Frozen pretrained embeddings followed by two Conv1D/MaxPooling1D stages,
    flattened to a feature vector for fusion with the image branch.

    Relies on module-level `maxlen` (sequence length) and `text_embedding`
    (pretrained embedding matrix of shape (vocab_size, embed_dim)).
    NOTE(review): `text_embedding` is not defined anywhere in this file — it
    must be built (e.g. from the spaCy vectors) before calling this function.

    Returns:
        A keras Model mapping (maxlen,) integer sequences to a flat feature
        vector.
    """
    # BUG FIX: shape must be a tuple — `(maxlen)` is just the int `maxlen`.
    sequence_input = Input(shape=(maxlen,))
    embedding_layer = Embedding(
        input_dim=text_embedding.shape[0],
        output_dim=text_embedding.shape[1],
        weights=[text_embedding],
        input_length=maxlen,
        trainable=False,  # keep the pretrained vectors fixed
    )
    embedded_sequence = embedding_layer(sequence_input)
    l_conv_1 = Conv1D(128, 5, activation='relu')(embedded_sequence)
    l_pool_1 = MaxPooling1D(5)(l_conv_1)
    # BUG FIX: Conv1D requires kernel_size as its second argument; it was
    # missing, which raises a TypeError.
    l_conv_2 = Conv1D(128, 5, activation='relu')(l_pool_1)
    l_pool_2 = MaxPooling1D(5)(l_conv_2)
    l_flat = Flatten()(l_pool_2)
    model = Model(sequence_input, l_flat)
    return model
    
    
from tensorflow.keras.applications import VGG16
from tensorflow.keras import optimizers

def create_img():
    """Build the image branch of the multimodal model.

    A frozen VGG16 convolutional base (ImageNet weights, classifier head
    removed) over 224x224 RGB input, flattened and projected to a 256-d
    feature vector.

    Returns:
        A keras Model mapping (224, 224, 3) images to a 256-d feature vector.
    """
    img_input = Input(shape=(224, 224, 3))
    # BUG FIX: input_shape was (224, 3), which is not a valid image shape;
    # VGG16 expects (height, width, channels).
    conv_base = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    # Keep the pretrained convolutional weights fixed during training.
    conv_base.trainable = False
    conv_l_1 = conv_base(img_input)
    flat_l = Flatten()(conv_l_1)
    dense_l = Dense(256, activation='relu')(flat_l)
    model = Model(img_input, dense_l)
    return model

# Fuse the text and image branches into a single binary classifier.
nlp_1 = create_nlp()
img_cnn = create_img()
combinedInput = concatenate([nlp_1.output, img_cnn.output])

x = Dense(4, activation="relu")(combinedInput)
x = Dense(1, activation="sigmoid")(x)
model1 = Model(inputs=[nlp_1.input, img_cnn.input], outputs=x)
# BUG FIX: `lr` is the deprecated alias; `learning_rate` is the supported
# keyword for tf.keras optimizers.
opt = optimizers.Adam(learning_rate=1e-3, decay=1e-3 / 200)
model1.compile(loss="binary_crossentropy", metrics=['acc'], optimizer=opt)

# BUG FIX: `train_y` was never defined (NameError). With shuffle=False the
# generator's `.classes` gives the 0/1 label per file, in the same order as
# `train_padd` (which was reindexed to the generator's file order).
train_y = train_generator.classes

# NOTE(review): `images` must contain one image per text sample. If it holds
# only a single generator batch (20 images vs 8500 texts), fit() raises
# "Data cardinality is ambiguous" because the two inputs have different first
# dimensions — load all images, or train from a generator that yields
# ([text_batch, image_batch], y_batch) pairs.
model1_history = model1.fit([train_padd, images], train_y, epochs=15, batch_size=10)

以下是错误

ValueError: Data cardinality is ambiguous:
  x sizes: 8500,20
  y sizes: 8500
Please provide data which shares the same first dimension.

解决方法

暂未找到可以解决该程序问题的有效方法,小编正在努力寻找整理中!

如果你已经找到好的解决方法,欢迎将解决方案带上本链接一起发送给小编。

小编邮箱:dio#foxmail.com (将#修改为@)