AttributeError: 'dict' object has no attribute '_keras_mask'

Problem description

I am building a few-shot question-answering model that uses triplet loss. I am able to train the model, but when I try to predict I get the error above.

I have included an excerpt of my code below for reference. I have already tried checking the data types and other factors, but with no luck.

Can anyone tell me what is going wrong here?

import pandas as pd
import numpy as np

df = pd.read_csv(r'/content/IAS16_standard.csv', encoding="latin1")

# Split the comma-separated Questions column so each question gets its own row,
# keeping the Intent of the original row
rev_df = (df.drop('Questions', axis=1)
            .join(df.Questions
                    .str.split(',', expand=True)
                    .stack()
                    .reset_index(drop=True, level=1)
                    .rename('Questions')))

train = rev_df[['Questions', 'Intent']]
train = train.reset_index(drop=True)  # reset_index returns a new DataFrame, so assign it back
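
# Toy illustration (made-up data, not the real CSV) of what the split/stack above does:
# a row whose Questions cell holds two comma-separated questions becomes two rows
# that share the same Intent.
toy = pd.DataFrame({'Intent': ['depreciation'],
                    'Questions': ['What is depreciation?,How is depreciation measured?']})
print(toy.drop('Questions', axis=1)
         .join(toy.Questions.str.split(',', expand=True)
                  .stack().reset_index(drop=True, level=1).rename('Questions')))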

import tensorflow as tf
import tensorflow_hub as hub
module_url = 'https://tfhub.dev/google/universal-sentence-encoder-large/4'
# Import the Universal Sentence Encoder's TF Hub module
embed = hub.load(module_url)
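
# Optional sanity check: universal-sentence-encoder-large/4 maps every sentence to a
# 512-dimensional vector, which is why the Keras model below uses Input(shape=(512,)).
# The example sentences here are made up.
sample = embed(["What is the scope of IAS 16?", "How is depreciation calculated?"])
print(sample.shape)  # expected: (2, 512)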

import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

import tensorflow as tf
import tensorflow_hub as hub
import keras
import keras.backend as K
from keras.layers import *
from keras.callbacks import *
from keras.optimizers import *
from keras import Model
from keras.layers.core import Lambda, Flatten, Dense
from keras.layers import Bidirectional, LSTM
import pickle    
import os
input_text1 = Input(shape=(512,))
x = Dense(256, activation='relu')(input_text1)
x = Dropout(0.4)(x)
x = BatchNormalization()(x)  # note the capitalization: 'Batchnormalization' is not a Keras layer
x = Dense(64, activation='relu', kernel_regularizer=keras.regularizers.l2(0.001))(x)
x = Dropout(0.4)(x)
dense_layer = Dense(128, name='dense_layer')(x)
norm_layer = Lambda(lambda x: K.l2_normalize(x, axis=1), name='norm_layer')(dense_layer)

model = Model(inputs=[input_text1], outputs=norm_layer)

model.summary()
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_1 (InputLayer)         [(None, 512)]             0         
_________________________________________________________________
dense (Dense)                (None, 256)               131328    
_________________________________________________________________
dropout (Dropout)            (None, 256)               0         
_________________________________________________________________
batch_normalization (BatchNo (None, 256)               1024      
_________________________________________________________________
dense_1 (Dense)              (None, 64)                16448     
_________________________________________________________________
dropout_1 (Dropout)          (None, 64)                0         
_________________________________________________________________
dense_layer (Dense)          (None, 128)               8320      
_________________________________________________________________
norm_layer (Lambda)          (None, 128)               0         
=================================================================
Total params: 157,120
Trainable params: 156,608
Non-trainable params: 512

from keras import backend as K
from keras.models import Model
from keras.layers import Input, Layer

# Inputs for the anchor, positive and negative examples
in_a = Input(shape=(512,))
in_p = Input(shape=(512,))
in_n = Input(shape=(512,))

# Outputs for the anchor, positive and negative embedding vectors
# The same encoder model instance is shared (Siamese network)
emb_a = model(in_a)
emb_p = model(in_p)
emb_n = model(in_n)

class TripletLossLayer(Layer):
    def __init__(self, alpha, **kwargs):
        self.alpha = alpha
        super(TripletLossLayer, self).__init__(**kwargs)

    def triplet_loss(self, inputs):
        a, p, n = inputs
        p_dist = K.sum(K.square(a - p), axis=-1)
        n_dist = K.sum(K.square(a - n), axis=-1)
        return K.sum(K.maximum(p_dist - n_dist + self.alpha, 0), axis=0)

    def call(self, inputs):
        loss = self.triplet_loss(inputs)
        self.add_loss(loss)
        return loss
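
# Quick sanity check of what the layer computes (toy 2-D vectors, purely an illustration,
# not real USE embeddings): with anchor == positive and the anchor far from the negative,
# the hinge term max(p_dist - n_dist + alpha, 0) collapses to 0.
toy_a = K.constant([[1.0, 0.0]])
toy_p = K.constant([[1.0, 0.0]])
toy_n = K.constant([[0.0, 1.0]])
print(K.eval(TripletLossLayer(alpha=0.4).triplet_loss([toy_a, toy_p, toy_n])))  # 0.0 (0 - 2 + 0.4 < 0)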

# Layer that computes the triplet loss from the anchor, positive and negative embedding vectors
triplet_loss_layer = TripletLossLayer(alpha=0.4, name='triplet_loss_layer')([emb_a, emb_p, emb_n])

# Model that can be trained with anchor, positive and negative examples
nn4_small2_train = Model([in_a, in_p, in_n], triplet_loss_layer)

nn4_small2_train.summary()

__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_2 (InputLayer)            [(None, 512)]        0                                            
__________________________________________________________________________________________________
input_3 (InputLayer)            [(None, 512)]        0                                            
__________________________________________________________________________________________________
input_4 (InputLayer)            [(None, 512)]        0                                            
__________________________________________________________________________________________________
model (Functional)              (None, 128)          157120      input_2[0][0]                    
                                                                 input_3[0][0]                    
                                                                 input_4[0][0]                    
__________________________________________________________________________________________________
triplet_loss_layer (TripletLoss ()                   0           model[0][0]                      
                                                                 model[1][0]                      
                                                                 model[2][0]                      
==================================================================================================
Total params: 157,120
Trainable params: 156,608
Non-trainable params: 512

unique_train_label = np.array(train['Intent'].unique().tolist())
labels_train = np.array(train['Intent'].tolist())
map_train_label_indices = {label: np.flatnonzero(labels_train == label) for label in unique_train_label}
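
# Illustration with made-up labels (the real Intent values come from the CSV): the dict maps
# each intent label to the row indices of its questions, which get_triplets samples from below.
toy_labels = np.array(['depreciation', 'scope', 'depreciation'])
print({lab: np.flatnonzero(toy_labels == lab) for lab in np.unique(toy_labels)})
# {'depreciation': array([0, 2]), 'scope': array([1])}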

def get_triplets(unique_train_label, map_train_label_indices):
    # Pick two different intents: anchor/positive come from the first, the negative from the second
    label_l, label_r = np.random.choice(unique_train_label, 2, replace=False)
    a, p = np.random.choice(map_train_label_indices[label_l], 2, replace=True)
    n = np.random.choice(map_train_label_indices[label_r])
    return a, p, n

def get_triplets_batch(k, train_set, unique_train_label, map_train_label_indices, embed):

    while True:
        idxs_a, idxs_p, idxs_n = [], [], []
        for _ in range(k):
            a, p, n = get_triplets(unique_train_label, map_train_label_indices)
            idxs_a.append(a)
            idxs_p.append(p)
            idxs_n.append(n)

        # Look up the sampled questions and embed them with the Universal Sentence Encoder
        a = embed(train_set.iloc[idxs_a].values.tolist())
        p = embed(train_set.iloc[idxs_p].values.tolist())
        n = embed(train_set.iloc[idxs_n].values.tolist())

        # Targets are empty because TripletLossLayer registers the loss via add_loss()
        yield [a, p, n], []
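
# Optional check of the generator before training (small k just for the test): each of the
# three embedding batches should have shape (k, 512).
_gen = get_triplets_batch(4, train['Questions'], unique_train_label, map_train_label_indices, embed)
(_a, _p, _n), _ = next(_gen)
print(_a.shape, _p.shape, _n.shape)  # expected: (4, 512) (4, 512) (4, 512)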

# loss=None works here because the triplet loss was already attached inside TripletLossLayer via add_loss()
nn4_small2_train.compile(loss=None, optimizer='adam')
nn4_small2_train.fit(get_triplets_batch(128, train['Questions'], unique_train_label, map_train_label_indices, embed),
                     epochs=100, steps_per_epoch=10)

X_train = model.predict({'input_1':embed(np.stack(np.array(train['Questions'].values.tolist())))})

y_train = np.array(train['Intent'].values.tolist())

The model.predict line above is the one that gives the error.
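
Not a confirmed answer, but a hedged sketch of one thing worth trying, under the assumption that the dict keyed by 'input_1' is what ends up on the code path that expects a tensor with a _keras_mask attribute: skip the dict and feed the embedding matrix to predict() directly (the name question_embeddings below is just for illustration).

# Hedged sketch, not a verified fix: pass the embeddings directly instead of a dict.
# embed() returns a tf.Tensor; .numpy() turns it into a plain array that predict() accepts.
question_embeddings = embed(train['Questions'].values.tolist()).numpy()
X_train = model.predict(question_embeddings)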

