KeyError: 'gradients_3/dgm_net_2/Relu_5_grad/ReluGrad'

Problem description

KeyError                                  Traceback (most recent call last)

<ipython-input-47-dbe0ddf23b1b> in <module>()
     25           learning_rate = tf.compat.v1.train.exponential_decay(starter_learning_rate, global_step,
     26                                             100000, 0.96, staircase=True)
---> 27           optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss_t)
     28 
     29           # Training parameters

9 frames

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py in _get_operation_by_name_unsafe(self, name)
   3819 
   3820     with self._lock:
-> 3821       return self._nodes_by_name[name]
   3822 
   3823   def _get_operation_by_tf_operation(self, tf_oper):

KeyError: 'gradients_3/dgm_net_2/Relu_5_grad/ReluGrad'


This KeyError is raised when initializing the model. More specifically, it comes from the optimizer:

global_step = tf.Variable(decay, trainable=False)  # 0 for constant decay, 1 for exponential decay
learning_rate = tf.compat.v1.train.exponential_decay(starter_learning_rate, global_step,
                                                     100000, 0.96, staircase=True)
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss_t)
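
For completeness, this snippet runs in graph mode, roughly wrapped like the following (a minimal sketch; the values of starter_learning_rate and decay are assumed, since they are set elsewhere in the notebook):

import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # minimize() and tf.gradients require graph mode in TF 2.x

starter_learning_rate = 0.001           # assumed value
decay = 0                               # 0 for constant decay, 1 for exponential decay

# ... build the network, loss_t, global_step, learning_rate and optimizer exactly as above ...

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    # sess.run(optimizer, feed_dict={...})  # one training step

(Note that global_step is never passed to minimize(), so exponential_decay never advances and the learning rate stays at its starting value.)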

Below is my network architecture, which uses the ReLU activation function:

class DGMNet(tf.keras.Model):
    
    def __init__(self, n_layers, n_nodes, dimensions=1):
        """
        Parameters:
            - n_layers:     number of layers
            - n_nodes:      number of nodes in (inner) layers
            - dimensions:   number of spatial dimensions
        """
        super().__init__()

        self.n_layers = n_layers

        self.initial_layer = DenseLayer(dimensions + 1, n_nodes, activation="relu")
        self.lstmlikelist = []
        for _ in range(self.n_layers):
            self.lstmlikelist.append(LSTMLikeLayer(dimensions + 1, n_nodes, activation="relu"))
        self.final_layer = DenseLayer(n_nodes, 1, activation=None)


    def call(self, t, x):
        X = tf.concat([t, x], 1)

        S = self.initial_layer.call(X)
        for i in range(self.n_layers):
            S = self.lstmlikelist[i].call({'S': S, 'X': X})
        result = self.final_layer.call(S)

        return result
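
As a quick shape check, the model maps a batch of (t, x) points to one value per point (a minimal sketch; the placeholder shapes and hyperparameter values are assumed):

t = tf.compat.v1.placeholder(tf.float32, shape=(None, 1))  # time coordinates
x = tf.compat.v1.placeholder(tf.float32, shape=(None, 1))  # space coordinates
ann = DGMNet(n_layers=3, n_nodes=50)                       # assumed hyperparameters
V = ann.call(t, x)                                         # -> tensor of shape (None, 1)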
    


# Neural network layers

class DenseLayer(tf.keras.layers.Layer):
        

    def __init__(self, n_inputs, n_outputs, activation):
        """
        Parameters:
            - n_inputs:     number of inputs
            - n_outputs:    number of outputs
            - activation:   activation function
        """
        super(DenseLayer, self).__init__()
        self.n_inputs = n_inputs
        self.n_outputs = n_outputs
        self.W = self.add_weight(shape=(self.n_inputs, self.n_outputs),
                                 initializer='random_normal', trainable=True)
        self.b = self.add_weight(shape=(1, self.n_outputs),
                                 initializer='random_normal', trainable=True)
        self.activation = _get_function(activation)

    def call(self, inputs):
        S = tf.add(tf.matmul(inputs, self.W), self.b)
        S = self.activation(S)

        return S
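
So DenseLayer is just an affine map followed by the activation, S = activation(inputs · W + b). For example:

layer = DenseLayer(2, 4, activation="relu")  # 2 inputs -> 4 outputs
out = layer(tf.ones((3, 2)))                 # batch of 3 samples -> shape (3, 4)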



class LSTMLikeLayer(tf.keras.layers.Layer):
    def __init__(self, n_inputs, n_outputs, activation):
        """
        Parameters:
            - n_inputs:     number of inputs
            - n_outputs:    number of outputs
            - activation:   activation function
        """
        super(LSTMLikeLayer, self).__init__()

        self.n_outputs = n_outputs
        self.n_inputs = n_inputs

        self.Uz = self.add_weight("Uz", shape=[self.n_inputs, self.n_outputs])
        self.Ug = self.add_weight("Ug", shape=[self.n_inputs, self.n_outputs])
        self.Ur = self.add_weight("Ur", shape=[self.n_inputs, self.n_outputs])
        self.Uh = self.add_weight("Uh", shape=[self.n_inputs, self.n_outputs])
        self.Wz = self.add_weight("Wz", shape=[self.n_outputs, self.n_outputs])
        self.Wg = self.add_weight("Wg", shape=[self.n_outputs, self.n_outputs])
        self.Wr = self.add_weight("Wr", shape=[self.n_outputs, self.n_outputs])
        self.Wh = self.add_weight("Wh", shape=[self.n_outputs, self.n_outputs])
        self.bz = self.add_weight("bz", shape=[1, self.n_outputs])
        self.bg = self.add_weight("bg", shape=[1, self.n_outputs])
        self.br = self.add_weight("br", shape=[1, self.n_outputs])
        self.bh = self.add_weight("bh", shape=[1, self.n_outputs])

        self.activation = _get_function(activation)

    
    def call(self, inputs):
        S = inputs['S']
        X = inputs['X']

        Z = self.activation(tf.add(tf.add(tf.matmul(X, self.Uz), tf.matmul(S, self.Wz)), self.bz))
        G = self.activation(tf.add(tf.add(tf.matmul(X, self.Ug), tf.matmul(S, self.Wg)), self.bg))
        R = self.activation(tf.add(tf.add(tf.matmul(X, self.Ur), tf.matmul(S, self.Wr)), self.br))
        H = self.activation(tf.add(tf.add(tf.matmul(X, self.Uh), tf.matmul(tf.multiply(S, R), self.Wh)), self.bh))
        Snew = tf.add(tf.multiply(tf.subtract(tf.ones_like(G), G), H), tf.multiply(Z, S))

        return Snew
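
Written out, one pass of this layer computes (with \sigma the chosen activation and \odot elementwise multiplication):

Z      = \sigma(X U_z + S W_z + b_z)
G      = \sigma(X U_g + S W_g + b_g)
R      = \sigma(X U_r + S W_r + b_r)
H      = \sigma(X U_h + (S \odot R) W_h + b_h)
S_new  = (1 - G) \odot H + Z \odot S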



def _get_function(name):
    f = None
    if name == "tanh":
        f = tf.nn.tanh
    elif name == "sigmoid":
        f = tf.nn.sigmoid
    elif name == "relu":
        f = tf.nn.relu
    elif not name:
        f = tf.identity
    
    assert f is not None
    
    return f
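
_get_function simply maps the activation names used above to TensorFlow ops; for instance:

assert _get_function("relu") is tf.nn.relu
assert _get_function(None) is tf.identity  # the final layer passes activation=None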

loss_t comes from:

ann = DGMNet(num_layers, nodes_per_layer)

L1_t, L2_t, L3_t = loss(ann, t1_t, x1_t, t2_t, x2_t, t3_t, x3_t)
loss_t = L1_t + L2_t + L3_t

where the loss function is defined as:

def loss(model, t1, x1, t2, x2, t3, x3):
    # Loss term #1: PDE
    V = model(t1, x1)
    V_t = tf.gradients(V, t1)[0]
    V_x = tf.gradients(V, x1)[0]
    V_xx = tf.gradients(V_x, x1)[0]
    f = V_t + r*x1*V_x + 0.5*sigma**2*x1**2*V_xx - r*V

    L1 = tf.reduce_mean(tf.square(f))

    # Loss term #2: boundary condition
    L2 = tf.reduce_mean(tf.square(model(t2, x2) - 0))

    # Loss term #3: initial/terminal condition
    L3 = tf.reduce_mean(tf.square(model(t3, x3) - tf.math.maximum(x3 - K, 0)))

    return (L1, L2, L3)
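
For the tf.gradients calls to return anything, t1 and x1 must be graph tensors that V actually depends on, i.e. placeholders (or sampled tensors) fed at run time; r, sigma and K are the PDE constants, defined elsewhere in the notebook (a minimal sketch with assumed values):

r, sigma, K = 0.05, 0.25, 50.0  # assumed constants of the (Black-Scholes-type) PDE

t1_t = tf.compat.v1.placeholder(tf.float32, shape=(None, 1))  # interior points
x1_t = tf.compat.v1.placeholder(tf.float32, shape=(None, 1))
# ... likewise (t2_t, x2_t) for the boundary and (t3_t, x3_t) for the terminal condition ...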

So the problem seems to be with the optimizer. It still worked yesterday, and I haven't changed anything.

Solution

No effective solution for this problem has been found yet.

