Negative loss when training the HED network on Nvidia DIGITS

Problem description

I am trying to train the HED network (found here: https://github.com/s9xie/hed) with Nvidia DIGITS, but during training the loss drops to very large negative numbers and the image output is not (very) close to the labels. I'm fairly sure this is a simple normalization problem or something similar, but I haven't made any progress on it over the past few days. Could I get a second pair of eyes on this?
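From what I understand, SigmoidCrossEntropyLoss expects the label blob to be in [0, 1], so if the edge maps in my label LMDB are actually stored as 0-255 the loss can go arbitrarily negative. This is the kind of "simple normalization problem" I suspect; the snippet below is only a sketch of what I might try, not part of my current network, and the scale value assumes 8-bit labels:

# Hypothetical change (not in my current network): rescale 8-bit edge
# labels into [0, 1] before they reach SigmoidCrossEntropyLoss.
layer {
  name: "label"
  type: "Data"
  top: "label"
  include { phase: TRAIN }
  transform_param {
    scale: 0.00390625  # 1/256, assuming the LMDB stores 0-255 edge maps
  }
  data_param {
    batch_size: 1
    backend: LMDB
  }
}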

Network:

# data layers
layer {
  name: "data"
  type: "Data"
  top: "data"
  include {
    phase: TRAIN
  }
  data_param {
    batch_size: 1
    backend: LMDB
  }
}
layer {
  name: "label"
  type: "Data"
  top: "label"
  include {
    phase: TRAIN
  }
  data_param {
    batch_size: 1
    backend: LMDB
  }
}
layer {
  name: "data"
  type: "Data"
  top: "data"
  include {
    phase: TEST
  }
  data_param {
    batch_size: 1
    backend: LMDB
  }
}
layer {
  name: "label"
  type: "Data"
  top: "label"
  include {
    phase: TEST
  }
  data_param {
    batch_size: 1
    backend: LMDB
  }
}
layer { bottom: 'data' top: 'conv1_1' name: 'conv1_1' type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0}
  convolution_param { engine: CAFFE num_output: 64 pad: 1 kernel_size: 3 } }
layer { bottom: 'conv1_1' top: 'conv1_1' name: 'relu1_1' type: "ReLU" }
layer { bottom: 'conv1_1' top: 'conv1_2' name: 'conv1_2' type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0}
  convolution_param { engine: CAFFE num_output: 64 pad: 1 kernel_size: 3 } }
layer { bottom: 'conv1_2' top: 'conv1_2' name: 'relu1_2' type: "ReLU" }
layer { name: 'pool1' bottom: 'conv1_2' top: 'pool1' type: "Pooling"
  pooling_param { pool: MAX kernel_size: 2 stride: 2 } }

layer { name: 'conv2_1' bottom: 'pool1' top: 'conv2_1' type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0}
  convolution_param { engine: CAFFE num_output: 128 pad: 1 kernel_size: 3 } }
layer { bottom: 'conv2_1' top: 'conv2_1' name: 'relu2_1' type: "ReLU" }
layer { bottom: 'conv2_1' top: 'conv2_2' name: 'conv2_2' type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0}
  convolution_param { engine: CAFFE num_output: 128 pad: 1 kernel_size: 3 } }
layer { bottom: 'conv2_2' top: 'conv2_2' name: 'relu2_2' type: "ReLU" }
layer { bottom: 'conv2_2' top: 'pool2' name: 'pool2' type: "Pooling"
  pooling_param { pool: MAX kernel_size: 2 stride: 2 } }

layer { bottom: 'pool2' top: 'conv3_1' name: 'conv3_1' type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0}
  convolution_param { engine: CAFFE num_output: 256 pad: 1 kernel_size: 3 } }
layer { bottom: 'conv3_1' top: 'conv3_1' name: 'relu3_1' type: "ReLU" }
layer { bottom: 'conv3_1' top: 'conv3_2' name: 'conv3_2' type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0}
  convolution_param { engine: CAFFE num_output: 256 pad: 1 kernel_size: 3 } }
layer { bottom: 'conv3_2' top: 'conv3_2' name: 'relu3_2' type: "ReLU" }
layer { bottom: 'conv3_2' top: 'conv3_3' name: 'conv3_3' type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0}
  convolution_param { engine: CAFFE num_output: 256 pad: 1 kernel_size: 3 } }
layer { bottom: 'conv3_3' top: 'conv3_3' name: 'relu3_3' type: "ReLU" }
layer { bottom: 'conv3_3' top: 'pool3' name: 'pool3' type: "Pooling"
  pooling_param { pool: MAX kernel_size: 2 stride: 2 } }

layer { bottom: 'pool3' top: 'conv4_1' name: 'conv4_1' type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0}
  convolution_param { engine: CAFFE num_output: 512 pad: 1 kernel_size: 3 } }
layer { bottom: 'conv4_1' top: 'conv4_1' name: 'relu4_1' type: "ReLU" }
layer { bottom: 'conv4_1' top: 'conv4_2' name: 'conv4_2' type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0}
  convolution_param { engine: CAFFE num_output: 512 pad: 1 kernel_size: 3 } }
layer { bottom: 'conv4_2' top: 'conv4_2' name: 'relu4_2' type: "ReLU" }
layer { bottom: 'conv4_2' top: 'conv4_3' name: 'conv4_3' type: "Convolution"
  param { lr_mult: 1 decay_mult: 1 } param { lr_mult: 2 decay_mult: 0}
  convolution_param { engine: CAFFE num_output: 512 pad: 1 kernel_size: 3 } }
layer { bottom: 'conv4_3' top: 'conv4_3' name: 'relu4_3' type: "ReLU" }
layer { bottom: 'conv4_3' top: 'pool4' name: 'pool4' type: "Pooling"
  pooling_param { pool: MAX kernel_size: 2 stride: 2 } }

layer { bottom: 'pool4' top: 'conv5_1' name: 'conv5_1' type: "Convolution"
  param { lr_mult: 100 decay_mult: 1 } param { lr_mult: 200 decay_mult: 0}
  convolution_param { engine: CAFFE num_output: 512 pad: 1 kernel_size: 3 } }
layer { bottom: 'conv5_1' top: 'conv5_1' name: 'relu5_1' type: "ReLU" }
layer { bottom: 'conv5_1' top: 'conv5_2' name: 'conv5_2' type: "Convolution"
  param { lr_mult: 100 decay_mult: 1 } param { lr_mult: 200 decay_mult: 0}
  convolution_param { engine: CAFFE num_output: 512 pad: 1 kernel_size: 3 } }
layer { bottom: 'conv5_2' top: 'conv5_2' name: 'relu5_2' type: "ReLU" }
layer { bottom: 'conv5_2' top: 'conv5_3' name: 'conv5_3' type: "Convolution"
  param { lr_mult: 100 decay_mult: 1 } param { lr_mult: 200 decay_mult: 0}
  convolution_param { engine: CAFFE num_output: 512 pad: 1 kernel_size: 3 } }
layer { bottom: 'conv5_3' top: 'conv5_3' name: 'relu5_3' type: "ReLU" }

## DSN conv 1 ###
layer { name: 'score-dsn1' type: "Convolution" bottom: 'conv1_2' top: 'score-dsn1-up'
  param { lr_mult: 0.01 decay_mult: 1 } param { lr_mult: 0.02 decay_mult: 0}
  convolution_param { engine: CAFFE num_output: 1 kernel_size: 1 } }
  
layer { 
    type: "Crop" 
    name: 'crop1' 
    bottom: 'score-dsn1-up' 
    bottom: 'data' 
    top: 'upscore-dsn1'   
}
  
layer { type: "SigmoidCrossEntropyLoss" name: 'loss1' bottom: "upscore-dsn1" bottom: "label" top:"dsn1_loss" exclude { stage: "deploy" } loss_weight: 1}

### DSN conv 2 ###
layer { name: 'score-dsn2' type: "Convolution" bottom: 'conv2_2' top: 'score-dsn2'
  param { lr_mult: 0.01 decay_mult: 1 } param { lr_mult: 0.02 decay_mult: 0}
  convolution_param { engine: CAFFE num_output: 1 kernel_size: 1 } }
layer { type: "Deconvolution" name: 'upsample_2' bottom: 'score-dsn2' top: 'score-dsn2-up'
  param { lr_mult: 0 decay_mult: 1 } param { lr_mult: 0 decay_mult: 0}
  convolution_param { kernel_size: 4 stride: 2 num_output: 1 } }
layer { type: "Crop" name: 'crop2' bottom: 'score-dsn2-up' bottom: 'data' top: 'upscore-dsn2'}
layer { type: "SigmoidCrossEntropyLoss" name: 'loss2' bottom: "upscore-dsn2" bottom: "label" top:"dsn2_loss" exclude { stage: "deploy" } loss_weight: 1}

### DSN conv 3 ###
layer { name: 'score-dsn3' type: "Convolution" bottom: 'conv3_3' top: 'score-dsn3'
  param { lr_mult: 0.01 decay_mult: 1 } param { lr_mult: 0.02 decay_mult: 0}
  convolution_param { engine: CAFFE num_output: 1 kernel_size: 1 } }
layer { type: "Deconvolution" name: 'upsample_4' bottom: 'score-dsn3' top: 'score-dsn3-up'
  param { lr_mult: 0 decay_mult: 1 } param { lr_mult: 0 decay_mult: 0}
  convolution_param { kernel_size: 8 stride: 4 num_output: 1 } }
layer { type: "Crop" name: 'crop3' bottom: 'score-dsn3-up' bottom: 'data' top: 'upscore-dsn3'}
layer { type: "SigmoidCrossEntropyLoss" name: 'loss3' bottom: "upscore-dsn3" bottom: "label" top:"dsn3_loss" exclude { stage: "deploy" } loss_weight: 1}

###DSN conv 4###
layer { name: 'score-dsn4' type: "Convolution" bottom: 'conv4_3' top: 'score-dsn4'
  param { lr_mult: 0.01 decay_mult: 1 } param { lr_mult: 0.02 decay_mult: 0}
  convolution_param { engine: CAFFE num_output: 1 kernel_size: 1 } }
layer { type: "Deconvolution" name: 'upsample_8' bottom: 'score-dsn4' top: 'score-dsn4-up'
  param { lr_mult: 0 decay_mult: 1 } param { lr_mult: 0 decay_mult: 0}
  convolution_param { kernel_size: 16 stride: 8 num_output: 1 } }
layer { type: "Crop" name: 'crop4' bottom: 'score-dsn4-up' bottom: 'data' top: 'upscore-dsn4'}
layer { type: "SigmoidCrossEntropyLoss" name: 'loss4' bottom: "upscore-dsn4" bottom: "label" top:"dsn4_loss" exclude { stage: "deploy" } loss_weight: 1}

###DSN conv 5###
layer { name: 'score-dsn5' type: "Convolution" bottom: 'conv5_3' top: 'score-dsn5'
  param { lr_mult: 0.01 decay_mult: 1 } param { lr_mult: 0.02 decay_mult: 0}
  convolution_param { engine: CAFFE num_output: 1 kernel_size: 1 } }
layer { type: "Deconvolution" name: 'upsample_16' bottom: 'score-dsn5' top: 'score-dsn5-up'
  param { lr_mult: 0 decay_mult: 1 } param { lr_mult: 0 decay_mult: 0}
  convolution_param { kernel_size: 32 stride: 16 num_output: 1 } }
layer { type: "Crop" name: 'crop5' bottom: 'score-dsn5-up' bottom: 'data' top: 'upscore-dsn5'}
layer { type: "SigmoidCrossEntropyLoss" name: 'loss5' bottom: "upscore-dsn5" bottom: "label" top:"dsn5_loss" exclude { stage: "deploy" } loss_weight: 1}

### Concat and multiscale weight layer ###
layer { name: "concat" bottom: "upscore-dsn1"  bottom: "upscore-dsn2" bottom: "upscore-dsn3" 
         bottom: "upscore-dsn4" bottom: "upscore-dsn5" top: "concat-upscore" type: "Concat"
  concat_param { concat_dim: 1}}
layer { name: 'new-score-weighting' type: "Convolution" bottom: 'concat-upscore' top: 'upscore-fuse'
  param { lr_mult: 0.001 decay_mult: 1 } param { lr_mult: 0.002 decay_mult: 0}
  convolution_param { engine: CAFFE num_output: 1 kernel_size: 1 weight_filler {type: "constant" value: 0.2} }}
  
layer { type: "SigmoidCrossEntropyLoss" bottom: "upscore-fuse" bottom: "label" top:"fuse_loss" exclude { stage: "deploy" } loss_weight: 1}

Settings:

[Image: training settings]

Things worth noting

  • I had to set the learning rate to an extremely low value to keep it from quickly driving the outputs to NaN (see the solver sketch after this list)
  • I am only using a subset of the images they provide in the dataset (300 total)
  • I have the network set up to use the pretrained model they provide
  • I tried changing pad: 35 to pad: 1 on the 'conv1_1' layer, because the network never crops the padding off and the whole output was being shifted (my workaround may be a bad idea)
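On the first point: DIGITS generates the solver itself and I only set the base learning rate in the UI, but in plain Caffe terms this is roughly what "extremely low" amounts to for me. The values below are illustrative placeholders, not the settings shipped with the HED repo or recommended by DIGITS:

# Illustrative solver sketch only -- the numbers are what I mean by
# "extremely low", not a recommendation.
net: "train_val.prototxt"
base_lr: 1e-07        # anything noticeably larger drives the outputs to NaN for me
lr_policy: "fixed"
momentum: 0.9
weight_decay: 0.0002
display: 20
max_iter: 10000
snapshot: 1000
snapshot_prefix: "hed"
solver_mode: GPU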

Loss after a few iterations:

[Image: loss plot]

Labels (I rotated these images 90 degrees for easier viewing):

[Image: label]

Prediction:

[Image: prediction]
