name: "PVANET finetune"
layer {
name: 'input-data'
type: 'Python'
top: 'data'
top: 'im_info'
top: 'gt_boxes'
python_param {
module: 'roi_data_layer.layer'
layer: 'RoIDataLayer'
param_str: "'num_classes': 21"
}
}
################################################################################
## Convolution
################################################################################
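# Backbone overview: conv1_1 is a single C.ReLU unit (conv -> negation -> concat
# -> learned per-channel scale/shift -> ReLU), the conv2/conv3 stages are residual
# blocks whose 3x3 convolutions use the same C.ReLU pattern, and conv4/conv5 are
# Inception-style residual blocks. All backbone lr_mults are reduced (0.1/0.2)
# because the network is being fine-tuned.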
layer {
name: "conv1_1/conv"
type: "Convolution"
bottom: "data"
top: "conv1_1/conv"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 16
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 3
pad_w: 3
kernel_h: 7
kernel_w: 7
stride_h: 2
stride_w: 2
}
}
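# C.ReLU: the Power layer below computes the negated response (-1 * conv1_1/conv),
# Concat stacks the positive and negated maps (16 -> 32 channels), and the Scale
# layer applies a learned per-channel scale and bias before the shared ReLU, so
# only half the filters need to be learned.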
layer {
name: "conv1_1/neg"
type: "Power"
bottom: "conv1_1/conv"
top: "conv1_1/neg"
power_param {
power: 1
scale: -1.0
shift: 0
}
}
layer {
name: "conv1_1/concat"
type: "Concat"
bottom: "conv1_1/conv"
bottom: "conv1_1/neg"
top: "conv1_1"
}
layer {
name: "conv1_1/scale"
type: "Scale"
bottom: "conv1_1"
top: "conv1_1"
param {
lr_mult: 0.1
decay_mult: 0
}
param {
lr_mult: 0.2
decay_mult: 0
}
scale_param {
bias_term: true
}
}
layer {
name: "conv1_1/relu"
type: "ReLU"
bottom: "conv1_1"
top: "conv1_1"
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1_1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
pad: 0
}
}
layer {
name: "conv2_1/1/conv"
type: "Convolution"
bottom: "pool1"
top: "conv2_1/1"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 24
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv2_1/1/relu"
type: "ReLU"
bottom: "conv2_1/1"
top: "conv2_1/1"
}
layer {
name: "conv2_1/2/conv"
type: "Convolution"
bottom: "conv2_1/1"
top: "conv2_1/2/conv"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 24
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv2_1/2/neg"
type: "Power"
bottom: "conv2_1/2/conv"
top: "conv2_1/2/neg"
power_param {
power: 1
scale: -1.0
shift: 0
}
}
layer {
name: "conv2_1/2/concat"
type: "Concat"
bottom: "conv2_1/2/conv"
bottom: "conv2_1/2/neg"
top: "conv2_1/2"
}
layer {
name: "conv2_1/2/scale"
type: "Scale"
bottom: "conv2_1/2"
top: "conv2_1/2"
param {
lr_mult: 0.1
decay_mult: 0
}
param {
lr_mult: 0.2
decay_mult: 0
}
scale_param {
bias_term: true
}
}
layer {
name: "conv2_1/2/relu"
type: "ReLU"
bottom: "conv2_1/2"
top: "conv2_1/2"
}
layer {
name: "conv2_1/3/conv"
type: "Convolution"
bottom: "conv2_1/2"
top: "conv2_1/3"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv2_1/proj"
type: "Convolution"
bottom: "pool1"
top: "conv2_1/proj"
param {
lr_mult: 0.1
decay_mult: 0.1
}
param {
lr_mult: 0.2
decay_mult: 0.0
}
convolution_param {
num_output: 64
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0.1
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv2_1"
type: "Eltwise"
bottom: "conv2_1/3"
bottom: "conv2_1/proj"
top: "conv2_1"
eltwise_param {
operation: SUM
coeff: 1
coeff: 1
}
}
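# Residual connection: conv2_1/3 is summed with a 1x1 projection of pool1.
# The later blocks of the stage (conv2_2, conv2_3) add the previous block's output
# directly, since their channel count and stride already match.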
layer {
name: "conv2_2/1/conv"
type: "Convolution"
bottom: "conv2_1"
top: "conv2_2/1"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 24
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv2_2/1/relu"
type: "ReLU"
bottom: "conv2_2/1"
top: "conv2_2/1"
}
layer {
name: "conv2_2/2/conv"
type: "Convolution"
bottom: "conv2_2/1"
top: "conv2_2/2/conv"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 24
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv2_2/2/neg"
type: "Power"
bottom: "conv2_2/2/conv"
top: "conv2_2/2/neg"
power_param {
power: 1
scale: -1.0
shift: 0
}
}
layer {
name: "conv2_2/2/concat"
type: "Concat"
bottom: "conv2_2/2/conv"
bottom: "conv2_2/2/neg"
top: "conv2_2/2"
}
layer {
name: "conv2_2/2/scale"
type: "Scale"
bottom: "conv2_2/2"
top: "conv2_2/2"
param {
lr_mult: 0.1
decay_mult: 0
}
param {
lr_mult: 0.2
decay_mult: 0
}
scale_param {
bias_term: true
}
}
layer {
name: "conv2_2/2/relu"
type: "ReLU"
bottom: "conv2_2/2"
top: "conv2_2/2"
}
layer {
name: "conv2_2/3/conv"
type: "Convolution"
bottom: "conv2_2/2"
top: "conv2_2/3"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv2_2"
type: "Eltwise"
bottom: "conv2_2/3"
bottom: "conv2_1"
top: "conv2_2"
eltwise_param {
operation: SUM
coeff: 1
coeff: 1
}
}
layer {
name: "conv2_3/1/conv"
type: "Convolution"
bottom: "conv2_2"
top: "conv2_3/1"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 24
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv2_3/1/relu"
type: "ReLU"
bottom: "conv2_3/1"
top: "conv2_3/1"
}
layer {
name: "conv2_3/2/conv"
type: "Convolution"
bottom: "conv2_3/1"
top: "conv2_3/2/conv"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 24
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv2_3/2/neg"
type: "Power"
bottom: "conv2_3/2/conv"
top: "conv2_3/2/neg"
power_param {
power: 1
scale: -1.0
shift: 0
}
}
layer {
name: "conv2_3/2/concat"
type: "Concat"
bottom: "conv2_3/2/conv"
bottom: "conv2_3/2/neg"
top: "conv2_3/2"
}
layer {
name: "conv2_3/2/scale"
type: "Scale"
bottom: "conv2_3/2"
top: "conv2_3/2"
param {
lr_mult: 0.1
decay_mult: 0
}
param {
lr_mult: 0.2
decay_mult: 0
}
scale_param {
bias_term: true
}
}
layer {
name: "conv2_3/2/relu"
type: "ReLU"
bottom: "conv2_3/2"
top: "conv2_3/2"
}
layer {
name: "conv2_3/3/conv"
type: "Convolution"
bottom: "conv2_3/2"
top: "conv2_3/3"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv2_3"
type: "Eltwise"
bottom: "conv2_3/3"
bottom: "conv2_2"
top: "conv2_3"
eltwise_param {
operation: SUM
coeff: 1
coeff: 1
}
}
layer {
name: "conv3_1/1/conv"
type: "Convolution"
bottom: "conv2_3"
top: "conv3_1/1"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 48
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 2
stride_w: 2
}
}
layer {
name: "conv3_1/1/relu"
type: "ReLU"
bottom: "conv3_1/1"
top: "conv3_1/1"
}
layer {
name: "conv3_1/2/conv"
type: "Convolution"
bottom: "conv3_1/1"
top: "conv3_1/2/conv"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 48
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv3_1/2/neg"
type: "Power"
bottom: "conv3_1/2/conv"
top: "conv3_1/2/neg"
power_param {
power: 1
scale: -1.0
shift: 0
}
}
layer {
name: "conv3_1/2/concat"
type: "Concat"
bottom: "conv3_1/2/conv"
bottom: "conv3_1/2/neg"
top: "conv3_1/2"
}
layer {
name: "conv3_1/2/scale"
type: "Scale"
bottom: "conv3_1/2"
top: "conv3_1/2"
param {
lr_mult: 0.1
decay_mult: 0
}
param {
lr_mult: 0.2
decay_mult: 0
}
scale_param {
bias_term: true
}
}
layer {
name: "conv3_1/2/relu"
type: "ReLU"
bottom: "conv3_1/2"
top: "conv3_1/2"
}
layer {
name: "conv3_1/3/conv"
type: "Convolution"
bottom: "conv3_1/2"
top: "conv3_1/3"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 128
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv3_1/proj"
type: "Convolution"
bottom: "conv2_3"
top: "conv3_1/proj"
param {
lr_mult: 0.1
decay_mult: 0.1
}
param {
lr_mult: 0.2
decay_mult: 0.0
}
convolution_param {
num_output: 128
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0.1
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 2
stride_w: 2
}
}
layer {
name: "conv3_1"
type: "Eltwise"
bottom: "conv3_1/3"
bottom: "conv3_1/proj"
top: "conv3_1"
eltwise_param {
operation: SUM
coeff: 1
coeff: 1
}
}
layer {
name: "conv3_2/1/conv"
type: "Convolution"
bottom: "conv3_1"
top: "conv3_2/1"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 48
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv3_2/1/relu"
type: "ReLU"
bottom: "conv3_2/1"
top: "conv3_2/1"
}
layer {
name: "conv3_2/2/conv"
type: "Convolution"
bottom: "conv3_2/1"
top: "conv3_2/2/conv"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 48
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv3_2/2/neg"
type: "Power"
bottom: "conv3_2/2/conv"
top: "conv3_2/2/neg"
power_param {
power: 1
scale: -1.0
shift: 0
}
}
layer {
name: "conv3_2/2/concat"
type: "Concat"
bottom: "conv3_2/2/conv"
bottom: "conv3_2/2/neg"
top: "conv3_2/2"
}
layer {
name: "conv3_2/2/scale"
type: "Scale"
bottom: "conv3_2/2"
top: "conv3_2/2"
param {
lr_mult: 0.1
decay_mult: 0
}
param {
lr_mult: 0.2
decay_mult: 0
}
scale_param {
bias_term: true
}
}
layer {
name: "conv3_2/2/relu"
type: "ReLU"
bottom: "conv3_2/2"
top: "conv3_2/2"
}
layer {
name: "conv3_2/3/conv"
type: "Convolution"
bottom: "conv3_2/2"
top: "conv3_2/3"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 128
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv3_2"
type: "Eltwise"
bottom: "conv3_2/3"
bottom: "conv3_1"
top: "conv3_2"
eltwise_param {
operation: SUM
coeff: 1
coeff: 1
}
}
layer {
name: "conv3_3/1/conv"
type: "Convolution"
bottom: "conv3_2"
top: "conv3_3/1"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 48
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv3_3/1/relu"
type: "ReLU"
bottom: "conv3_3/1"
top: "conv3_3/1"
}
layer {
name: "conv3_3/2/conv"
type: "Convolution"
bottom: "conv3_3/1"
top: "conv3_3/2/conv"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 48
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv3_3/2/neg"
type: "Power"
bottom: "conv3_3/2/conv"
top: "conv3_3/2/neg"
power_param {
power: 1
scale: -1.0
shift: 0
}
}
layer {
name: "conv3_3/2/concat"
type: "Concat"
bottom: "conv3_3/2/conv"
bottom: "conv3_3/2/neg"
top: "conv3_3/2"
}
layer {
name: "conv3_3/2/scale"
type: "Scale"
bottom: "conv3_3/2"
top: "conv3_3/2"
param {
lr_mult: 0.1
decay_mult: 0
}
param {
lr_mult: 0.2
decay_mult: 0
}
scale_param {
bias_term: true
}
}
layer {
name: "conv3_3/2/relu"
type: "ReLU"
bottom: "conv3_3/2"
top: "conv3_3/2"
}
layer {
name: "conv3_3/3/conv"
type: "Convolution"
bottom: "conv3_3/2"
top: "conv3_3/3"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 128
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv3_3"
type: "Eltwise"
bottom: "conv3_3/3"
bottom: "conv3_2"
top: "conv3_3"
eltwise_param {
operation: SUM
coeff: 1
coeff: 1
}
}
layer {
name: "conv3_4/1/conv"
type: "Convolution"
bottom: "conv3_3"
top: "conv3_4/1"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 48
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv3_4/1/relu"
type: "ReLU"
bottom: "conv3_4/1"
top: "conv3_4/1"
}
layer {
name: "conv3_4/2/conv"
type: "Convolution"
bottom: "conv3_4/1"
top: "conv3_4/2/conv"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 48
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv3_4/2/neg"
type: "Power"
bottom: "conv3_4/2/conv"
top: "conv3_4/2/neg"
power_param {
power: 1
scale: -1.0
shift: 0
}
}
layer {
name: "conv3_4/2/concat"
type: "Concat"
bottom: "conv3_4/2/conv"
bottom: "conv3_4/2/neg"
top: "conv3_4/2"
}
layer {
name: "conv3_4/2/scale"
type: "Scale"
bottom: "conv3_4/2"
top: "conv3_4/2"
param {
lr_mult: 0.1
decay_mult: 0
}
param {
lr_mult: 0.2
decay_mult: 0
}
scale_param {
bias_term: true
}
}
layer {
name: "conv3_4/2/relu"
type: "ReLU"
bottom: "conv3_4/2"
top: "conv3_4/2"
}
layer {
name: "conv3_4/3/conv"
type: "Convolution"
bottom: "conv3_4/2"
top: "conv3_4/3"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 128
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv3_4"
type: "Eltwise"
bottom: "conv3_4/3"
bottom: "conv3_3"
top: "conv3_4"
eltwise_param {
operation: SUM
coeff: 1
coeff: 1
}
}
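# From conv4_1 on, each block is an Inception-style residual unit: parallel 1x1,
# 1x1 -> 3x3, and 1x1 -> 3x3 -> 3x3 branches (plus a max-pool -> 1x1 branch in the
# stride-2 blocks conv4_1 and conv5_1) are concatenated and fused by a 1x1 'out'
# convolution before the element-wise sum with the projection or identity path.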
layer {
name: "conv4_1/incep/0/conv"
type: "Convolution"
bottom: "conv3_4"
top: "conv4_1/incep/0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 2
stride_w: 2
}
}
layer {
name: "conv4_1/incep/0/relu"
type: "ReLU"
bottom: "conv4_1/incep/0"
top: "conv4_1/incep/0"
}
layer {
name: "conv4_1/incep/1_reduce/conv"
type: "Convolution"
bottom: "conv3_4"
top: "conv4_1/incep/1_reduce"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 48
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 2
stride_w: 2
}
}
layer {
name: "conv4_1/incep/1_reduce/relu"
type: "ReLU"
bottom: "conv4_1/incep/1_reduce"
top: "conv4_1/incep/1_reduce"
}
layer {
name: "conv4_1/incep/1_0/conv"
type: "Convolution"
bottom: "conv4_1/incep/1_reduce"
top: "conv4_1/incep/1_0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 128
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_1/incep/1_0/relu"
type: "ReLU"
bottom: "conv4_1/incep/1_0"
top: "conv4_1/incep/1_0"
}
layer {
name: "conv4_1/incep/2_reduce/conv"
type: "Convolution"
bottom: "conv3_4"
top: "conv4_1/incep/2_reduce"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 24
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 2
stride_w: 2
}
}
layer {
name: "conv4_1/incep/2_reduce/relu"
type: "ReLU"
bottom: "conv4_1/incep/2_reduce"
top: "conv4_1/incep/2_reduce"
}
layer {
name: "conv4_1/incep/2_0/conv"
type: "Convolution"
bottom: "conv4_1/incep/2_reduce"
top: "conv4_1/incep/2_0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 48
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_1/incep/2_0/relu"
type: "ReLU"
bottom: "conv4_1/incep/2_0"
top: "conv4_1/incep/2_0"
}
layer {
name: "conv4_1/incep/2_1/conv"
type: "Convolution"
bottom: "conv4_1/incep/2_0"
top: "conv4_1/incep/2_1"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 48
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_1/incep/2_1/relu"
type: "ReLU"
bottom: "conv4_1/incep/2_1"
top: "conv4_1/incep/2_1"
}
layer {
name: "conv4_1/incep/pool"
type: "Pooling"
bottom: "conv3_4"
top: "conv4_1/incep/pool"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
pad: 0
}
}
layer {
name: "conv4_1/incep/poolproj/conv"
type: "Convolution"
bottom: "conv4_1/incep/pool"
top: "conv4_1/incep/poolproj"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 128
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_1/incep/poolproj/relu"
type: "ReLU"
bottom: "conv4_1/incep/poolproj"
top: "conv4_1/incep/poolproj"
}
layer {
name: "conv4_1/incep"
type: "Concat"
bottom: "conv4_1/incep/0"
bottom: "conv4_1/incep/1_0"
bottom: "conv4_1/incep/2_1"
bottom: "conv4_1/incep/poolproj"
top: "conv4_1/incep"
}
layer {
name: "conv4_1/out/conv"
type: "Convolution"
bottom: "conv4_1/incep"
top: "conv4_1/out"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 256
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_1/proj"
type: "Convolution"
bottom: "conv3_4"
top: "conv4_1/proj"
param {
lr_mult: 0.1
decay_mult: 0.1
}
param {
lr_mult: 0.2
decay_mult: 0.0
}
convolution_param {
num_output: 256
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0.1
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 2
stride_w: 2
}
}
layer {
name: "conv4_1"
type: "Eltwise"
bottom: "conv4_1/out"
bottom: "conv4_1/proj"
top: "conv4_1"
eltwise_param {
operation: SUM
coeff: 1
coeff: 1
}
}
layer {
name: "conv4_2/incep/0/conv"
type: "Convolution"
bottom: "conv4_1"
top: "conv4_2/incep/0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_2/incep/0/relu"
type: "ReLU"
bottom: "conv4_2/incep/0"
top: "conv4_2/incep/0"
}
layer {
name: "conv4_2/incep/1_reduce/conv"
type: "Convolution"
bottom: "conv4_1"
top: "conv4_2/incep/1_reduce"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_2/incep/1_reduce/relu"
type: "ReLU"
bottom: "conv4_2/incep/1_reduce"
top: "conv4_2/incep/1_reduce"
}
layer {
name: "conv4_2/incep/1_0/conv"
type: "Convolution"
bottom: "conv4_2/incep/1_reduce"
top: "conv4_2/incep/1_0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 128
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_2/incep/1_0/relu"
type: "ReLU"
bottom: "conv4_2/incep/1_0"
top: "conv4_2/incep/1_0"
}
layer {
name: "conv4_2/incep/2_reduce/conv"
type: "Convolution"
bottom: "conv4_1"
top: "conv4_2/incep/2_reduce"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 24
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_2/incep/2_reduce/relu"
type: "ReLU"
bottom: "conv4_2/incep/2_reduce"
top: "conv4_2/incep/2_reduce"
}
layer {
name: "conv4_2/incep/2_0/conv"
type: "Convolution"
bottom: "conv4_2/incep/2_reduce"
top: "conv4_2/incep/2_0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 48
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_2/incep/2_0/relu"
type: "ReLU"
bottom: "conv4_2/incep/2_0"
top: "conv4_2/incep/2_0"
}
layer {
name: "conv4_2/incep/2_1/conv"
type: "Convolution"
bottom: "conv4_2/incep/2_0"
top: "conv4_2/incep/2_1"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 48
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_2/incep/2_1/relu"
type: "ReLU"
bottom: "conv4_2/incep/2_1"
top: "conv4_2/incep/2_1"
}
layer {
name: "conv4_2/incep"
type: "Concat"
bottom: "conv4_2/incep/0"
bottom: "conv4_2/incep/1_0"
bottom: "conv4_2/incep/2_1"
top: "conv4_2/incep"
}
layer {
name: "conv4_2/out/conv"
type: "Convolution"
bottom: "conv4_2/incep"
top: "conv4_2/out"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 256
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_2"
type: "Eltwise"
bottom: "conv4_2/out"
bottom: "conv4_1"
top: "conv4_2"
eltwise_param {
operation: SUM
coeff: 1
coeff: 1
}
}
layer {
name: "conv4_3/incep/0/conv"
type: "Convolution"
bottom: "conv4_2"
top: "conv4_3/incep/0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_3/incep/0/relu"
type: "ReLU"
bottom: "conv4_3/incep/0"
top: "conv4_3/incep/0"
}
layer {
name: "conv4_3/incep/1_reduce/conv"
type: "Convolution"
bottom: "conv4_2"
top: "conv4_3/incep/1_reduce"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_3/incep/1_reduce/relu"
type: "ReLU"
bottom: "conv4_3/incep/1_reduce"
top: "conv4_3/incep/1_reduce"
}
layer {
name: "conv4_3/incep/1_0/conv"
type: "Convolution"
bottom: "conv4_3/incep/1_reduce"
top: "conv4_3/incep/1_0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 128
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_3/incep/1_0/relu"
type: "ReLU"
bottom: "conv4_3/incep/1_0"
top: "conv4_3/incep/1_0"
}
layer {
name: "conv4_3/incep/2_reduce/conv"
type: "Convolution"
bottom: "conv4_2"
top: "conv4_3/incep/2_reduce"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 24
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_3/incep/2_reduce/relu"
type: "ReLU"
bottom: "conv4_3/incep/2_reduce"
top: "conv4_3/incep/2_reduce"
}
layer {
name: "conv4_3/incep/2_0/conv"
type: "Convolution"
bottom: "conv4_3/incep/2_reduce"
top: "conv4_3/incep/2_0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 48
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_3/incep/2_0/relu"
type: "ReLU"
bottom: "conv4_3/incep/2_0"
top: "conv4_3/incep/2_0"
}
layer {
name: "conv4_3/incep/2_1/conv"
type: "Convolution"
bottom: "conv4_3/incep/2_0"
top: "conv4_3/incep/2_1"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 48
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_3/incep/2_1/relu"
type: "ReLU"
bottom: "conv4_3/incep/2_1"
top: "conv4_3/incep/2_1"
}
layer {
name: "conv4_3/incep"
type: "Concat"
bottom: "conv4_3/incep/0"
bottom: "conv4_3/incep/1_0"
bottom: "conv4_3/incep/2_1"
top: "conv4_3/incep"
}
layer {
name: "conv4_3/out/conv"
type: "Convolution"
bottom: "conv4_3/incep"
top: "conv4_3/out"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 256
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_3"
type: "Eltwise"
bottom: "conv4_3/out"
bottom: "conv4_2"
top: "conv4_3"
eltwise_param {
operation: SUM
coeff: 1
coeff: 1
}
}
layer {
name: "conv4_4/incep/0/conv"
type: "Convolution"
bottom: "conv4_3"
top: "conv4_4/incep/0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_4/incep/0/relu"
type: "ReLU"
bottom: "conv4_4/incep/0"
top: "conv4_4/incep/0"
}
layer {
name: "conv4_4/incep/1_reduce/conv"
type: "Convolution"
bottom: "conv4_3"
top: "conv4_4/incep/1_reduce"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_4/incep/1_reduce/relu"
type: "ReLU"
bottom: "conv4_4/incep/1_reduce"
top: "conv4_4/incep/1_reduce"
}
layer {
name: "conv4_4/incep/1_0/conv"
type: "Convolution"
bottom: "conv4_4/incep/1_reduce"
top: "conv4_4/incep/1_0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 128
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_4/incep/1_0/relu"
type: "ReLU"
bottom: "conv4_4/incep/1_0"
top: "conv4_4/incep/1_0"
}
layer {
name: "conv4_4/incep/2_reduce/conv"
type: "Convolution"
bottom: "conv4_3"
top: "conv4_4/incep/2_reduce"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 24
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_4/incep/2_reduce/relu"
type: "ReLU"
bottom: "conv4_4/incep/2_reduce"
top: "conv4_4/incep/2_reduce"
}
layer {
name: "conv4_4/incep/2_0/conv"
type: "Convolution"
bottom: "conv4_4/incep/2_reduce"
top: "conv4_4/incep/2_0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 48
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_4/incep/2_0/relu"
type: "ReLU"
bottom: "conv4_4/incep/2_0"
top: "conv4_4/incep/2_0"
}
layer {
name: "conv4_4/incep/2_1/conv"
type: "Convolution"
bottom: "conv4_4/incep/2_0"
top: "conv4_4/incep/2_1"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 48
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_4/incep/2_1/relu"
type: "ReLU"
bottom: "conv4_4/incep/2_1"
top: "conv4_4/incep/2_1"
}
layer {
name: "conv4_4/incep"
type: "Concat"
bottom: "conv4_4/incep/0"
bottom: "conv4_4/incep/1_0"
bottom: "conv4_4/incep/2_1"
top: "conv4_4/incep"
}
layer {
name: "conv4_4/out/conv"
type: "Convolution"
bottom: "conv4_4/incep"
top: "conv4_4/out"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 256
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv4_4"
type: "Eltwise"
bottom: "conv4_4/out"
bottom: "conv4_3"
top: "conv4_4"
eltwise_param {
operation: SUM
coeff: 1
coeff: 1
}
}
layer {
name: "conv5_1/incep/0/conv"
type: "Convolution"
bottom: "conv4_4"
top: "conv5_1/incep/0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 2
stride_w: 2
}
}
layer {
name: "conv5_1/incep/0/relu"
type: "ReLU"
bottom: "conv5_1/incep/0"
top: "conv5_1/incep/0"
}
layer {
name: "conv5_1/incep/1_reduce/conv"
type: "Convolution"
bottom: "conv4_4"
top: "conv5_1/incep/1_reduce"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 96
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 2
stride_w: 2
}
}
layer {
name: "conv5_1/incep/1_reduce/relu"
type: "ReLU"
bottom: "conv5_1/incep/1_reduce"
top: "conv5_1/incep/1_reduce"
}
layer {
name: "conv5_1/incep/1_0/conv"
type: "Convolution"
bottom: "conv5_1/incep/1_reduce"
top: "conv5_1/incep/1_0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 192
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_1/incep/1_0/relu"
type: "ReLU"
bottom: "conv5_1/incep/1_0"
top: "conv5_1/incep/1_0"
}
layer {
name: "conv5_1/incep/2_reduce/conv"
type: "Convolution"
bottom: "conv4_4"
top: "conv5_1/incep/2_reduce"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 32
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 2
stride_w: 2
}
}
layer {
name: "conv5_1/incep/2_reduce/relu"
type: "ReLU"
bottom: "conv5_1/incep/2_reduce"
top: "conv5_1/incep/2_reduce"
}
layer {
name: "conv5_1/incep/2_0/conv"
type: "Convolution"
bottom: "conv5_1/incep/2_reduce"
top: "conv5_1/incep/2_0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_1/incep/2_0/relu"
type: "ReLU"
bottom: "conv5_1/incep/2_0"
top: "conv5_1/incep/2_0"
}
layer {
name: "conv5_1/incep/2_1/conv"
type: "Convolution"
bottom: "conv5_1/incep/2_0"
top: "conv5_1/incep/2_1"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_1/incep/2_1/relu"
type: "ReLU"
bottom: "conv5_1/incep/2_1"
top: "conv5_1/incep/2_1"
}
layer {
name: "conv5_1/incep/pool"
type: "Pooling"
bottom: "conv4_4"
top: "conv5_1/incep/pool"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
pad: 0
}
}
layer {
name: "conv5_1/incep/poolproj/conv"
type: "Convolution"
bottom: "conv5_1/incep/pool"
top: "conv5_1/incep/poolproj"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 128
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_1/incep/poolproj/relu"
type: "ReLU"
bottom: "conv5_1/incep/poolproj"
top: "conv5_1/incep/poolproj"
}
layer {
name: "conv5_1/incep"
type: "Concat"
bottom: "conv5_1/incep/0"
bottom: "conv5_1/incep/1_0"
bottom: "conv5_1/incep/2_1"
bottom: "conv5_1/incep/poolproj"
top: "conv5_1/incep"
}
layer {
name: "conv5_1/out/conv"
type: "Convolution"
bottom: "conv5_1/incep"
top: "conv5_1/out"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 384
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_1/proj"
type: "Convolution"
bottom: "conv4_4"
top: "conv5_1/proj"
param {
lr_mult: 0.1
decay_mult: 0.1
}
param {
lr_mult: 0.2
decay_mult: 0.0
}
convolution_param {
num_output: 384
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0.1
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 2
stride_w: 2
}
}
layer {
name: "conv5_1"
type: "Eltwise"
bottom: "conv5_1/out"
bottom: "conv5_1/proj"
top: "conv5_1"
eltwise_param {
operation: SUM
coeff: 1
coeff: 1
}
}
layer {
name: "conv5_2/incep/0/conv"
type: "Convolution"
bottom: "conv5_1"
top: "conv5_2/incep/0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_2/incep/0/relu"
type: "ReLU"
bottom: "conv5_2/incep/0"
top: "conv5_2/incep/0"
}
layer {
name: "conv5_2/incep/1_reduce/conv"
type: "Convolution"
bottom: "conv5_1"
top: "conv5_2/incep/1_reduce"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 96
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_2/incep/1_reduce/relu"
type: "ReLU"
bottom: "conv5_2/incep/1_reduce"
top: "conv5_2/incep/1_reduce"
}
layer {
name: "conv5_2/incep/1_0/conv"
type: "Convolution"
bottom: "conv5_2/incep/1_reduce"
top: "conv5_2/incep/1_0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 192
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_2/incep/1_0/relu"
type: "ReLU"
bottom: "conv5_2/incep/1_0"
top: "conv5_2/incep/1_0"
}
layer {
name: "conv5_2/incep/2_reduce/conv"
type: "Convolution"
bottom: "conv5_1"
top: "conv5_2/incep/2_reduce"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 32
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_2/incep/2_reduce/relu"
type: "ReLU"
bottom: "conv5_2/incep/2_reduce"
top: "conv5_2/incep/2_reduce"
}
layer {
name: "conv5_2/incep/2_0/conv"
type: "Convolution"
bottom: "conv5_2/incep/2_reduce"
top: "conv5_2/incep/2_0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_2/incep/2_0/relu"
type: "ReLU"
bottom: "conv5_2/incep/2_0"
top: "conv5_2/incep/2_0"
}
layer {
name: "conv5_2/incep/2_1/conv"
type: "Convolution"
bottom: "conv5_2/incep/2_0"
top: "conv5_2/incep/2_1"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_2/incep/2_1/relu"
type: "ReLU"
bottom: "conv5_2/incep/2_1"
top: "conv5_2/incep/2_1"
}
layer {
name: "conv5_2/incep"
type: "Concat"
bottom: "conv5_2/incep/0"
bottom: "conv5_2/incep/1_0"
bottom: "conv5_2/incep/2_1"
top: "conv5_2/incep"
}
layer {
name: "conv5_2/out/conv"
type: "Convolution"
bottom: "conv5_2/incep"
top: "conv5_2/out"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 384
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_2"
type: "Eltwise"
bottom: "conv5_2/out"
bottom: "conv5_1"
top: "conv5_2"
eltwise_param {
operation: SUM
coeff: 1
coeff: 1
}
}
layer {
name: "conv5_3/incep/0/conv"
type: "Convolution"
bottom: "conv5_2"
top: "conv5_3/incep/0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_3/incep/0/relu"
type: "ReLU"
bottom: "conv5_3/incep/0"
top: "conv5_3/incep/0"
}
layer {
name: "conv5_3/incep/1_reduce/conv"
type: "Convolution"
bottom: "conv5_2"
top: "conv5_3/incep/1_reduce"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 96
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_3/incep/1_reduce/relu"
type: "ReLU"
bottom: "conv5_3/incep/1_reduce"
top: "conv5_3/incep/1_reduce"
}
layer {
name: "conv5_3/incep/1_0/conv"
type: "Convolution"
bottom: "conv5_3/incep/1_reduce"
top: "conv5_3/incep/1_0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 192
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_3/incep/1_0/relu"
type: "ReLU"
bottom: "conv5_3/incep/1_0"
top: "conv5_3/incep/1_0"
}
layer {
name: "conv5_3/incep/2_reduce/conv"
type: "Convolution"
bottom: "conv5_2"
top: "conv5_3/incep/2_reduce"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 32
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_3/incep/2_reduce/relu"
type: "ReLU"
bottom: "conv5_3/incep/2_reduce"
top: "conv5_3/incep/2_reduce"
}
layer {
name: "conv5_3/incep/2_0/conv"
type: "Convolution"
bottom: "conv5_3/incep/2_reduce"
top: "conv5_3/incep/2_0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_3/incep/2_0/relu"
type: "ReLU"
bottom: "conv5_3/incep/2_0"
top: "conv5_3/incep/2_0"
}
layer {
name: "conv5_3/incep/2_1/conv"
type: "Convolution"
bottom: "conv5_3/incep/2_0"
top: "conv5_3/incep/2_1"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_3/incep/2_1/relu"
type: "ReLU"
bottom: "conv5_3/incep/2_1"
top: "conv5_3/incep/2_1"
}
layer {
name: "conv5_3/incep"
type: "Concat"
bottom: "conv5_3/incep/0"
bottom: "conv5_3/incep/1_0"
bottom: "conv5_3/incep/2_1"
top: "conv5_3/incep"
}
layer {
name: "conv5_3/out/conv"
type: "Convolution"
bottom: "conv5_3/incep"
top: "conv5_3/out"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 384
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_3"
type: "Eltwise"
bottom: "conv5_3/out"
bottom: "conv5_2"
top: "conv5_3"
eltwise_param {
operation: SUM
coeff: 1
coeff: 1
}
}
layer {
name: "conv5_4/incep/0/conv"
type: "Convolution"
bottom: "conv5_3"
top: "conv5_4/incep/0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_4/incep/0/relu"
type: "ReLU"
bottom: "conv5_4/incep/0"
top: "conv5_4/incep/0"
}
layer {
name: "conv5_4/incep/1_reduce/conv"
type: "Convolution"
bottom: "conv5_3"
top: "conv5_4/incep/1_reduce"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 96
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_4/incep/1_reduce/relu"
type: "ReLU"
bottom: "conv5_4/incep/1_reduce"
top: "conv5_4/incep/1_reduce"
}
layer {
name: "conv5_4/incep/1_0/conv"
type: "Convolution"
bottom: "conv5_4/incep/1_reduce"
top: "conv5_4/incep/1_0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 192
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_4/incep/1_0/relu"
type: "ReLU"
bottom: "conv5_4/incep/1_0"
top: "conv5_4/incep/1_0"
}
layer {
name: "conv5_4/incep/2_reduce/conv"
type: "Convolution"
bottom: "conv5_3"
top: "conv5_4/incep/2_reduce"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 32
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_4/incep/2_reduce/relu"
type: "ReLU"
bottom: "conv5_4/incep/2_reduce"
top: "conv5_4/incep/2_reduce"
}
layer {
name: "conv5_4/incep/2_0/conv"
type: "Convolution"
bottom: "conv5_4/incep/2_reduce"
top: "conv5_4/incep/2_0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_4/incep/2_0/relu"
type: "ReLU"
bottom: "conv5_4/incep/2_0"
top: "conv5_4/incep/2_0"
}
layer {
name: "conv5_4/incep/2_1/conv"
type: "Convolution"
bottom: "conv5_4/incep/2_0"
top: "conv5_4/incep/2_1"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 1
pad_w: 1
kernel_h: 3
kernel_w: 3
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_4/incep/2_1/relu"
type: "ReLU"
bottom: "conv5_4/incep/2_1"
top: "conv5_4/incep/2_1"
}
layer {
name: "conv5_4/incep"
type: "Concat"
bottom: "conv5_4/incep/0"
bottom: "conv5_4/incep/1_0"
bottom: "conv5_4/incep/2_1"
top: "conv5_4/incep"
}
layer {
name: "conv5_4/out/conv"
type: "Convolution"
bottom: "conv5_4/incep"
top: "conv5_4/out"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 384
bias_term: true
weight_filler {
type: "xavier"
}
pad_h: 0
pad_w: 0
kernel_h: 1
kernel_w: 1
stride_h: 1
stride_w: 1
}
}
layer {
name: "conv5_4"
type: "Eltwise"
bottom: "conv5_4/out"
bottom: "conv5_3"
top: "conv5_4"
eltwise_param {
operation: SUM
coeff: 1
coeff: 1
}
}
### hyper feature ###
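# Multi-scale "hyper" feature: conv3_4 is downsampled by 3x3/stride-2 max pooling,
# conv5_4 is upsampled 2x with a fixed bilinear Deconvolution (lr_mult 0, depthwise
# via group = num_output), and both are concatenated with conv4_4 along the channel
# axis at 1/16 of the input resolution. Two 1x1 convolutions then produce a
# 128-channel RPN feature (convf_rpn) and a 384-channel detection feature (convf_2),
# whose concatenation 'convf' is used for RoI pooling.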
layer {
name: "downsample"
type: "Pooling"
bottom: "conv3_4"
top: "downsample"
pooling_param {
kernel_size: 3
stride: 2
pad: 0
pool: MAX
}
}
layer {
name: "upsample"
type: "Deconvolution"
bottom: "conv5_4"
top: "upsample"
param {
lr_mult: 0
decay_mult: 0
}
convolution_param {
num_output: 384
kernel_size: 4
pad: 1
stride: 2
group: 384
bias_term: false
weight_filler {
type: "bilinear"
}
}
}
layer {
name: "concat"
bottom: "downsample"
bottom: "conv4_4"
bottom: "upsample"
top: "concat"
type: "Concat"
concat_param {
axis: 1
}
}
layer {
name: "convf_rpn"
type: "Convolution"
bottom: "concat"
top: "convf_rpn"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 2.0
decay_mult: 0
}
convolution_param {
num_output: 128
kernel_size: 1
pad: 0
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "reluf_rpn"
type: "ReLU"
bottom: "convf_rpn"
top: "convf_rpn"
}
layer {
name: "convf_2"
type: "Convolution"
bottom: "concat"
top: "convf_2"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 2.0
decay_mult: 0
}
convolution_param {
num_output: 384
kernel_size: 1
pad: 0
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "reluf_2"
type: "ReLU"
bottom: "convf_2"
top: "convf_2"
}
layer {
name: "concat_convf"
bottom: "convf_rpn"
bottom: "convf_2"
top: "convf"
type: "Concat"
concat_param {
axis: 1
}
}
################################################################################
## RPN
################################################################################
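# The RPN slides a 3x3, 384-channel conv over convf_rpn and predicts, per spatial
# location, 25 anchors (5 scales x 5 aspect ratios, see 'rpn-data' and 'proposal'
# below): 2 * 25 = 50 fg/bg scores and 4 * 25 = 100 box-regression outputs.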
### RPN conv ###
layer {
name: "rpn_conv1"
type: "Convolution"
bottom: "convf_rpn"
top: "rpn_conv1"
param { lr_mult: 1.0 decay_mult: 1.0 }
param { lr_mult: 2.0 decay_mult: 0 }
convolution_param {
num_output: 384 kernel_size: 3 pad: 1 stride: 1
weight_filler { type: "gaussian" std: 0.01 }
bias_filler { type: "constant" value: 0 }
}
}
layer {
name: "rpn_relu1"
type: "ReLU"
bottom: "rpn_conv1"
top: "rpn_conv1"
}
layer {
name: "rpn_cls_score"
type: "Convolution"
bottom: "rpn_conv1"
top: "rpn_cls_score"
param { lr_mult: 1.0 decay_mult: 1.0 }
param { lr_mult: 2.0 decay_mult: 0 }
convolution_param {
num_output: 50 # 2(bg/fg) * 25(anchors)
kernel_size: 1 pad: 0 stride: 1
weight_filler { type: "gaussian" std: 0.01 }
bias_filler { type: "constant" value: 0 }
}
}
layer {
name: "rpn_bbox_pred"
type: "Convolution"
bottom: "rpn_conv1"
top: "rpn_bbox_pred"
param { lr_mult: 1.0 decay_mult: 1.0 }
param { lr_mult: 2.0 decay_mult: 0 }
convolution_param {
num_output: 100 # 4 * 25(anchors)
kernel_size: 1 pad: 0 stride: 1
weight_filler { type: "gaussian" std: 0.01 }
bias_filler { type: "constant" value: 0 }
}
}
layer {
bottom: "rpn_cls_score"
top: "rpn_cls_score_reshape"
name: "rpn_cls_score_reshape"
type: "Reshape"
reshape_param { shape { dim: 0 dim: 2 dim: -1 dim: 0 } }
}
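# Reshaping to (N, 2, 25*H, W) puts each anchor's bg/fg pair on the channel axis,
# so the softmax loss below is computed over exactly two classes per anchor.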
layer {
name: 'rpn-data'
type: 'Python'
bottom: 'rpn_cls_score'
bottom: 'gt_boxes'
bottom: 'im_info'
bottom: 'data'
top: 'rpn_labels'
top: 'rpn_bbox_targets'
top: 'rpn_bbox_inside_weights'
top: 'rpn_bbox_outside_weights'
python_param {
module: 'rpn.anchor_target_layer'
layer: 'AnchorTargetLayer'
param_str: "{'feat_stride': 16, 'scales': [3, 6, 9, 16, 32], 'ratios': [0.5, 0.667, 1.0, 1.5, 2.0]}"
}
}
layer {
name: "rpn_loss_cls"
type: "SoftmaxWithLoss"
bottom: "rpn_cls_score_reshape"
bottom: "rpn_labels"
propagate_down: 1
propagate_down: 0
top: "rpn_cls_loss"
loss_weight: 1
loss_param {
ignore_label: -1
normalize: true
}
}
layer {
name: "rpn_loss_bbox"
type: "SmoothL1Loss"
bottom: "rpn_bbox_pred"
bottom: "rpn_bbox_targets"
bottom: 'rpn_bbox_inside_weights'
bottom: 'rpn_bbox_outside_weights'
top: "rpn_loss_bbox"
loss_weight: 1
smooth_l1_loss_param { sigma: 3.0 }
}
#========= RoI Proposal ============
layer {
name: "rpn_cls_prob"
type: "Softmax"
bottom: "rpn_cls_score_reshape"
top: "rpn_cls_prob"
}
layer {
name: 'rpn_cls_prob_reshape'
type: 'Reshape'
bottom: 'rpn_cls_prob'
top: 'rpn_cls_prob_reshape'
reshape_param { shape { dim: 0 dim: 50 dim: -1 dim: 0 } }
}
# C++ implementation of the proposal layer
layer {
name: 'proposal'
type: 'Proposal'
bottom: 'rpn_cls_prob_reshape'
bottom: 'rpn_bbox_pred'
bottom: 'im_info'
top: 'rpn_rois'
top: 'rpn_scores'
proposal_param {
ratio: 0.5 ratio: 0.667 ratio: 1.0 ratio: 1.5 ratio: 2.0
scale: 3 scale: 6 scale: 9 scale: 16 scale: 32
base_size: 16
feat_stride: 16
pre_nms_topn: 12000
post_nms_topn: 200
nms_thresh: 0.7
min_size: 16
}
}
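# 'rpn_scores' is not needed for training and is silenced below; 'rpn_rois' go to
# the Python ProposalTargetLayer, which samples foreground/background RoIs and
# builds per-class labels and bbox regression targets for the R-CNN head.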
layer {
name: 'mute_rpn_scores'
bottom: 'rpn_scores'
type: 'Silence'
}
layer {
name: 'roi-data'
type: 'Python'
bottom: 'rpn_rois'
bottom: 'gt_boxes'
top: 'rois'
top: 'labels'
top: 'bbox_targets'
top: 'bbox_inside_weights'
top: 'bbox_outside_weights'
python_param {
module: 'rpn.proposal_target_layer'
layer: 'ProposalTargetLayer'
param_str: "'num_classes': 21"
}
}
################################################################################
## RCNN
################################################################################
layer {
name: "roi_pool_conv5"
type: "ROIPooling"
bottom: "convf"
bottom: "rois"
top: "roi_pool_conv5"
roi_pooling_param {
pooled_w: 6
pooled_h: 6
spatial_scale: 0.0625 # 1/16
}
}
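# RoI pooling crops each sampled proposal from 'convf' into a fixed 6x6 grid;
# spatial_scale 0.0625 = 1/16 matches the feature stride of the hyper feature.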
layer {
name: "fc6"
type: "InnerProduct"
bottom: "roi_pool_conv5"
top: "fc6"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 2.0
decay_mult: 0.0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "fc6/dropout"
type: "Dropout"
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.25
}
}
layer {
name: "fc6/relu"
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layer {
name: "fc7"
type: "InnerProduct"
bottom: "fc6"
top: "fc7"
param {
lr_mult: 1.0
decay_mult: 1.0
}
param {
lr_mult: 2.0
decay_mult: 0.0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "fc7/dropout"
type: "Dropout"
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.25
}
}
layer {
name: "fc7/relu"
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layer {
name: "cls_score"
type: "InnerProduct"
bottom: "fc7"
top: "cls_score"
param { lr_mult: 1.0 decay_mult: 1.0 }
param { lr_mult: 2.0 decay_mult: 0 }
inner_product_param {
num_output: 21
weight_filler { type: "gaussian" std: 0.01 }
bias_filler { type: "constant" value: 0 }
}
}
layer {
name: "bbox_pred"
type: "InnerProduct"
bottom: "fc7"
top: "bbox_pred"
param { lr_mult: 1.0 decay_mult: 1.0 }
param { lr_mult: 2.0 decay_mult: 0 }
inner_product_param {
num_output: 84
weight_filler { type: "gaussian" std: 0.001 }
bias_filler { type: "constant" value: 0 }
}
}
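# cls_score has 21 outputs (one per class, including background); bbox_pred has
# 84 = 4 box coordinates x 21 classes (class-specific box regression).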
layer {
name: "loss_cls"
type: "SoftmaxWithLoss"
bottom: "cls_score"
bottom: "labels"
propagate_down: 1
propagate_down: 0
top: "cls_loss"
loss_weight: 1
loss_param {
ignore_label: -1
normalize: true
}
}
layer {
name: "loss_bbox"
type: "SmoothL1Loss"
bottom: "bbox_pred"
bottom: "bbox_targets"
bottom: 'bbox_inside_weights'
bottom: 'bbox_outside_weights'
top: "loss_bbox"
loss_weight: 1
}