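# cornernet -- Caffe deploy definition (backbone portion; the listing is
# truncated before the output heads).
# Input: a single 1 x 3 x 511 x 511 image blob.
# Layers follow a repeating Conv -> BatchNorm -> Scale -> ReLU pattern; ReLU
# and Upsample are emitted as custom "CPP" layers, which suggests the file was
# auto-generated by a PyTorch-to-Caffe converter (an assumption, not stated here).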
name: "cornernet" | |
input: "blob1" | |
input_dim: 1 | |
input_dim: 3 | |
input_dim: 511 | |
input_dim: 511 | |
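# Stem: 7x7/stride-2 conv (128 ch) then 3x3/stride-2 conv (256 ch), bringing
# the 511x511 input down to roughly quarter resolution.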
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "blob1"
  top: "conv_blob1"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 3
    kernel_size: 7
    group: 1
    stride: 2
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm1"
  type: "BatchNorm"
  bottom: "conv_blob1"
  top: "batch_norm_blob1"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale1"
  type: "Scale"
  bottom: "batch_norm_blob1"
  top: "batch_norm_blob1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu1"
  type: "CPP"
  bottom: "batch_norm_blob1"
  top: "relu_blob1"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "relu_blob1"
  top: "conv_blob2"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 2
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm2"
  type: "BatchNorm"
  bottom: "conv_blob2"
  top: "batch_norm_blob2"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale2"
  type: "Scale"
  bottom: "batch_norm_blob2"
  top: "batch_norm_blob2"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu2"
  type: "CPP"
  bottom: "batch_norm_blob2"
  top: "relu_blob2"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "relu_blob2"
  top: "conv_blob3"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm3"
  type: "BatchNorm"
  bottom: "conv_blob3"
  top: "batch_norm_blob3"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale3"
  type: "Scale"
  bottom: "batch_norm_blob3"
  top: "batch_norm_blob3"
  scale_param {
    bias_term: true
  }
}
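# Projection shortcut: a 1x1/stride-2 conv on the block input so the
# elementwise sum in add1 matches the main branch in shape.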
layer {
  name: "conv4"
  type: "Convolution"
  bottom: "relu_blob1"
  top: "conv_blob4"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 2
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm4"
  type: "BatchNorm"
  bottom: "conv_blob4"
  top: "batch_norm_blob4"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale4"
  type: "Scale"
  bottom: "batch_norm_blob4"
  top: "batch_norm_blob4"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add1"
  type: "Eltwise"
  bottom: "batch_norm_blob3"
  bottom: "batch_norm_blob4"
  top: "add_blob1"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu3"
  type: "CPP"
  bottom: "add_blob1"
  top: "relu_blob3"
  cpp_param {
    type: "ReLU"
  }
}
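# Identity residual blocks at width 256: two 3x3 convs per block, with the
# shortcut taken directly from the block input.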
layer {
  name: "conv5"
  type: "Convolution"
  bottom: "relu_blob3"
  top: "conv_blob5"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm5"
  type: "BatchNorm"
  bottom: "conv_blob5"
  top: "batch_norm_blob5"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale5"
  type: "Scale"
  bottom: "batch_norm_blob5"
  top: "batch_norm_blob5"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu4"
  type: "CPP"
  bottom: "batch_norm_blob5"
  top: "relu_blob4"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv6"
  type: "Convolution"
  bottom: "relu_blob4"
  top: "conv_blob6"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm6"
  type: "BatchNorm"
  bottom: "conv_blob6"
  top: "batch_norm_blob6"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale6"
  type: "Scale"
  bottom: "batch_norm_blob6"
  top: "batch_norm_blob6"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add2"
  type: "Eltwise"
  bottom: "batch_norm_blob6"
  bottom: "relu_blob3"
  top: "add_blob2"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu5"
  type: "CPP"
  bottom: "add_blob2"
  top: "relu_blob5"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv7"
  type: "Convolution"
  bottom: "relu_blob5"
  top: "conv_blob7"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm7"
  type: "BatchNorm"
  bottom: "conv_blob7"
  top: "batch_norm_blob7"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale7"
  type: "Scale"
  bottom: "batch_norm_blob7"
  top: "batch_norm_blob7"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu6"
  type: "CPP"
  bottom: "batch_norm_blob7"
  top: "relu_blob6"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv8"
  type: "Convolution"
  bottom: "relu_blob6"
  top: "conv_blob8"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm8"
  type: "BatchNorm"
  bottom: "conv_blob8"
  top: "batch_norm_blob8"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale8"
  type: "Scale"
  bottom: "batch_norm_blob8"
  top: "batch_norm_blob8"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add3"
  type: "Eltwise"
  bottom: "batch_norm_blob8"
  bottom: "relu_blob5"
  top: "add_blob3"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu7"
  type: "CPP"
  bottom: "add_blob3"
  top: "relu_blob7"
  cpp_param {
    type: "ReLU"
  }
}
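# Branch point: conv9 and the 1x1/stride-2 projection conv11 both read
# relu_blob3 (not relu_blob7), opening a second, downsampled path.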
layer {
  name: "conv9"
  type: "Convolution"
  bottom: "relu_blob3"
  top: "conv_blob9"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 2
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm9"
  type: "BatchNorm"
  bottom: "conv_blob9"
  top: "batch_norm_blob9"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale9"
  type: "Scale"
  bottom: "batch_norm_blob9"
  top: "batch_norm_blob9"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu8"
  type: "CPP"
  bottom: "batch_norm_blob9"
  top: "relu_blob8"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv10"
  type: "Convolution"
  bottom: "relu_blob8"
  top: "conv_blob10"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm10"
  type: "BatchNorm"
  bottom: "conv_blob10"
  top: "batch_norm_blob10"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale10"
  type: "Scale"
  bottom: "batch_norm_blob10"
  top: "batch_norm_blob10"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "conv11"
  type: "Convolution"
  bottom: "relu_blob3"
  top: "conv_blob11"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 2
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm11"
  type: "BatchNorm"
  bottom: "conv_blob11"
  top: "batch_norm_blob11"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale11"
  type: "Scale"
  bottom: "batch_norm_blob11"
  top: "batch_norm_blob11"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add4"
  type: "Eltwise"
  bottom: "batch_norm_blob10"
  bottom: "batch_norm_blob11"
  top: "add_blob4"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu9"
  type: "CPP"
  bottom: "add_blob4"
  top: "relu_blob9"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv12"
  type: "Convolution"
  bottom: "relu_blob9"
  top: "conv_blob12"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm12"
  type: "BatchNorm"
  bottom: "conv_blob12"
  top: "batch_norm_blob12"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale12"
  type: "Scale"
  bottom: "batch_norm_blob12"
  top: "batch_norm_blob12"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu10"
  type: "CPP"
  bottom: "batch_norm_blob12"
  top: "relu_blob10"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv13"
  type: "Convolution"
  bottom: "relu_blob10"
  top: "conv_blob13"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm13"
  type: "BatchNorm"
  bottom: "conv_blob13"
  top: "batch_norm_blob13"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale13"
  type: "Scale"
  bottom: "batch_norm_blob13"
  top: "batch_norm_blob13"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add5"
  type: "Eltwise"
  bottom: "batch_norm_blob13"
  bottom: "relu_blob9"
  top: "add_blob5"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu11"
  type: "CPP"
  bottom: "add_blob5"
  top: "relu_blob11"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv14"
  type: "Convolution"
  bottom: "relu_blob11"
  top: "conv_blob14"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm14"
  type: "BatchNorm"
  bottom: "conv_blob14"
  top: "batch_norm_blob14"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale14"
  type: "Scale"
  bottom: "batch_norm_blob14"
  top: "batch_norm_blob14"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu12"
  type: "CPP"
  bottom: "batch_norm_blob14"
  top: "relu_blob12"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv15"
  type: "Convolution"
  bottom: "relu_blob12"
  top: "conv_blob15"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm15"
  type: "BatchNorm"
  bottom: "conv_blob15"
  top: "batch_norm_blob15"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale15"
  type: "Scale"
  bottom: "batch_norm_blob15"
  top: "batch_norm_blob15"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add6"
  type: "Eltwise"
  bottom: "batch_norm_blob15"
  bottom: "relu_blob11"
  top: "add_blob6"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu13"
  type: "CPP"
  bottom: "add_blob6"
  top: "relu_blob13"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv16"
  type: "Convolution"
  bottom: "relu_blob13"
  top: "conv_blob16"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm16"
  type: "BatchNorm"
  bottom: "conv_blob16"
  top: "batch_norm_blob16"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale16"
  type: "Scale"
  bottom: "batch_norm_blob16"
  top: "batch_norm_blob16"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu14"
  type: "CPP"
  bottom: "batch_norm_blob16"
  top: "relu_blob14"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv17"
  type: "Convolution"
  bottom: "relu_blob14"
  top: "conv_blob17"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm17"
  type: "BatchNorm"
  bottom: "conv_blob17"
  top: "batch_norm_blob17"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale17"
  type: "Scale"
  bottom: "batch_norm_blob17"
  top: "batch_norm_blob17"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add7"
  type: "Eltwise"
  bottom: "batch_norm_blob17"
  bottom: "relu_blob13"
  top: "add_blob7"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu15"
  type: "CPP"
  bottom: "add_blob7"
  top: "relu_blob15"
  cpp_param {
    type: "ReLU"
  }
}
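# Next stage: width 384 via a 3x3/stride-2 conv (conv18) plus a 1x1/stride-2
# projection (conv20), again branching from relu_blob11.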
layer {
  name: "conv18"
  type: "Convolution"
  bottom: "relu_blob11"
  top: "conv_blob18"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 2
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm18"
  type: "BatchNorm"
  bottom: "conv_blob18"
  top: "batch_norm_blob18"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale18"
  type: "Scale"
  bottom: "batch_norm_blob18"
  top: "batch_norm_blob18"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu16"
  type: "CPP"
  bottom: "batch_norm_blob18"
  top: "relu_blob16"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv19"
  type: "Convolution"
  bottom: "relu_blob16"
  top: "conv_blob19"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm19"
  type: "BatchNorm"
  bottom: "conv_blob19"
  top: "batch_norm_blob19"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale19"
  type: "Scale"
  bottom: "batch_norm_blob19"
  top: "batch_norm_blob19"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "conv20"
  type: "Convolution"
  bottom: "relu_blob11"
  top: "conv_blob20"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 2
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm20"
  type: "BatchNorm"
  bottom: "conv_blob20"
  top: "batch_norm_blob20"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale20"
  type: "Scale"
  bottom: "batch_norm_blob20"
  top: "batch_norm_blob20"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add8"
  type: "Eltwise"
  bottom: "batch_norm_blob19"
  bottom: "batch_norm_blob20"
  top: "add_blob8"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu17"
  type: "CPP"
  bottom: "add_blob8"
  top: "relu_blob17"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv21"
  type: "Convolution"
  bottom: "relu_blob17"
  top: "conv_blob21"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm21"
  type: "BatchNorm"
  bottom: "conv_blob21"
  top: "batch_norm_blob21"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale21"
  type: "Scale"
  bottom: "batch_norm_blob21"
  top: "batch_norm_blob21"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu18"
  type: "CPP"
  bottom: "batch_norm_blob21"
  top: "relu_blob18"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv22"
  type: "Convolution"
  bottom: "relu_blob18"
  top: "conv_blob22"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm22"
  type: "BatchNorm"
  bottom: "conv_blob22"
  top: "batch_norm_blob22"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale22"
  type: "Scale"
  bottom: "batch_norm_blob22"
  top: "batch_norm_blob22"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add9"
  type: "Eltwise"
  bottom: "batch_norm_blob22"
  bottom: "relu_blob17"
  top: "add_blob9"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu19"
  type: "CPP"
  bottom: "add_blob9"
  top: "relu_blob19"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv23"
  type: "Convolution"
  bottom: "relu_blob19"
  top: "conv_blob23"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm23"
  type: "BatchNorm"
  bottom: "conv_blob23"
  top: "batch_norm_blob23"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale23"
  type: "Scale"
  bottom: "batch_norm_blob23"
  top: "batch_norm_blob23"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu20"
  type: "CPP"
  bottom: "batch_norm_blob23"
  top: "relu_blob20"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv24"
  type: "Convolution"
  bottom: "relu_blob20"
  top: "conv_blob24"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm24"
  type: "BatchNorm"
  bottom: "conv_blob24"
  top: "batch_norm_blob24"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale24"
  type: "Scale"
  bottom: "batch_norm_blob24"
  top: "batch_norm_blob24"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add10"
  type: "Eltwise"
  bottom: "batch_norm_blob24"
  bottom: "relu_blob19"
  top: "add_blob10"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu21"
  type: "CPP"
  bottom: "add_blob10"
  top: "relu_blob21"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv25"
  type: "Convolution"
  bottom: "relu_blob21"
  top: "conv_blob25"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm25"
  type: "BatchNorm"
  bottom: "conv_blob25"
  top: "batch_norm_blob25"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale25"
  type: "Scale"
  bottom: "batch_norm_blob25"
  top: "batch_norm_blob25"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu22"
  type: "CPP"
  bottom: "batch_norm_blob25"
  top: "relu_blob22"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv26"
  type: "Convolution"
  bottom: "relu_blob22"
  top: "conv_blob26"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm26"
  type: "BatchNorm"
  bottom: "conv_blob26"
  top: "batch_norm_blob26"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale26"
  type: "Scale"
  bottom: "batch_norm_blob26"
  top: "batch_norm_blob26"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add11"
  type: "Eltwise"
  bottom: "batch_norm_blob26"
  bottom: "relu_blob21"
  top: "add_blob11"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu23"
  type: "CPP"
  bottom: "add_blob11"
  top: "relu_blob23"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv27"
  type: "Convolution"
  bottom: "relu_blob19"
  top: "conv_blob27"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 2
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm27"
  type: "BatchNorm"
  bottom: "conv_blob27"
  top: "batch_norm_blob27"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale27"
  type: "Scale"
  bottom: "batch_norm_blob27"
  top: "batch_norm_blob27"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu24"
  type: "CPP"
  bottom: "batch_norm_blob27"
  top: "relu_blob24"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv28"
  type: "Convolution"
  bottom: "relu_blob24"
  top: "conv_blob28"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm28"
  type: "BatchNorm"
  bottom: "conv_blob28"
  top: "batch_norm_blob28"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale28"
  type: "Scale"
  bottom: "batch_norm_blob28"
  top: "batch_norm_blob28"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "conv29"
  type: "Convolution"
  bottom: "relu_blob19"
  top: "conv_blob29"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 2
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm29"
  type: "BatchNorm"
  bottom: "conv_blob29"
  top: "batch_norm_blob29"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale29"
  type: "Scale"
  bottom: "batch_norm_blob29"
  top: "batch_norm_blob29"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add12"
  type: "Eltwise"
  bottom: "batch_norm_blob28"
  bottom: "batch_norm_blob29"
  top: "add_blob12"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu25"
  type: "CPP"
  bottom: "add_blob12"
  top: "relu_blob25"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv30"
  type: "Convolution"
  bottom: "relu_blob25"
  top: "conv_blob30"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm30"
  type: "BatchNorm"
  bottom: "conv_blob30"
  top: "batch_norm_blob30"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale30"
  type: "Scale"
  bottom: "batch_norm_blob30"
  top: "batch_norm_blob30"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu26"
  type: "CPP"
  bottom: "batch_norm_blob30"
  top: "relu_blob26"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv31"
  type: "Convolution"
  bottom: "relu_blob26"
  top: "conv_blob31"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm31"
  type: "BatchNorm"
  bottom: "conv_blob31"
  top: "batch_norm_blob31"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale31"
  type: "Scale"
  bottom: "batch_norm_blob31"
  top: "batch_norm_blob31"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add13"
  type: "Eltwise"
  bottom: "batch_norm_blob31"
  bottom: "relu_blob25"
  top: "add_blob13"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu27"
  type: "CPP"
  bottom: "add_blob13"
  top: "relu_blob27"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv32"
  type: "Convolution"
  bottom: "relu_blob27"
  top: "conv_blob32"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm32"
  type: "BatchNorm"
  bottom: "conv_blob32"
  top: "batch_norm_blob32"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale32"
  type: "Scale"
  bottom: "batch_norm_blob32"
  top: "batch_norm_blob32"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu28"
  type: "CPP"
  bottom: "batch_norm_blob32"
  top: "relu_blob28"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv33"
  type: "Convolution"
  bottom: "relu_blob28"
  top: "conv_blob33"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm33"
  type: "BatchNorm"
  bottom: "conv_blob33"
  top: "batch_norm_blob33"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale33"
  type: "Scale"
  bottom: "batch_norm_blob33"
  top: "batch_norm_blob33"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add14"
  type: "Eltwise"
  bottom: "batch_norm_blob33"
  bottom: "relu_blob27"
  top: "add_blob14"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu29"
  type: "CPP"
  bottom: "add_blob14"
  top: "relu_blob29"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv34"
  type: "Convolution"
  bottom: "relu_blob29"
  top: "conv_blob34"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm34"
  type: "BatchNorm"
  bottom: "conv_blob34"
  top: "batch_norm_blob34"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale34"
  type: "Scale"
  bottom: "batch_norm_blob34"
  top: "batch_norm_blob34"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu30"
  type: "CPP"
  bottom: "batch_norm_blob34"
  top: "relu_blob30"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv35"
  type: "Convolution"
  bottom: "relu_blob30"
  top: "conv_blob35"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm35"
  type: "BatchNorm"
  bottom: "conv_blob35"
  top: "batch_norm_blob35"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale35"
  type: "Scale"
  bottom: "batch_norm_blob35"
  top: "batch_norm_blob35"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add15"
  type: "Eltwise"
  bottom: "batch_norm_blob35"
  bottom: "relu_blob29"
  top: "add_blob15"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu31"
  type: "CPP"
  bottom: "add_blob15"
  top: "relu_blob31"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv36"
  type: "Convolution"
  bottom: "relu_blob27"
  top: "conv_blob36"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 2
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm36"
  type: "BatchNorm"
  bottom: "conv_blob36"
  top: "batch_norm_blob36"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale36"
  type: "Scale"
  bottom: "batch_norm_blob36"
  top: "batch_norm_blob36"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu32"
  type: "CPP"
  bottom: "batch_norm_blob36"
  top: "relu_blob32"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv37"
  type: "Convolution"
  bottom: "relu_blob32"
  top: "conv_blob37"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm37"
  type: "BatchNorm"
  bottom: "conv_blob37"
  top: "batch_norm_blob37"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale37"
  type: "Scale"
  bottom: "batch_norm_blob37"
  top: "batch_norm_blob37"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "conv38"
  type: "Convolution"
  bottom: "relu_blob27"
  top: "conv_blob38"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 2
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm38"
  type: "BatchNorm"
  bottom: "conv_blob38"
  top: "batch_norm_blob38"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale38"
  type: "Scale"
  bottom: "batch_norm_blob38"
  top: "batch_norm_blob38"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add16"
  type: "Eltwise"
  bottom: "batch_norm_blob37"
  bottom: "batch_norm_blob38"
  top: "add_blob16"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu33"
  type: "CPP"
  bottom: "add_blob16"
  top: "relu_blob33"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv39"
  type: "Convolution"
  bottom: "relu_blob33"
  top: "conv_blob39"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm39"
  type: "BatchNorm"
  bottom: "conv_blob39"
  top: "batch_norm_blob39"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale39"
  type: "Scale"
  bottom: "batch_norm_blob39"
  top: "batch_norm_blob39"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu34"
  type: "CPP"
  bottom: "batch_norm_blob39"
  top: "relu_blob34"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv40"
  type: "Convolution"
  bottom: "relu_blob34"
  top: "conv_blob40"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm40"
  type: "BatchNorm"
  bottom: "conv_blob40"
  top: "batch_norm_blob40"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale40"
  type: "Scale"
  bottom: "batch_norm_blob40"
  top: "batch_norm_blob40"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add17"
  type: "Eltwise"
  bottom: "batch_norm_blob40"
  bottom: "relu_blob33"
  top: "add_blob17"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu35"
  type: "CPP"
  bottom: "add_blob17"
  top: "relu_blob35"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv41"
  type: "Convolution"
  bottom: "relu_blob35"
  top: "conv_blob41"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm41"
  type: "BatchNorm"
  bottom: "conv_blob41"
  top: "batch_norm_blob41"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale41"
  type: "Scale"
  bottom: "batch_norm_blob41"
  top: "batch_norm_blob41"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu36"
  type: "CPP"
  bottom: "batch_norm_blob41"
  top: "relu_blob36"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv42"
  type: "Convolution"
  bottom: "relu_blob36"
  top: "conv_blob42"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm42"
  type: "BatchNorm"
  bottom: "conv_blob42"
  top: "batch_norm_blob42"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale42"
  type: "Scale"
  bottom: "batch_norm_blob42"
  top: "batch_norm_blob42"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add18"
  type: "Eltwise"
  bottom: "batch_norm_blob42"
  bottom: "relu_blob35"
  top: "add_blob18"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu37"
  type: "CPP"
  bottom: "add_blob18"
  top: "relu_blob37"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv43"
  type: "Convolution"
  bottom: "relu_blob37"
  top: "conv_blob43"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm43"
  type: "BatchNorm"
  bottom: "conv_blob43"
  top: "batch_norm_blob43"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale43"
  type: "Scale"
  bottom: "batch_norm_blob43"
  top: "batch_norm_blob43"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu38"
  type: "CPP"
  bottom: "batch_norm_blob43"
  top: "relu_blob38"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv44"
  type: "Convolution"
  bottom: "relu_blob38"
  top: "conv_blob44"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm44"
  type: "BatchNorm"
  bottom: "conv_blob44"
  top: "batch_norm_blob44"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale44"
  type: "Scale"
  bottom: "batch_norm_blob44"
  top: "batch_norm_blob44"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add19"
  type: "Eltwise"
  bottom: "batch_norm_blob44"
  bottom: "relu_blob37"
  top: "add_blob19"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu39"
  type: "CPP"
  bottom: "add_blob19"
  top: "relu_blob39"
  cpp_param {
    type: "ReLU"
  }
}
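# Deepest stage: width 512 at the lowest resolution (conv45 3x3/stride-2 with
# a 1x1/stride-2 projection, conv47, both from relu_blob35).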
layer {
  name: "conv45"
  type: "Convolution"
  bottom: "relu_blob35"
  top: "conv_blob45"
  convolution_param {
    num_output: 512
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 2
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm45"
  type: "BatchNorm"
  bottom: "conv_blob45"
  top: "batch_norm_blob45"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale45"
  type: "Scale"
  bottom: "batch_norm_blob45"
  top: "batch_norm_blob45"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu40"
  type: "CPP"
  bottom: "batch_norm_blob45"
  top: "relu_blob40"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv46"
  type: "Convolution"
  bottom: "relu_blob40"
  top: "conv_blob46"
  convolution_param {
    num_output: 512
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm46"
  type: "BatchNorm"
  bottom: "conv_blob46"
  top: "batch_norm_blob46"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale46"
  type: "Scale"
  bottom: "batch_norm_blob46"
  top: "batch_norm_blob46"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "conv47"
  type: "Convolution"
  bottom: "relu_blob35"
  top: "conv_blob47"
  convolution_param {
    num_output: 512
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 2
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm47"
  type: "BatchNorm"
  bottom: "conv_blob47"
  top: "batch_norm_blob47"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale47"
  type: "Scale"
  bottom: "batch_norm_blob47"
  top: "batch_norm_blob47"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add20"
  type: "Eltwise"
  bottom: "batch_norm_blob46"
  bottom: "batch_norm_blob47"
  top: "add_blob20"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu41"
  type: "CPP"
  bottom: "add_blob20"
  top: "relu_blob41"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv48"
  type: "Convolution"
  bottom: "relu_blob41"
  top: "conv_blob48"
  convolution_param {
    num_output: 512
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm48"
  type: "BatchNorm"
  bottom: "conv_blob48"
  top: "batch_norm_blob48"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale48"
  type: "Scale"
  bottom: "batch_norm_blob48"
  top: "batch_norm_blob48"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu42"
  type: "CPP"
  bottom: "batch_norm_blob48"
  top: "relu_blob42"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv49"
  type: "Convolution"
  bottom: "relu_blob42"
  top: "conv_blob49"
  convolution_param {
    num_output: 512
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm49"
  type: "BatchNorm"
  bottom: "conv_blob49"
  top: "batch_norm_blob49"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale49"
  type: "Scale"
  bottom: "batch_norm_blob49"
  top: "batch_norm_blob49"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add21"
  type: "Eltwise"
  bottom: "batch_norm_blob49"
  bottom: "relu_blob41"
  top: "add_blob21"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu43"
  type: "CPP"
  bottom: "add_blob21"
  top: "relu_blob43"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv50"
  type: "Convolution"
  bottom: "relu_blob43"
  top: "conv_blob50"
  convolution_param {
    num_output: 512
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm50"
  type: "BatchNorm"
  bottom: "conv_blob50"
  top: "batch_norm_blob50"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale50"
  type: "Scale"
  bottom: "batch_norm_blob50"
  top: "batch_norm_blob50"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu44"
  type: "CPP"
  bottom: "batch_norm_blob50"
  top: "relu_blob44"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv51"
  type: "Convolution"
  bottom: "relu_blob44"
  top: "conv_blob51"
  convolution_param {
    num_output: 512
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm51"
  type: "BatchNorm"
  bottom: "conv_blob51"
  top: "batch_norm_blob51"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale51"
  type: "Scale"
  bottom: "batch_norm_blob51"
  top: "batch_norm_blob51"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add22"
  type: "Eltwise"
  bottom: "batch_norm_blob51"
  bottom: "relu_blob43"
  top: "add_blob22"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu45"
  type: "CPP"
  bottom: "add_blob22"
  top: "relu_blob45"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv52"
  type: "Convolution"
  bottom: "relu_blob45"
  top: "conv_blob52"
  convolution_param {
    num_output: 512
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm52"
  type: "BatchNorm"
  bottom: "conv_blob52"
  top: "batch_norm_blob52"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale52"
  type: "Scale"
  bottom: "batch_norm_blob52"
  top: "batch_norm_blob52"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu46"
  type: "CPP"
  bottom: "batch_norm_blob52"
  top: "relu_blob46"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv53"
  type: "Convolution"
  bottom: "relu_blob46"
  top: "conv_blob53"
  convolution_param {
    num_output: 512
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm53"
  type: "BatchNorm"
  bottom: "conv_blob53"
  top: "batch_norm_blob53"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale53"
  type: "Scale"
  bottom: "batch_norm_blob53"
  top: "batch_norm_blob53"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add23"
  type: "Eltwise"
  bottom: "batch_norm_blob53"
  bottom: "relu_blob45"
  top: "add_blob23"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu47"
  type: "CPP"
  bottom: "add_blob23"
  top: "relu_blob47"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv54"
  type: "Convolution"
  bottom: "relu_blob47"
  top: "conv_blob54"
  convolution_param {
    num_output: 512
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm54"
  type: "BatchNorm"
  bottom: "conv_blob54"
  top: "batch_norm_blob54"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale54"
  type: "Scale"
  bottom: "batch_norm_blob54"
  top: "batch_norm_blob54"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu48"
  type: "CPP"
  bottom: "batch_norm_blob54"
  top: "relu_blob48"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv55"
  type: "Convolution"
  bottom: "relu_blob48"
  top: "conv_blob55"
  convolution_param {
    num_output: 512
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm55"
  type: "BatchNorm"
  bottom: "conv_blob55"
  top: "batch_norm_blob55"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale55"
  type: "Scale"
  bottom: "batch_norm_blob55"
  top: "batch_norm_blob55"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add24"
  type: "Eltwise"
  bottom: "batch_norm_blob55"
  bottom: "relu_blob47"
  top: "add_blob24"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu49"
  type: "CPP"
  bottom: "add_blob24"
  top: "relu_blob49"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv56"
  type: "Convolution"
  bottom: "relu_blob49"
  top: "conv_blob56"
  convolution_param {
    num_output: 512
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm56"
  type: "BatchNorm"
  bottom: "conv_blob56"
  top: "batch_norm_blob56"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale56"
  type: "Scale"
  bottom: "batch_norm_blob56"
  top: "batch_norm_blob56"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu50"
  type: "CPP"
  bottom: "batch_norm_blob56"
  top: "relu_blob50"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv57"
  type: "Convolution"
  bottom: "relu_blob50"
  top: "conv_blob57"
  convolution_param {
    num_output: 512
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm57"
  type: "BatchNorm"
  bottom: "conv_blob57"
  top: "batch_norm_blob57"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale57"
  type: "Scale"
  bottom: "batch_norm_blob57"
  top: "batch_norm_blob57"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add25"
  type: "Eltwise"
  bottom: "batch_norm_blob57"
  bottom: "relu_blob49"
  top: "add_blob25"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu51"
  type: "CPP"
  bottom: "add_blob25"
  top: "relu_blob51"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv58"
  type: "Convolution"
  bottom: "relu_blob51"
  top: "conv_blob58"
  convolution_param {
    num_output: 512
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm58"
  type: "BatchNorm"
  bottom: "conv_blob58"
  top: "batch_norm_blob58"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale58"
  type: "Scale"
  bottom: "batch_norm_blob58"
  top: "batch_norm_blob58"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu52"
  type: "CPP"
  bottom: "batch_norm_blob58"
  top: "relu_blob52"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv59"
  type: "Convolution"
  bottom: "relu_blob52"
  top: "conv_blob59"
  convolution_param {
    num_output: 512
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm59"
  type: "BatchNorm"
  bottom: "conv_blob59"
  top: "batch_norm_blob59"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale59"
  type: "Scale"
  bottom: "batch_norm_blob59"
  top: "batch_norm_blob59"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add26"
  type: "Eltwise"
  bottom: "batch_norm_blob59"
  bottom: "relu_blob51"
  top: "add_blob26"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu53"
  type: "CPP"
  bottom: "add_blob26"
  top: "relu_blob53"
  cpp_param {
    type: "ReLU"
  }
}
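# Start of the top-down path: width returns to 384 (conv60/conv61) with a
# 1x1/stride-1 shortcut (conv62) ahead of the first upsample.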
layer {
  name: "conv60"
  type: "Convolution"
  bottom: "relu_blob53"
  top: "conv_blob60"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm60"
  type: "BatchNorm"
  bottom: "conv_blob60"
  top: "batch_norm_blob60"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale60"
  type: "Scale"
  bottom: "batch_norm_blob60"
  top: "batch_norm_blob60"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu54"
  type: "CPP"
  bottom: "batch_norm_blob60"
  top: "relu_blob54"
  cpp_param {
    type: "ReLU"
  }
}
layer {
  name: "conv61"
  type: "Convolution"
  bottom: "relu_blob54"
  top: "conv_blob61"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 1
    kernel_size: 3
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm61"
  type: "BatchNorm"
  bottom: "conv_blob61"
  top: "batch_norm_blob61"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale61"
  type: "Scale"
  bottom: "batch_norm_blob61"
  top: "batch_norm_blob61"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "conv62"
  type: "Convolution"
  bottom: "relu_blob53"
  top: "conv_blob62"
  convolution_param {
    num_output: 384
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    dilation: 1
  }
}
layer {
  name: "batch_norm62"
  type: "BatchNorm"
  bottom: "conv_blob62"
  top: "batch_norm_blob62"
  batch_norm_param {
    eps: 9.999999747378752e-06
  }
}
layer {
  name: "bn_scale62"
  type: "Scale"
  bottom: "batch_norm_blob62"
  top: "batch_norm_blob62"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "add27"
  type: "Eltwise"
  bottom: "batch_norm_blob61"
  bottom: "batch_norm_blob62"
  top: "add_blob27"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "relu55"
  type: "CPP"
  bottom: "add_blob27"
  top: "relu_blob55"
  cpp_param {
    type: "ReLU"
  }
}
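# Upsample to 8x8 (custom CPP "Upsample" layer) and merge with the 8x8 skip
# feature relu_blob39 by elementwise sum.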
layer { | |
name: "upsample1" | |
type: "CPP" | |
bottom: "relu_blob55" | |
top: "upsample_blob1" | |
cpp_param { | |
param_str: "scale:2; upsample_h: 8; upsample_w: 8" | |
type: "Upsample" | |
} | |
} | |
layer { | |
name: "add28" | |
type: "Eltwise" | |
bottom: "relu_blob39" | |
bottom: "upsample_blob1" | |
top: "add_blob28" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "conv63" | |
type: "Convolution" | |
bottom: "add_blob28" | |
top: "conv_blob63" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm63" | |
type: "BatchNorm" | |
bottom: "conv_blob63" | |
top: "batch_norm_blob63" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale63" | |
type: "Scale" | |
bottom: "batch_norm_blob63" | |
top: "batch_norm_blob63" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu56" | |
type: "CPP" | |
bottom: "batch_norm_blob63" | |
top: "relu_blob56" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv64" | |
type: "Convolution" | |
bottom: "relu_blob56" | |
top: "conv_blob64" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm64" | |
type: "BatchNorm" | |
bottom: "conv_blob64" | |
top: "batch_norm_blob64" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale64" | |
type: "Scale" | |
bottom: "batch_norm_blob64" | |
top: "batch_norm_blob64" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add29" | |
type: "Eltwise" | |
bottom: "batch_norm_blob64" | |
bottom: "add_blob28" | |
top: "add_blob29" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu57" | |
type: "CPP" | |
bottom: "add_blob29" | |
top: "relu_blob57" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv65" | |
type: "Convolution" | |
bottom: "relu_blob57" | |
top: "conv_blob65" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm65" | |
type: "BatchNorm" | |
bottom: "conv_blob65" | |
top: "batch_norm_blob65" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale65" | |
type: "Scale" | |
bottom: "batch_norm_blob65" | |
top: "batch_norm_blob65" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu58" | |
type: "CPP" | |
bottom: "batch_norm_blob65" | |
top: "relu_blob58" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv66" | |
type: "Convolution" | |
bottom: "relu_blob58" | |
top: "conv_blob66" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm66" | |
type: "BatchNorm" | |
bottom: "conv_blob66" | |
top: "batch_norm_blob66" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale66" | |
type: "Scale" | |
bottom: "batch_norm_blob66" | |
top: "batch_norm_blob66" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add30" | |
type: "Eltwise" | |
bottom: "batch_norm_blob66" | |
bottom: "relu_blob57" | |
top: "add_blob30" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu59" | |
type: "CPP" | |
bottom: "add_blob30" | |
top: "relu_blob59" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
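# upsample2: presumably nearest-neighbor 2x upsampling to 16x16 (per its param_str);
# its output is summed with the matching-resolution skip feature relu_blob31 in add31.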
layer { | |
name: "upsample2" | |
type: "CPP" | |
bottom: "relu_blob59" | |
top: "upsample_blob2" | |
cpp_param { | |
param_str: "scale:2; upsample_h: 16; upsample_w: 16" | |
type: "Upsample" | |
} | |
} | |
layer { | |
name: "add31" | |
type: "Eltwise" | |
bottom: "relu_blob31" | |
bottom: "upsample_blob2" | |
top: "add_blob31" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "conv67" | |
type: "Convolution" | |
bottom: "add_blob31" | |
top: "conv_blob67" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm67" | |
type: "BatchNorm" | |
bottom: "conv_blob67" | |
top: "batch_norm_blob67" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale67" | |
type: "Scale" | |
bottom: "batch_norm_blob67" | |
top: "batch_norm_blob67" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu60" | |
type: "CPP" | |
bottom: "batch_norm_blob67" | |
top: "relu_blob60" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv68" | |
type: "Convolution" | |
bottom: "relu_blob60" | |
top: "conv_blob68" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm68" | |
type: "BatchNorm" | |
bottom: "conv_blob68" | |
top: "batch_norm_blob68" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale68" | |
type: "Scale" | |
bottom: "batch_norm_blob68" | |
top: "batch_norm_blob68" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add32" | |
type: "Eltwise" | |
bottom: "batch_norm_blob68" | |
bottom: "add_blob31" | |
top: "add_blob32" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu61" | |
type: "CPP" | |
bottom: "add_blob32" | |
top: "relu_blob61" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv69" | |
type: "Convolution" | |
bottom: "relu_blob61" | |
top: "conv_blob69" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm69" | |
type: "BatchNorm" | |
bottom: "conv_blob69" | |
top: "batch_norm_blob69" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale69" | |
type: "Scale" | |
bottom: "batch_norm_blob69" | |
top: "batch_norm_blob69" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu62" | |
type: "CPP" | |
bottom: "batch_norm_blob69" | |
top: "relu_blob62" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv70" | |
type: "Convolution" | |
bottom: "relu_blob62" | |
top: "conv_blob70" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm70" | |
type: "BatchNorm" | |
bottom: "conv_blob70" | |
top: "batch_norm_blob70" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale70" | |
type: "Scale" | |
bottom: "batch_norm_blob70" | |
top: "batch_norm_blob70" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add33" | |
type: "Eltwise" | |
bottom: "batch_norm_blob70" | |
bottom: "relu_blob61" | |
top: "add_blob33" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu63" | |
type: "CPP" | |
bottom: "add_blob33" | |
top: "relu_blob63" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
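# Next decoder level: 2x upsample to 32x32 (upsample3), merged with skip relu_blob23 in add34.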
layer { | |
name: "upsample3" | |
type: "CPP" | |
bottom: "relu_blob63" | |
top: "upsample_blob3" | |
cpp_param { | |
param_str: "scale:2; upsample_h: 32; upsample_w: 32" | |
type: "Upsample" | |
} | |
} | |
layer { | |
name: "add34" | |
type: "Eltwise" | |
bottom: "relu_blob23" | |
bottom: "upsample_blob3" | |
top: "add_blob34" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "conv71" | |
type: "Convolution" | |
bottom: "add_blob34" | |
top: "conv_blob71" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm71" | |
type: "BatchNorm" | |
bottom: "conv_blob71" | |
top: "batch_norm_blob71" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale71" | |
type: "Scale" | |
bottom: "batch_norm_blob71" | |
top: "batch_norm_blob71" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu64" | |
type: "CPP" | |
bottom: "batch_norm_blob71" | |
top: "relu_blob64" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv72" | |
type: "Convolution" | |
bottom: "relu_blob64" | |
top: "conv_blob72" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm72" | |
type: "BatchNorm" | |
bottom: "conv_blob72" | |
top: "batch_norm_blob72" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale72" | |
type: "Scale" | |
bottom: "batch_norm_blob72" | |
top: "batch_norm_blob72" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add35" | |
type: "Eltwise" | |
bottom: "batch_norm_blob72" | |
bottom: "add_blob34" | |
top: "add_blob35" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu65" | |
type: "CPP" | |
bottom: "add_blob35" | |
top: "relu_blob65" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
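# Channel-reduction residual block: the 384-channel decoder feature relu_blob65 is
# brought down to 256 channels (conv73/conv74), with a 1x1 projection shortcut
# (conv75) so the Eltwise SUM in add36 sees matching shapes.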
layer { | |
name: "conv73" | |
type: "Convolution" | |
bottom: "relu_blob65" | |
top: "conv_blob73" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm73" | |
type: "BatchNorm" | |
bottom: "conv_blob73" | |
top: "batch_norm_blob73" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale73" | |
type: "Scale" | |
bottom: "batch_norm_blob73" | |
top: "batch_norm_blob73" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu66" | |
type: "CPP" | |
bottom: "batch_norm_blob73" | |
top: "relu_blob66" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv74" | |
type: "Convolution" | |
bottom: "relu_blob66" | |
top: "conv_blob74" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm74" | |
type: "BatchNorm" | |
bottom: "conv_blob74" | |
top: "batch_norm_blob74" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale74" | |
type: "Scale" | |
bottom: "batch_norm_blob74" | |
top: "batch_norm_blob74" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "conv75" | |
type: "Convolution" | |
bottom: "relu_blob65" | |
top: "conv_blob75" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm75" | |
type: "BatchNorm" | |
bottom: "conv_blob75" | |
top: "batch_norm_blob75" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale75" | |
type: "Scale" | |
bottom: "batch_norm_blob75" | |
top: "batch_norm_blob75" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add36" | |
type: "Eltwise" | |
bottom: "batch_norm_blob74" | |
bottom: "batch_norm_blob75" | |
top: "add_blob36" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu67" | |
type: "CPP" | |
bottom: "add_blob36" | |
top: "relu_blob67" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
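# 2x upsample to 64x64 (upsample4), merged with skip relu_blob15 in add37.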
layer { | |
name: "upsample4" | |
type: "CPP" | |
bottom: "relu_blob67" | |
top: "upsample_blob4" | |
cpp_param { | |
param_str: "scale:2; upsample_h: 64; upsample_w: 64" | |
type: "Upsample" | |
} | |
} | |
layer { | |
name: "add37" | |
type: "Eltwise" | |
bottom: "relu_blob15" | |
bottom: "upsample_blob4" | |
top: "add_blob37" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "conv76" | |
type: "Convolution" | |
bottom: "add_blob37" | |
top: "conv_blob76" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm76" | |
type: "BatchNorm" | |
bottom: "conv_blob76" | |
top: "batch_norm_blob76" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale76" | |
type: "Scale" | |
bottom: "batch_norm_blob76" | |
top: "batch_norm_blob76" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu68" | |
type: "CPP" | |
bottom: "batch_norm_blob76" | |
top: "relu_blob68" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv77" | |
type: "Convolution" | |
bottom: "relu_blob68" | |
top: "conv_blob77" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm77" | |
type: "BatchNorm" | |
bottom: "conv_blob77" | |
top: "batch_norm_blob77" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale77" | |
type: "Scale" | |
bottom: "batch_norm_blob77" | |
top: "batch_norm_blob77" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add38" | |
type: "Eltwise" | |
bottom: "batch_norm_blob77" | |
bottom: "add_blob37" | |
top: "add_blob38" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu69" | |
type: "CPP" | |
bottom: "add_blob38" | |
top: "relu_blob69" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv78" | |
type: "Convolution" | |
bottom: "relu_blob69" | |
top: "conv_blob78" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm78" | |
type: "BatchNorm" | |
bottom: "conv_blob78" | |
top: "batch_norm_blob78" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale78" | |
type: "Scale" | |
bottom: "batch_norm_blob78" | |
top: "batch_norm_blob78" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu70" | |
type: "CPP" | |
bottom: "batch_norm_blob78" | |
top: "relu_blob70" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv79" | |
type: "Convolution" | |
bottom: "relu_blob70" | |
top: "conv_blob79" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm79" | |
type: "BatchNorm" | |
bottom: "conv_blob79" | |
top: "batch_norm_blob79" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale79" | |
type: "Scale" | |
bottom: "batch_norm_blob79" | |
top: "batch_norm_blob79" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add39" | |
type: "Eltwise" | |
bottom: "batch_norm_blob79" | |
bottom: "relu_blob69" | |
top: "add_blob39" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu71" | |
type: "CPP" | |
bottom: "add_blob39" | |
top: "relu_blob71" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
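# Top decoder level of the first hourglass: 2x upsample to 128x128 (upsample5),
# merged with skip relu_blob7 in add40.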
layer { | |
name: "upsample5" | |
type: "CPP" | |
bottom: "relu_blob71" | |
top: "upsample_blob5" | |
cpp_param { | |
param_str: "scale:2; upsample_h: 128; upsample_w: 128" | |
type: "Upsample" | |
} | |
} | |
layer { | |
name: "add40" | |
type: "Eltwise" | |
bottom: "relu_blob7" | |
bottom: "upsample_blob5" | |
top: "add_blob40" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "conv80" | |
type: "Convolution" | |
bottom: "add_blob40" | |
top: "conv_blob80" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm80" | |
type: "BatchNorm" | |
bottom: "conv_blob80" | |
top: "batch_norm_blob80" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale80" | |
type: "Scale" | |
bottom: "batch_norm_blob80" | |
top: "batch_norm_blob80" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu72" | |
type: "CPP" | |
bottom: "batch_norm_blob80" | |
top: "relu_blob72" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
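# Inter-hourglass bridge: relu_blob3 (pre-hourglass feature) and relu_blob72
# (first-hourglass output) each pass through a 1x1 conv plus BN (conv81 / conv82)
# and are summed in add41; this wiring is consistent with CornerNet's connection
# between its two hourglass modules.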
layer { | |
name: "conv81" | |
type: "Convolution" | |
bottom: "relu_blob3" | |
top: "conv_blob81" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm81" | |
type: "BatchNorm" | |
bottom: "conv_blob81" | |
top: "batch_norm_blob81" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale81" | |
type: "Scale" | |
bottom: "batch_norm_blob81" | |
top: "batch_norm_blob81" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "conv82" | |
type: "Convolution" | |
bottom: "relu_blob72" | |
top: "conv_blob82" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm82" | |
type: "BatchNorm" | |
bottom: "conv_blob82" | |
top: "batch_norm_blob82" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale82" | |
type: "Scale" | |
bottom: "batch_norm_blob82" | |
top: "batch_norm_blob82" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add41" | |
type: "Eltwise" | |
bottom: "batch_norm_blob81" | |
bottom: "batch_norm_blob82" | |
top: "add_blob41" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu73" | |
type: "CPP" | |
bottom: "add_blob41" | |
top: "relu_blob73" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
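# conv83..add42 form a residual block on the bridge output; its result (relu_blob75)
# feeds both the skip branch (conv85..add44) and the second hourglass's
# downsampling branch further below.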
layer { | |
name: "conv83" | |
type: "Convolution" | |
bottom: "relu_blob73" | |
top: "conv_blob83" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm83" | |
type: "BatchNorm" | |
bottom: "conv_blob83" | |
top: "batch_norm_blob83" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale83" | |
type: "Scale" | |
bottom: "batch_norm_blob83" | |
top: "batch_norm_blob83" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu74" | |
type: "CPP" | |
bottom: "batch_norm_blob83" | |
top: "relu_blob74" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv84" | |
type: "Convolution" | |
bottom: "relu_blob74" | |
top: "conv_blob84" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm84" | |
type: "BatchNorm" | |
bottom: "conv_blob84" | |
top: "batch_norm_blob84" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale84" | |
type: "Scale" | |
bottom: "batch_norm_blob84" | |
top: "batch_norm_blob84" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add42" | |
type: "Eltwise" | |
bottom: "batch_norm_blob84" | |
bottom: "relu_blob73" | |
top: "add_blob42" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu75" | |
type: "CPP" | |
bottom: "add_blob42" | |
top: "relu_blob75" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv85" | |
type: "Convolution" | |
bottom: "relu_blob75" | |
top: "conv_blob85" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm85" | |
type: "BatchNorm" | |
bottom: "conv_blob85" | |
top: "batch_norm_blob85" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale85" | |
type: "Scale" | |
bottom: "batch_norm_blob85" | |
top: "batch_norm_blob85" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu76" | |
type: "CPP" | |
bottom: "batch_norm_blob85" | |
top: "relu_blob76" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv86" | |
type: "Convolution" | |
bottom: "relu_blob76" | |
top: "conv_blob86" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm86" | |
type: "BatchNorm" | |
bottom: "conv_blob86" | |
top: "batch_norm_blob86" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale86" | |
type: "Scale" | |
bottom: "batch_norm_blob86" | |
top: "batch_norm_blob86" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add43" | |
type: "Eltwise" | |
bottom: "batch_norm_blob86" | |
bottom: "relu_blob75" | |
top: "add_blob43" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu77" | |
type: "CPP" | |
bottom: "add_blob43" | |
top: "relu_blob77" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv87" | |
type: "Convolution" | |
bottom: "relu_blob77" | |
top: "conv_blob87" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm87" | |
type: "BatchNorm" | |
bottom: "conv_blob87" | |
top: "batch_norm_blob87" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale87" | |
type: "Scale" | |
bottom: "batch_norm_blob87" | |
top: "batch_norm_blob87" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu78" | |
type: "CPP" | |
bottom: "batch_norm_blob87" | |
top: "relu_blob78" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv88" | |
type: "Convolution" | |
bottom: "relu_blob78" | |
top: "conv_blob88" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm88" | |
type: "BatchNorm" | |
bottom: "conv_blob88" | |
top: "batch_norm_blob88" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale88" | |
type: "Scale" | |
bottom: "batch_norm_blob88" | |
top: "batch_norm_blob88" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add44" | |
type: "Eltwise" | |
bottom: "batch_norm_blob88" | |
bottom: "relu_blob77" | |
top: "add_blob44" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu79" | |
type: "CPP" | |
bottom: "add_blob44" | |
top: "relu_blob79" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
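# Second-hourglass encoder, first downsample: conv89 (3x3, stride 2) -> conv90 is
# the main path and conv91 (1x1, stride 2) the projection shortcut, both taken from
# relu_blob75; relu_blob79 above is kept as the skip feature for the later decoder.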
layer { | |
name: "conv89" | |
type: "Convolution" | |
bottom: "relu_blob75" | |
top: "conv_blob89" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 2 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm89" | |
type: "BatchNorm" | |
bottom: "conv_blob89" | |
top: "batch_norm_blob89" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale89" | |
type: "Scale" | |
bottom: "batch_norm_blob89" | |
top: "batch_norm_blob89" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu80" | |
type: "CPP" | |
bottom: "batch_norm_blob89" | |
top: "relu_blob80" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv90" | |
type: "Convolution" | |
bottom: "relu_blob80" | |
top: "conv_blob90" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm90" | |
type: "BatchNorm" | |
bottom: "conv_blob90" | |
top: "batch_norm_blob90" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale90" | |
type: "Scale" | |
bottom: "batch_norm_blob90" | |
top: "batch_norm_blob90" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "conv91" | |
type: "Convolution" | |
bottom: "relu_blob75" | |
top: "conv_blob91" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 2 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm91" | |
type: "BatchNorm" | |
bottom: "conv_blob91" | |
top: "batch_norm_blob91" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale91" | |
type: "Scale" | |
bottom: "batch_norm_blob91" | |
top: "batch_norm_blob91" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add45" | |
type: "Eltwise" | |
bottom: "batch_norm_blob90" | |
bottom: "batch_norm_blob91" | |
top: "add_blob45" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu81" | |
type: "CPP" | |
bottom: "add_blob45" | |
top: "relu_blob81" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv92" | |
type: "Convolution" | |
bottom: "relu_blob81" | |
top: "conv_blob92" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm92" | |
type: "BatchNorm" | |
bottom: "conv_blob92" | |
top: "batch_norm_blob92" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale92" | |
type: "Scale" | |
bottom: "batch_norm_blob92" | |
top: "batch_norm_blob92" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu82" | |
type: "CPP" | |
bottom: "batch_norm_blob92" | |
top: "relu_blob82" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv93" | |
type: "Convolution" | |
bottom: "relu_blob82" | |
top: "conv_blob93" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm93" | |
type: "BatchNorm" | |
bottom: "conv_blob93" | |
top: "batch_norm_blob93" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale93" | |
type: "Scale" | |
bottom: "batch_norm_blob93" | |
top: "batch_norm_blob93" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add46" | |
type: "Eltwise" | |
bottom: "batch_norm_blob93" | |
bottom: "relu_blob81" | |
top: "add_blob46" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu83" | |
type: "CPP" | |
bottom: "add_blob46" | |
top: "relu_blob83" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv94" | |
type: "Convolution" | |
bottom: "relu_blob83" | |
top: "conv_blob94" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm94" | |
type: "BatchNorm" | |
bottom: "conv_blob94" | |
top: "batch_norm_blob94" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale94" | |
type: "Scale" | |
bottom: "batch_norm_blob94" | |
top: "batch_norm_blob94" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu84" | |
type: "CPP" | |
bottom: "batch_norm_blob94" | |
top: "relu_blob84" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv95" | |
type: "Convolution" | |
bottom: "relu_blob84" | |
top: "conv_blob95" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm95" | |
type: "BatchNorm" | |
bottom: "conv_blob95" | |
top: "batch_norm_blob95" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale95" | |
type: "Scale" | |
bottom: "batch_norm_blob95" | |
top: "batch_norm_blob95" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add47" | |
type: "Eltwise" | |
bottom: "batch_norm_blob95" | |
bottom: "relu_blob83" | |
top: "add_blob47" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu85" | |
type: "CPP" | |
bottom: "add_blob47" | |
top: "relu_blob85" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv96" | |
type: "Convolution" | |
bottom: "relu_blob85" | |
top: "conv_blob96" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm96" | |
type: "BatchNorm" | |
bottom: "conv_blob96" | |
top: "batch_norm_blob96" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale96" | |
type: "Scale" | |
bottom: "batch_norm_blob96" | |
top: "batch_norm_blob96" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu86" | |
type: "CPP" | |
bottom: "batch_norm_blob96" | |
top: "relu_blob86" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv97" | |
type: "Convolution" | |
bottom: "relu_blob86" | |
top: "conv_blob97" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm97" | |
type: "BatchNorm" | |
bottom: "conv_blob97" | |
top: "batch_norm_blob97" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale97" | |
type: "Scale" | |
bottom: "batch_norm_blob97" | |
top: "batch_norm_blob97" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add48" | |
type: "Eltwise" | |
bottom: "batch_norm_blob97" | |
bottom: "relu_blob85" | |
top: "add_blob48" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu87" | |
type: "CPP" | |
bottom: "add_blob48" | |
top: "relu_blob87" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
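# Next encoder stage: halves resolution again and widens 256 -> 384 channels
# (conv98/conv99 main path, conv100 as the 1x1 stride-2 shortcut, from relu_blob83);
# relu_blob87 stays behind as the skip feature.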
layer { | |
name: "conv98" | |
type: "Convolution" | |
bottom: "relu_blob83" | |
top: "conv_blob98" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 2 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm98" | |
type: "BatchNorm" | |
bottom: "conv_blob98" | |
top: "batch_norm_blob98" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale98" | |
type: "Scale" | |
bottom: "batch_norm_blob98" | |
top: "batch_norm_blob98" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu88" | |
type: "CPP" | |
bottom: "batch_norm_blob98" | |
top: "relu_blob88" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv99" | |
type: "Convolution" | |
bottom: "relu_blob88" | |
top: "conv_blob99" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm99" | |
type: "BatchNorm" | |
bottom: "conv_blob99" | |
top: "batch_norm_blob99" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale99" | |
type: "Scale" | |
bottom: "batch_norm_blob99" | |
top: "batch_norm_blob99" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "conv100" | |
type: "Convolution" | |
bottom: "relu_blob83" | |
top: "conv_blob100" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 2 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm100" | |
type: "BatchNorm" | |
bottom: "conv_blob100" | |
top: "batch_norm_blob100" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale100" | |
type: "Scale" | |
bottom: "batch_norm_blob100" | |
top: "batch_norm_blob100" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add49" | |
type: "Eltwise" | |
bottom: "batch_norm_blob99" | |
bottom: "batch_norm_blob100" | |
top: "add_blob49" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu89" | |
type: "CPP" | |
bottom: "add_blob49" | |
top: "relu_blob89" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv101" | |
type: "Convolution" | |
bottom: "relu_blob89" | |
top: "conv_blob101" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm101" | |
type: "BatchNorm" | |
bottom: "conv_blob101" | |
top: "batch_norm_blob101" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale101" | |
type: "Scale" | |
bottom: "batch_norm_blob101" | |
top: "batch_norm_blob101" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu90" | |
type: "CPP" | |
bottom: "batch_norm_blob101" | |
top: "relu_blob90" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv102" | |
type: "Convolution" | |
bottom: "relu_blob90" | |
top: "conv_blob102" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm102" | |
type: "BatchNorm" | |
bottom: "conv_blob102" | |
top: "batch_norm_blob102" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale102" | |
type: "Scale" | |
bottom: "batch_norm_blob102" | |
top: "batch_norm_blob102" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add50" | |
type: "Eltwise" | |
bottom: "batch_norm_blob102" | |
bottom: "relu_blob89" | |
top: "add_blob50" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu91" | |
type: "CPP" | |
bottom: "add_blob50" | |
top: "relu_blob91" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv103" | |
type: "Convolution" | |
bottom: "relu_blob91" | |
top: "conv_blob103" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm103" | |
type: "BatchNorm" | |
bottom: "conv_blob103" | |
top: "batch_norm_blob103" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale103" | |
type: "Scale" | |
bottom: "batch_norm_blob103" | |
top: "batch_norm_blob103" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu92" | |
type: "CPP" | |
bottom: "batch_norm_blob103" | |
top: "relu_blob92" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv104" | |
type: "Convolution" | |
bottom: "relu_blob92" | |
top: "conv_blob104" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm104" | |
type: "BatchNorm" | |
bottom: "conv_blob104" | |
top: "batch_norm_blob104" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale104" | |
type: "Scale" | |
bottom: "batch_norm_blob104" | |
top: "batch_norm_blob104" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add51" | |
type: "Eltwise" | |
bottom: "batch_norm_blob104" | |
bottom: "relu_blob91" | |
top: "add_blob51" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu93" | |
type: "CPP" | |
bottom: "add_blob51" | |
top: "relu_blob93" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv105" | |
type: "Convolution" | |
bottom: "relu_blob93" | |
top: "conv_blob105" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm105" | |
type: "BatchNorm" | |
bottom: "conv_blob105" | |
top: "batch_norm_blob105" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale105" | |
type: "Scale" | |
bottom: "batch_norm_blob105" | |
top: "batch_norm_blob105" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu94" | |
type: "CPP" | |
bottom: "batch_norm_blob105" | |
top: "relu_blob94" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv106" | |
type: "Convolution" | |
bottom: "relu_blob94" | |
top: "conv_blob106" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm106" | |
type: "BatchNorm" | |
bottom: "conv_blob106" | |
top: "batch_norm_blob106" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale106" | |
type: "Scale" | |
bottom: "batch_norm_blob106" | |
top: "batch_norm_blob106" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add52" | |
type: "Eltwise" | |
bottom: "batch_norm_blob106" | |
bottom: "relu_blob93" | |
top: "add_blob52" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu95" | |
type: "CPP" | |
bottom: "add_blob52" | |
top: "relu_blob95" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
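# Encoder stage at 384 channels: same stride-2 residual pattern (conv107/conv108
# main path plus 1x1 shortcut conv109) from relu_blob91; skip feature is relu_blob95.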
layer { | |
name: "conv107" | |
type: "Convolution" | |
bottom: "relu_blob91" | |
top: "conv_blob107" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 2 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm107" | |
type: "BatchNorm" | |
bottom: "conv_blob107" | |
top: "batch_norm_blob107" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale107" | |
type: "Scale" | |
bottom: "batch_norm_blob107" | |
top: "batch_norm_blob107" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu96" | |
type: "CPP" | |
bottom: "batch_norm_blob107" | |
top: "relu_blob96" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv108" | |
type: "Convolution" | |
bottom: "relu_blob96" | |
top: "conv_blob108" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm108" | |
type: "BatchNorm" | |
bottom: "conv_blob108" | |
top: "batch_norm_blob108" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale108" | |
type: "Scale" | |
bottom: "batch_norm_blob108" | |
top: "batch_norm_blob108" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "conv109" | |
type: "Convolution" | |
bottom: "relu_blob91" | |
top: "conv_blob109" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 2 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm109" | |
type: "BatchNorm" | |
bottom: "conv_blob109" | |
top: "batch_norm_blob109" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale109" | |
type: "Scale" | |
bottom: "batch_norm_blob109" | |
top: "batch_norm_blob109" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add53" | |
type: "Eltwise" | |
bottom: "batch_norm_blob108" | |
bottom: "batch_norm_blob109" | |
top: "add_blob53" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu97" | |
type: "CPP" | |
bottom: "add_blob53" | |
top: "relu_blob97" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv110" | |
type: "Convolution" | |
bottom: "relu_blob97" | |
top: "conv_blob110" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm110" | |
type: "BatchNorm" | |
bottom: "conv_blob110" | |
top: "batch_norm_blob110" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale110" | |
type: "Scale" | |
bottom: "batch_norm_blob110" | |
top: "batch_norm_blob110" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu98" | |
type: "CPP" | |
bottom: "batch_norm_blob110" | |
top: "relu_blob98" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv111" | |
type: "Convolution" | |
bottom: "relu_blob98" | |
top: "conv_blob111" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm111" | |
type: "BatchNorm" | |
bottom: "conv_blob111" | |
top: "batch_norm_blob111" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale111" | |
type: "Scale" | |
bottom: "batch_norm_blob111" | |
top: "batch_norm_blob111" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add54" | |
type: "Eltwise" | |
bottom: "batch_norm_blob111" | |
bottom: "relu_blob97" | |
top: "add_blob54" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu99" | |
type: "CPP" | |
bottom: "add_blob54" | |
top: "relu_blob99" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv112" | |
type: "Convolution" | |
bottom: "relu_blob99" | |
top: "conv_blob112" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm112" | |
type: "BatchNorm" | |
bottom: "conv_blob112" | |
top: "batch_norm_blob112" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale112" | |
type: "Scale" | |
bottom: "batch_norm_blob112" | |
top: "batch_norm_blob112" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu100" | |
type: "CPP" | |
bottom: "batch_norm_blob112" | |
top: "relu_blob100" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv113" | |
type: "Convolution" | |
bottom: "relu_blob100" | |
top: "conv_blob113" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm113" | |
type: "BatchNorm" | |
bottom: "conv_blob113" | |
top: "batch_norm_blob113" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale113" | |
type: "Scale" | |
bottom: "batch_norm_blob113" | |
top: "batch_norm_blob113" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add55" | |
type: "Eltwise" | |
bottom: "batch_norm_blob113" | |
bottom: "relu_blob99" | |
top: "add_blob55" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu101" | |
type: "CPP" | |
bottom: "add_blob55" | |
top: "relu_blob101" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv114" | |
type: "Convolution" | |
bottom: "relu_blob101" | |
top: "conv_blob114" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm114" | |
type: "BatchNorm" | |
bottom: "conv_blob114" | |
top: "batch_norm_blob114" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale114" | |
type: "Scale" | |
bottom: "batch_norm_blob114" | |
top: "batch_norm_blob114" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu102" | |
type: "CPP" | |
bottom: "batch_norm_blob114" | |
top: "relu_blob102" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv115" | |
type: "Convolution" | |
bottom: "relu_blob102" | |
top: "conv_blob115" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm115" | |
type: "BatchNorm" | |
bottom: "conv_blob115" | |
top: "batch_norm_blob115" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale115" | |
type: "Scale" | |
bottom: "batch_norm_blob115" | |
top: "batch_norm_blob115" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add56" | |
type: "Eltwise" | |
bottom: "batch_norm_blob115" | |
bottom: "relu_blob101" | |
top: "add_blob56" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu103" | |
type: "CPP" | |
bottom: "add_blob56" | |
top: "relu_blob103" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
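# One more stride-2 residual stage (conv116/conv117, shortcut conv118) from
# relu_blob99; skip feature is relu_blob103.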
layer { | |
name: "conv116" | |
type: "Convolution" | |
bottom: "relu_blob99" | |
top: "conv_blob116" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 2 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm116" | |
type: "BatchNorm" | |
bottom: "conv_blob116" | |
top: "batch_norm_blob116" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale116" | |
type: "Scale" | |
bottom: "batch_norm_blob116" | |
top: "batch_norm_blob116" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu104" | |
type: "CPP" | |
bottom: "batch_norm_blob116" | |
top: "relu_blob104" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv117" | |
type: "Convolution" | |
bottom: "relu_blob104" | |
top: "conv_blob117" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm117" | |
type: "BatchNorm" | |
bottom: "conv_blob117" | |
top: "batch_norm_blob117" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale117" | |
type: "Scale" | |
bottom: "batch_norm_blob117" | |
top: "batch_norm_blob117" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "conv118" | |
type: "Convolution" | |
bottom: "relu_blob99" | |
top: "conv_blob118" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 2 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm118" | |
type: "BatchNorm" | |
bottom: "conv_blob118" | |
top: "batch_norm_blob118" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale118" | |
type: "Scale" | |
bottom: "batch_norm_blob118" | |
top: "batch_norm_blob118" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add57" | |
type: "Eltwise" | |
bottom: "batch_norm_blob117" | |
bottom: "batch_norm_blob118" | |
top: "add_blob57" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu105" | |
type: "CPP" | |
bottom: "add_blob57" | |
top: "relu_blob105" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv119" | |
type: "Convolution" | |
bottom: "relu_blob105" | |
top: "conv_blob119" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm119" | |
type: "BatchNorm" | |
bottom: "conv_blob119" | |
top: "batch_norm_blob119" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale119" | |
type: "Scale" | |
bottom: "batch_norm_blob119" | |
top: "batch_norm_blob119" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu106" | |
type: "CPP" | |
bottom: "batch_norm_blob119" | |
top: "relu_blob106" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv120" | |
type: "Convolution" | |
bottom: "relu_blob106" | |
top: "conv_blob120" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm120" | |
type: "BatchNorm" | |
bottom: "conv_blob120" | |
top: "batch_norm_blob120" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale120" | |
type: "Scale" | |
bottom: "batch_norm_blob120" | |
top: "batch_norm_blob120" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add58" | |
type: "Eltwise" | |
bottom: "batch_norm_blob120" | |
bottom: "relu_blob105" | |
top: "add_blob58" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu107" | |
type: "CPP" | |
bottom: "add_blob58" | |
top: "relu_blob107" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv121" | |
type: "Convolution" | |
bottom: "relu_blob107" | |
top: "conv_blob121" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm121" | |
type: "BatchNorm" | |
bottom: "conv_blob121" | |
top: "batch_norm_blob121" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale121" | |
type: "Scale" | |
bottom: "batch_norm_blob121" | |
top: "batch_norm_blob121" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu108" | |
type: "CPP" | |
bottom: "batch_norm_blob121" | |
top: "relu_blob108" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv122" | |
type: "Convolution" | |
bottom: "relu_blob108" | |
top: "conv_blob122" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm122" | |
type: "BatchNorm" | |
bottom: "conv_blob122" | |
top: "batch_norm_blob122" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale122" | |
type: "Scale" | |
bottom: "batch_norm_blob122" | |
top: "batch_norm_blob122" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add59" | |
type: "Eltwise" | |
bottom: "batch_norm_blob122" | |
bottom: "relu_blob107" | |
top: "add_blob59" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu109" | |
type: "CPP" | |
bottom: "add_blob59" | |
top: "relu_blob109" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv123" | |
type: "Convolution" | |
bottom: "relu_blob109" | |
top: "conv_blob123" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm123" | |
type: "BatchNorm" | |
bottom: "conv_blob123" | |
top: "batch_norm_blob123" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale123" | |
type: "Scale" | |
bottom: "batch_norm_blob123" | |
top: "batch_norm_blob123" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu110" | |
type: "CPP" | |
bottom: "batch_norm_blob123" | |
top: "relu_blob110" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv124" | |
type: "Convolution" | |
bottom: "relu_blob110" | |
top: "conv_blob124" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm124" | |
type: "BatchNorm" | |
bottom: "conv_blob124" | |
top: "batch_norm_blob124" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale124" | |
type: "Scale" | |
bottom: "batch_norm_blob124" | |
top: "batch_norm_blob124" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add60" | |
type: "Eltwise" | |
bottom: "batch_norm_blob124" | |
bottom: "relu_blob109" | |
top: "add_blob60" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu111" | |
type: "CPP" | |
bottom: "add_blob60" | |
top: "relu_blob111" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
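# Deepest encoder stage, widening to 512 channels: conv125 (3x3, stride 2) ->
# conv126, with 1x1 stride-2 shortcut conv127, all from relu_blob107; skip feature
# is relu_blob111.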
layer { | |
name: "conv125" | |
type: "Convolution" | |
bottom: "relu_blob107" | |
top: "conv_blob125" | |
convolution_param { | |
num_output: 512 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 2 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm125" | |
type: "BatchNorm" | |
bottom: "conv_blob125" | |
top: "batch_norm_blob125" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale125" | |
type: "Scale" | |
bottom: "batch_norm_blob125" | |
top: "batch_norm_blob125" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu112" | |
type: "CPP" | |
bottom: "batch_norm_blob125" | |
top: "relu_blob112" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv126" | |
type: "Convolution" | |
bottom: "relu_blob112" | |
top: "conv_blob126" | |
convolution_param { | |
num_output: 512 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm126" | |
type: "BatchNorm" | |
bottom: "conv_blob126" | |
top: "batch_norm_blob126" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale126" | |
type: "Scale" | |
bottom: "batch_norm_blob126" | |
top: "batch_norm_blob126" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "conv127" | |
type: "Convolution" | |
bottom: "relu_blob107" | |
top: "conv_blob127" | |
convolution_param { | |
num_output: 512 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 2 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm127" | |
type: "BatchNorm" | |
bottom: "conv_blob127" | |
top: "batch_norm_blob127" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale127" | |
type: "Scale" | |
bottom: "batch_norm_blob127" | |
top: "batch_norm_blob127" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add61" | |
type: "Eltwise" | |
bottom: "batch_norm_blob126" | |
bottom: "batch_norm_blob127" | |
top: "add_blob61" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu113" | |
type: "CPP" | |
bottom: "add_blob61" | |
top: "relu_blob113" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
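# 512-channel residual blocks at the lowest resolution (conv128..add62 and the
# blocks that follow), presumably refining the bottleneck feature before the second
# decoder's upsampling begins.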
layer { | |
name: "conv128" | |
type: "Convolution" | |
bottom: "relu_blob113" | |
top: "conv_blob128" | |
convolution_param { | |
num_output: 512 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm128" | |
type: "BatchNorm" | |
bottom: "conv_blob128" | |
top: "batch_norm_blob128" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale128" | |
type: "Scale" | |
bottom: "batch_norm_blob128" | |
top: "batch_norm_blob128" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu114" | |
type: "CPP" | |
bottom: "batch_norm_blob128" | |
top: "relu_blob114" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv129" | |
type: "Convolution" | |
bottom: "relu_blob114" | |
top: "conv_blob129" | |
convolution_param { | |
num_output: 512 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm129" | |
type: "BatchNorm" | |
bottom: "conv_blob129" | |
top: "batch_norm_blob129" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale129" | |
type: "Scale" | |
bottom: "batch_norm_blob129" | |
top: "batch_norm_blob129" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add62" | |
type: "Eltwise" | |
bottom: "batch_norm_blob129" | |
bottom: "relu_blob113" | |
top: "add_blob62" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu115" | |
type: "CPP" | |
bottom: "add_blob62" | |
top: "relu_blob115" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv130" | |
type: "Convolution" | |
bottom: "relu_blob115" | |
top: "conv_blob130" | |
convolution_param { | |
num_output: 512 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm130" | |
type: "BatchNorm" | |
bottom: "conv_blob130" | |
top: "batch_norm_blob130" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale130" | |
type: "Scale" | |
bottom: "batch_norm_blob130" | |
top: "batch_norm_blob130" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu116" | |
type: "CPP" | |
bottom: "batch_norm_blob130" | |
top: "relu_blob116" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv131" | |
type: "Convolution" | |
bottom: "relu_blob116" | |
top: "conv_blob131" | |
convolution_param { | |
num_output: 512 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm131" | |
type: "BatchNorm" | |
bottom: "conv_blob131" | |
top: "batch_norm_blob131" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale131" | |
type: "Scale" | |
bottom: "batch_norm_blob131" | |
top: "batch_norm_blob131" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add63" | |
type: "Eltwise" | |
bottom: "batch_norm_blob131" | |
bottom: "relu_blob115" | |
top: "add_blob63" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu117" | |
type: "CPP" | |
bottom: "add_blob63" | |
top: "relu_blob117" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv132" | |
type: "Convolution" | |
bottom: "relu_blob117" | |
top: "conv_blob132" | |
convolution_param { | |
num_output: 512 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm132" | |
type: "BatchNorm" | |
bottom: "conv_blob132" | |
top: "batch_norm_blob132" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale132" | |
type: "Scale" | |
bottom: "batch_norm_blob132" | |
top: "batch_norm_blob132" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu118" | |
type: "CPP" | |
bottom: "batch_norm_blob132" | |
top: "relu_blob118" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv133" | |
type: "Convolution" | |
bottom: "relu_blob118" | |
top: "conv_blob133" | |
convolution_param { | |
num_output: 512 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm133" | |
type: "BatchNorm" | |
bottom: "conv_blob133" | |
top: "batch_norm_blob133" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale133" | |
type: "Scale" | |
bottom: "batch_norm_blob133" | |
top: "batch_norm_blob133" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add64" | |
type: "Eltwise" | |
bottom: "batch_norm_blob133" | |
bottom: "relu_blob117" | |
top: "add_blob64" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu119" | |
type: "CPP" | |
bottom: "add_blob64" | |
top: "relu_blob119" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv134" | |
type: "Convolution" | |
bottom: "relu_blob119" | |
top: "conv_blob134" | |
convolution_param { | |
num_output: 512 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm134" | |
type: "BatchNorm" | |
bottom: "conv_blob134" | |
top: "batch_norm_blob134" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale134" | |
type: "Scale" | |
bottom: "batch_norm_blob134" | |
top: "batch_norm_blob134" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu120" | |
type: "CPP" | |
bottom: "batch_norm_blob134" | |
top: "relu_blob120" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv135" | |
type: "Convolution" | |
bottom: "relu_blob120" | |
top: "conv_blob135" | |
convolution_param { | |
num_output: 512 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm135" | |
type: "BatchNorm" | |
bottom: "conv_blob135" | |
top: "batch_norm_blob135" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale135" | |
type: "Scale" | |
bottom: "batch_norm_blob135" | |
top: "batch_norm_blob135" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add65" | |
type: "Eltwise" | |
bottom: "batch_norm_blob135" | |
bottom: "relu_blob119" | |
top: "add_blob65" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu121" | |
type: "CPP" | |
bottom: "add_blob65" | |
top: "relu_blob121" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv136" | |
type: "Convolution" | |
bottom: "relu_blob121" | |
top: "conv_blob136" | |
convolution_param { | |
num_output: 512 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm136" | |
type: "BatchNorm" | |
bottom: "conv_blob136" | |
top: "batch_norm_blob136" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale136" | |
type: "Scale" | |
bottom: "batch_norm_blob136" | |
top: "batch_norm_blob136" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu122" | |
type: "CPP" | |
bottom: "batch_norm_blob136" | |
top: "relu_blob122" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv137" | |
type: "Convolution" | |
bottom: "relu_blob122" | |
top: "conv_blob137" | |
convolution_param { | |
num_output: 512 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm137" | |
type: "BatchNorm" | |
bottom: "conv_blob137" | |
top: "batch_norm_blob137" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale137" | |
type: "Scale" | |
bottom: "batch_norm_blob137" | |
top: "batch_norm_blob137" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add66" | |
type: "Eltwise" | |
bottom: "batch_norm_blob137" | |
bottom: "relu_blob121" | |
top: "add_blob66" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu123" | |
type: "CPP" | |
bottom: "add_blob66" | |
top: "relu_blob123" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv138" | |
type: "Convolution" | |
bottom: "relu_blob123" | |
top: "conv_blob138" | |
convolution_param { | |
num_output: 512 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm138" | |
type: "BatchNorm" | |
bottom: "conv_blob138" | |
top: "batch_norm_blob138" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale138" | |
type: "Scale" | |
bottom: "batch_norm_blob138" | |
top: "batch_norm_blob138" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu124" | |
type: "CPP" | |
bottom: "batch_norm_blob138" | |
top: "relu_blob124" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv139" | |
type: "Convolution" | |
bottom: "relu_blob124" | |
top: "conv_blob139" | |
convolution_param { | |
num_output: 512 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm139" | |
type: "BatchNorm" | |
bottom: "conv_blob139" | |
top: "batch_norm_blob139" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale139" | |
type: "Scale" | |
bottom: "batch_norm_blob139" | |
top: "batch_norm_blob139" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add67" | |
type: "Eltwise" | |
bottom: "batch_norm_blob139" | |
bottom: "relu_blob123" | |
top: "add_blob67" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu125" | |
type: "CPP" | |
bottom: "add_blob67" | |
top: "relu_blob125" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
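# --- Decoder entry: channel reduction 512 -> 384 ---
# conv140/conv141 (3x3) plus the 1x1 projection conv142 form a residual
# block with a projection shortcut (add68), dropping the width from 512 to
# 384 channels before the first upsampling step.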
layer { | |
name: "conv140" | |
type: "Convolution" | |
bottom: "relu_blob125" | |
top: "conv_blob140" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm140" | |
type: "BatchNorm" | |
bottom: "conv_blob140" | |
top: "batch_norm_blob140" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale140" | |
type: "Scale" | |
bottom: "batch_norm_blob140" | |
top: "batch_norm_blob140" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu126" | |
type: "CPP" | |
bottom: "batch_norm_blob140" | |
top: "relu_blob126" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv141" | |
type: "Convolution" | |
bottom: "relu_blob126" | |
top: "conv_blob141" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm141" | |
type: "BatchNorm" | |
bottom: "conv_blob141" | |
top: "batch_norm_blob141" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale141" | |
type: "Scale" | |
bottom: "batch_norm_blob141" | |
top: "batch_norm_blob141" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "conv142" | |
type: "Convolution" | |
bottom: "relu_blob125" | |
top: "conv_blob142" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm142" | |
type: "BatchNorm" | |
bottom: "conv_blob142" | |
top: "batch_norm_blob142" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale142" | |
type: "Scale" | |
bottom: "batch_norm_blob142" | |
top: "batch_norm_blob142" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add68" | |
type: "Eltwise" | |
bottom: "batch_norm_blob141" | |
bottom: "batch_norm_blob142" | |
top: "add_blob68" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu127" | |
type: "CPP" | |
bottom: "add_blob68" | |
top: "relu_blob127" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
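# --- Upsample-and-fuse step (decoder) ---
# "Upsample" is a custom CPP layer; its param_str encodes a scale factor of
# 2 and a fixed 8x8 output size (the hard-coded sizes here and below are
# consistent with the 1x3x511x511 input declared at the top of the file).
# The upsampled map is summed with the encoder skip feature relu_blob111
# (add69) and refined by two residual units; the same pattern repeats at
# 16x16, 32x32, 64x64 and 128x128 below.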
layer { | |
name: "upsample6" | |
type: "CPP" | |
bottom: "relu_blob127" | |
top: "upsample_blob6" | |
cpp_param { | |
param_str: "scale:2; upsample_h: 8; upsample_w: 8" | |
type: "Upsample" | |
} | |
} | |
layer { | |
name: "add69" | |
type: "Eltwise" | |
bottom: "relu_blob111" | |
bottom: "upsample_blob6" | |
top: "add_blob69" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "conv143" | |
type: "Convolution" | |
bottom: "add_blob69" | |
top: "conv_blob143" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm143" | |
type: "BatchNorm" | |
bottom: "conv_blob143" | |
top: "batch_norm_blob143" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale143" | |
type: "Scale" | |
bottom: "batch_norm_blob143" | |
top: "batch_norm_blob143" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu128" | |
type: "CPP" | |
bottom: "batch_norm_blob143" | |
top: "relu_blob128" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv144" | |
type: "Convolution" | |
bottom: "relu_blob128" | |
top: "conv_blob144" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm144" | |
type: "BatchNorm" | |
bottom: "conv_blob144" | |
top: "batch_norm_blob144" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale144" | |
type: "Scale" | |
bottom: "batch_norm_blob144" | |
top: "batch_norm_blob144" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add70" | |
type: "Eltwise" | |
bottom: "batch_norm_blob144" | |
bottom: "add_blob69" | |
top: "add_blob70" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu129" | |
type: "CPP" | |
bottom: "add_blob70" | |
top: "relu_blob129" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv145" | |
type: "Convolution" | |
bottom: "relu_blob129" | |
top: "conv_blob145" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm145" | |
type: "BatchNorm" | |
bottom: "conv_blob145" | |
top: "batch_norm_blob145" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale145" | |
type: "Scale" | |
bottom: "batch_norm_blob145" | |
top: "batch_norm_blob145" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu130" | |
type: "CPP" | |
bottom: "batch_norm_blob145" | |
top: "relu_blob130" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv146" | |
type: "Convolution" | |
bottom: "relu_blob130" | |
top: "conv_blob146" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm146" | |
type: "BatchNorm" | |
bottom: "conv_blob146" | |
top: "batch_norm_blob146" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale146" | |
type: "Scale" | |
bottom: "batch_norm_blob146" | |
top: "batch_norm_blob146" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add71" | |
type: "Eltwise" | |
bottom: "batch_norm_blob146" | |
bottom: "relu_blob129" | |
top: "add_blob71" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu131" | |
type: "CPP" | |
bottom: "add_blob71" | |
top: "relu_blob131" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
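# Second decoder step: upsample to 16x16 and fuse with skip relu_blob103.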
layer { | |
name: "upsample7" | |
type: "CPP" | |
bottom: "relu_blob131" | |
top: "upsample_blob7" | |
cpp_param { | |
param_str: "scale:2; upsample_h: 16; upsample_w: 16" | |
type: "Upsample" | |
} | |
} | |
layer { | |
name: "add72" | |
type: "Eltwise" | |
bottom: "relu_blob103" | |
bottom: "upsample_blob7" | |
top: "add_blob72" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "conv147" | |
type: "Convolution" | |
bottom: "add_blob72" | |
top: "conv_blob147" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm147" | |
type: "BatchNorm" | |
bottom: "conv_blob147" | |
top: "batch_norm_blob147" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale147" | |
type: "Scale" | |
bottom: "batch_norm_blob147" | |
top: "batch_norm_blob147" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu132" | |
type: "CPP" | |
bottom: "batch_norm_blob147" | |
top: "relu_blob132" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv148" | |
type: "Convolution" | |
bottom: "relu_blob132" | |
top: "conv_blob148" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm148" | |
type: "BatchNorm" | |
bottom: "conv_blob148" | |
top: "batch_norm_blob148" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale148" | |
type: "Scale" | |
bottom: "batch_norm_blob148" | |
top: "batch_norm_blob148" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add73" | |
type: "Eltwise" | |
bottom: "batch_norm_blob148" | |
bottom: "add_blob72" | |
top: "add_blob73" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu133" | |
type: "CPP" | |
bottom: "add_blob73" | |
top: "relu_blob133" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv149" | |
type: "Convolution" | |
bottom: "relu_blob133" | |
top: "conv_blob149" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm149" | |
type: "BatchNorm" | |
bottom: "conv_blob149" | |
top: "batch_norm_blob149" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale149" | |
type: "Scale" | |
bottom: "batch_norm_blob149" | |
top: "batch_norm_blob149" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu134" | |
type: "CPP" | |
bottom: "batch_norm_blob149" | |
top: "relu_blob134" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv150" | |
type: "Convolution" | |
bottom: "relu_blob134" | |
top: "conv_blob150" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm150" | |
type: "BatchNorm" | |
bottom: "conv_blob150" | |
top: "batch_norm_blob150" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale150" | |
type: "Scale" | |
bottom: "batch_norm_blob150" | |
top: "batch_norm_blob150" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add74" | |
type: "Eltwise" | |
bottom: "batch_norm_blob150" | |
bottom: "relu_blob133" | |
top: "add_blob74" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu135" | |
type: "CPP" | |
bottom: "add_blob74" | |
top: "relu_blob135" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
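# Third decoder step: upsample to 32x32 and fuse with skip relu_blob95.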
layer { | |
name: "upsample8" | |
type: "CPP" | |
bottom: "relu_blob135" | |
top: "upsample_blob8" | |
cpp_param { | |
param_str: "scale:2; upsample_h: 32; upsample_w: 32" | |
type: "Upsample" | |
} | |
} | |
layer { | |
name: "add75" | |
type: "Eltwise" | |
bottom: "relu_blob95" | |
bottom: "upsample_blob8" | |
top: "add_blob75" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "conv151" | |
type: "Convolution" | |
bottom: "add_blob75" | |
top: "conv_blob151" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm151" | |
type: "BatchNorm" | |
bottom: "conv_blob151" | |
top: "batch_norm_blob151" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale151" | |
type: "Scale" | |
bottom: "batch_norm_blob151" | |
top: "batch_norm_blob151" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu136" | |
type: "CPP" | |
bottom: "batch_norm_blob151" | |
top: "relu_blob136" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv152" | |
type: "Convolution" | |
bottom: "relu_blob136" | |
top: "conv_blob152" | |
convolution_param { | |
num_output: 384 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm152" | |
type: "BatchNorm" | |
bottom: "conv_blob152" | |
top: "batch_norm_blob152" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale152" | |
type: "Scale" | |
bottom: "batch_norm_blob152" | |
top: "batch_norm_blob152" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add76" | |
type: "Eltwise" | |
bottom: "batch_norm_blob152" | |
bottom: "add_blob75" | |
top: "add_blob76" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu137" | |
type: "CPP" | |
bottom: "add_blob76" | |
top: "relu_blob137" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
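# Projection residual block (add77): conv153/conv154 with the 1x1 shortcut
# conv155 reduce the width from 384 to 256 channels.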
layer { | |
name: "conv153" | |
type: "Convolution" | |
bottom: "relu_blob137" | |
top: "conv_blob153" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm153" | |
type: "BatchNorm" | |
bottom: "conv_blob153" | |
top: "batch_norm_blob153" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale153" | |
type: "Scale" | |
bottom: "batch_norm_blob153" | |
top: "batch_norm_blob153" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu138" | |
type: "CPP" | |
bottom: "batch_norm_blob153" | |
top: "relu_blob138" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv154" | |
type: "Convolution" | |
bottom: "relu_blob138" | |
top: "conv_blob154" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm154" | |
type: "BatchNorm" | |
bottom: "conv_blob154" | |
top: "batch_norm_blob154" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale154" | |
type: "Scale" | |
bottom: "batch_norm_blob154" | |
top: "batch_norm_blob154" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "conv155" | |
type: "Convolution" | |
bottom: "relu_blob137" | |
top: "conv_blob155" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm155" | |
type: "BatchNorm" | |
bottom: "conv_blob155" | |
top: "batch_norm_blob155" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale155" | |
type: "Scale" | |
bottom: "batch_norm_blob155" | |
top: "batch_norm_blob155" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add77" | |
type: "Eltwise" | |
bottom: "batch_norm_blob154" | |
bottom: "batch_norm_blob155" | |
top: "add_blob77" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu139" | |
type: "CPP" | |
bottom: "add_blob77" | |
top: "relu_blob139" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
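# Fourth decoder step: upsample to 64x64 and fuse with skip relu_blob87.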
layer { | |
name: "upsample9" | |
type: "CPP" | |
bottom: "relu_blob139" | |
top: "upsample_blob9" | |
cpp_param { | |
param_str: "scale:2; upsample_h: 64; upsample_w: 64" | |
type: "Upsample" | |
} | |
} | |
layer { | |
name: "add78" | |
type: "Eltwise" | |
bottom: "relu_blob87" | |
bottom: "upsample_blob9" | |
top: "add_blob78" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "conv156" | |
type: "Convolution" | |
bottom: "add_blob78" | |
top: "conv_blob156" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm156" | |
type: "BatchNorm" | |
bottom: "conv_blob156" | |
top: "batch_norm_blob156" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale156" | |
type: "Scale" | |
bottom: "batch_norm_blob156" | |
top: "batch_norm_blob156" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu140" | |
type: "CPP" | |
bottom: "batch_norm_blob156" | |
top: "relu_blob140" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv157" | |
type: "Convolution" | |
bottom: "relu_blob140" | |
top: "conv_blob157" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm157" | |
type: "BatchNorm" | |
bottom: "conv_blob157" | |
top: "batch_norm_blob157" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale157" | |
type: "Scale" | |
bottom: "batch_norm_blob157" | |
top: "batch_norm_blob157" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add79" | |
type: "Eltwise" | |
bottom: "batch_norm_blob157" | |
bottom: "add_blob78" | |
top: "add_blob79" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu141" | |
type: "CPP" | |
bottom: "add_blob79" | |
top: "relu_blob141" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv158" | |
type: "Convolution" | |
bottom: "relu_blob141" | |
top: "conv_blob158" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm158" | |
type: "BatchNorm" | |
bottom: "conv_blob158" | |
top: "batch_norm_blob158" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale158" | |
type: "Scale" | |
bottom: "batch_norm_blob158" | |
top: "batch_norm_blob158" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu142" | |
type: "CPP" | |
bottom: "batch_norm_blob158" | |
top: "relu_blob142" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv159" | |
type: "Convolution" | |
bottom: "relu_blob142" | |
top: "conv_blob159" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm159" | |
type: "BatchNorm" | |
bottom: "conv_blob159" | |
top: "batch_norm_blob159" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale159" | |
type: "Scale" | |
bottom: "batch_norm_blob159" | |
top: "batch_norm_blob159" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add80" | |
type: "Eltwise" | |
bottom: "batch_norm_blob159" | |
bottom: "relu_blob141" | |
top: "add_blob80" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu143" | |
type: "CPP" | |
bottom: "add_blob80" | |
top: "relu_blob143" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
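# Final decoder step: upsample to 128x128 (output stride 4 for the 511x511
# input) and fuse with the highest-resolution skip relu_blob79.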
layer { | |
name: "upsample10" | |
type: "CPP" | |
bottom: "relu_blob143" | |
top: "upsample_blob10" | |
cpp_param { | |
param_str: "scale:2; upsample_h: 128; upsample_w: 128" | |
type: "Upsample" | |
} | |
} | |
layer { | |
name: "add81" | |
type: "Eltwise" | |
bottom: "relu_blob79" | |
bottom: "upsample_blob10" | |
top: "add_blob81" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "conv160" | |
type: "Convolution" | |
bottom: "add_blob81" | |
top: "conv_blob160" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm160" | |
type: "BatchNorm" | |
bottom: "conv_blob160" | |
top: "batch_norm_blob160" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale160" | |
type: "Scale" | |
bottom: "batch_norm_blob160" | |
top: "batch_norm_blob160" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu144" | |
type: "CPP" | |
bottom: "batch_norm_blob160" | |
top: "relu_blob144" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
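# --- Top-left corner pooling module (corner pooling as described in the
# CornerNet paper) ---
# Two parallel 3x3/128 branches feed the custom TopCornerPoolLayer and
# LeftCornerPoolLayer; their sum goes through conv163+BN and is added to a
# 1x1 conv+BN shortcut from relu_blob144 (add83), then ReLU and one more
# 3x3 conv+BN+ReLU yield relu_blob148, the top-left corner feature.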
layer { | |
name: "conv161" | |
type: "Convolution" | |
bottom: "relu_blob144" | |
top: "conv_blob161" | |
convolution_param { | |
num_output: 128 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm161" | |
type: "BatchNorm" | |
bottom: "conv_blob161" | |
top: "batch_norm_blob161" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale161" | |
type: "Scale" | |
bottom: "batch_norm_blob161" | |
top: "batch_norm_blob161" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu145" | |
type: "CPP" | |
bottom: "batch_norm_blob161" | |
top: "relu_blob145" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "top_pool1" | |
type: "CPP" | |
bottom: "relu_blob145" | |
top: "top_pool_blob1" | |
cpp_param { | |
type: "TopCornerPoolLayer" | |
} | |
} | |
layer { | |
name: "conv162" | |
type: "Convolution" | |
bottom: "relu_blob144" | |
top: "conv_blob162" | |
convolution_param { | |
num_output: 128 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm162" | |
type: "BatchNorm" | |
bottom: "conv_blob162" | |
top: "batch_norm_blob162" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale162" | |
type: "Scale" | |
bottom: "batch_norm_blob162" | |
top: "batch_norm_blob162" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu146" | |
type: "CPP" | |
bottom: "batch_norm_blob162" | |
top: "relu_blob146" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "left_pool1" | |
type: "CPP" | |
bottom: "relu_blob146" | |
top: "left_pool_blob1" | |
cpp_param { | |
type: "LeftCornerPoolLayer" | |
} | |
} | |
layer { | |
name: "add82" | |
type: "Eltwise" | |
bottom: "top_pool_blob1" | |
bottom: "left_pool_blob1" | |
top: "add_blob82" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "conv163" | |
type: "Convolution" | |
bottom: "add_blob82" | |
top: "conv_blob163" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm163" | |
type: "BatchNorm" | |
bottom: "conv_blob163" | |
top: "batch_norm_blob163" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale163" | |
type: "Scale" | |
bottom: "batch_norm_blob163" | |
top: "batch_norm_blob163" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "conv164" | |
type: "Convolution" | |
bottom: "relu_blob144" | |
top: "conv_blob164" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm164" | |
type: "BatchNorm" | |
bottom: "conv_blob164" | |
top: "batch_norm_blob164" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale164" | |
type: "Scale" | |
bottom: "batch_norm_blob164" | |
top: "batch_norm_blob164" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add83" | |
type: "Eltwise" | |
bottom: "batch_norm_blob163" | |
bottom: "batch_norm_blob164" | |
top: "add_blob83" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu147" | |
type: "CPP" | |
bottom: "add_blob83" | |
top: "relu_blob147" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv165" | |
type: "Convolution" | |
bottom: "relu_blob147" | |
top: "conv_blob165" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm165" | |
type: "BatchNorm" | |
bottom: "conv_blob165" | |
top: "batch_norm_blob165" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale165" | |
type: "Scale" | |
bottom: "batch_norm_blob165" | |
top: "batch_norm_blob165" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu148" | |
type: "CPP" | |
bottom: "batch_norm_blob165" | |
top: "relu_blob148" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
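# --- Bottom-right corner pooling module ---
# Mirror of the block above, using BottomCornerPoolLayer and
# RightCornerPoolLayer; it produces relu_blob152, the bottom-right corner
# feature.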
layer { | |
name: "conv166" | |
type: "Convolution" | |
bottom: "relu_blob144" | |
top: "conv_blob166" | |
convolution_param { | |
num_output: 128 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm166" | |
type: "BatchNorm" | |
bottom: "conv_blob166" | |
top: "batch_norm_blob166" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale166" | |
type: "Scale" | |
bottom: "batch_norm_blob166" | |
top: "batch_norm_blob166" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu149" | |
type: "CPP" | |
bottom: "batch_norm_blob166" | |
top: "relu_blob149" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "bottom_pool1" | |
type: "CPP" | |
bottom: "relu_blob149" | |
top: "bottom_pool_blob1" | |
cpp_param { | |
type: "BottomCornerPoolLayer" | |
} | |
} | |
layer { | |
name: "conv167" | |
type: "Convolution" | |
bottom: "relu_blob144" | |
top: "conv_blob167" | |
convolution_param { | |
num_output: 128 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm167" | |
type: "BatchNorm" | |
bottom: "conv_blob167" | |
top: "batch_norm_blob167" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale167" | |
type: "Scale" | |
bottom: "batch_norm_blob167" | |
top: "batch_norm_blob167" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu150" | |
type: "CPP" | |
bottom: "batch_norm_blob167" | |
top: "relu_blob150" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "right_pool1" | |
type: "CPP" | |
bottom: "relu_blob150" | |
top: "right_pool_blob1" | |
cpp_param { | |
type: "RightCornerPoolLayer" | |
} | |
} | |
layer { | |
name: "add84" | |
type: "Eltwise" | |
bottom: "bottom_pool_blob1" | |
bottom: "right_pool_blob1" | |
top: "add_blob84" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "conv168" | |
type: "Convolution" | |
bottom: "add_blob84" | |
top: "conv_blob168" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm168" | |
type: "BatchNorm" | |
bottom: "conv_blob168" | |
top: "batch_norm_blob168" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale168" | |
type: "Scale" | |
bottom: "batch_norm_blob168" | |
top: "batch_norm_blob168" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "conv169" | |
type: "Convolution" | |
bottom: "relu_blob144" | |
top: "conv_blob169" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm169" | |
type: "BatchNorm" | |
bottom: "conv_blob169" | |
top: "batch_norm_blob169" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale169" | |
type: "Scale" | |
bottom: "batch_norm_blob169" | |
top: "batch_norm_blob169" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "add85" | |
type: "Eltwise" | |
bottom: "batch_norm_blob168" | |
bottom: "batch_norm_blob169" | |
top: "add_blob85" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "relu151" | |
type: "CPP" | |
bottom: "add_blob85" | |
top: "relu_blob151" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv170" | |
type: "Convolution" | |
bottom: "relu_blob151" | |
top: "conv_blob170" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "batch_norm170" | |
type: "BatchNorm" | |
bottom: "conv_blob170" | |
top: "batch_norm_blob170" | |
batch_norm_param { | |
eps: 9.999999747378752e-06 | |
} | |
} | |
layer { | |
name: "bn_scale170" | |
type: "Scale" | |
bottom: "batch_norm_blob170" | |
top: "batch_norm_blob170" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "relu152" | |
type: "CPP" | |
bottom: "batch_norm_blob170" | |
top: "relu_blob152" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
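# --- Prediction heads ---
# Each head below is Conv 3x3/256 (with bias) + ReLU followed by a 1x1
# conv, applied to the top-left feature (relu_blob148) and the bottom-right
# feature (relu_blob152):
#   conv172 / conv174: 80-channel corner heatmaps (presumably the 80 COCO
#                      categories),
#   conv176 / conv178: 1-channel corner embeddings used to pair corners,
#   conv180 / conv182: 2-channel sub-pixel corner offsets.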
layer { | |
name: "conv171" | |
type: "Convolution" | |
bottom: "relu_blob148" | |
top: "conv_blob171" | |
convolution_param { | |
num_output: 256 | |
bias_term: true | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "relu153" | |
type: "CPP" | |
bottom: "conv_blob171" | |
top: "relu_blob153" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv172" | |
type: "Convolution" | |
bottom: "relu_blob153" | |
top: "conv_blob172" | |
convolution_param { | |
num_output: 80 | |
bias_term: true | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "conv173" | |
type: "Convolution" | |
bottom: "relu_blob152" | |
top: "conv_blob173" | |
convolution_param { | |
num_output: 256 | |
bias_term: true | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "relu154" | |
type: "CPP" | |
bottom: "conv_blob173" | |
top: "relu_blob154" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv174" | |
type: "Convolution" | |
bottom: "relu_blob154" | |
top: "conv_blob174" | |
convolution_param { | |
num_output: 80 | |
bias_term: true | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "conv175" | |
type: "Convolution" | |
bottom: "relu_blob148" | |
top: "conv_blob175" | |
convolution_param { | |
num_output: 256 | |
bias_term: true | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "relu155" | |
type: "CPP" | |
bottom: "conv_blob175" | |
top: "relu_blob155" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv176" | |
type: "Convolution" | |
bottom: "relu_blob155" | |
top: "conv_blob176" | |
convolution_param { | |
num_output: 1 | |
bias_term: true | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "conv177" | |
type: "Convolution" | |
bottom: "relu_blob152" | |
top: "conv_blob177" | |
convolution_param { | |
num_output: 256 | |
bias_term: true | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "relu156" | |
type: "CPP" | |
bottom: "conv_blob177" | |
top: "relu_blob156" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv178" | |
type: "Convolution" | |
bottom: "relu_blob156" | |
top: "conv_blob178" | |
convolution_param { | |
num_output: 1 | |
bias_term: true | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "conv179" | |
type: "Convolution" | |
bottom: "relu_blob148" | |
top: "conv_blob179" | |
convolution_param { | |
num_output: 256 | |
bias_term: true | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "relu157" | |
type: "CPP" | |
bottom: "conv_blob179" | |
top: "relu_blob157" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv180" | |
type: "Convolution" | |
bottom: "relu_blob157" | |
top: "conv_blob180" | |
convolution_param { | |
num_output: 2 | |
bias_term: true | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "conv181" | |
type: "Convolution" | |
bottom: "relu_blob152" | |
top: "conv_blob181" | |
convolution_param { | |
num_output: 256 | |
bias_term: true | |
pad: 1 | |
kernel_size: 3 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "relu158" | |
type: "CPP" | |
bottom: "conv_blob181" | |
top: "relu_blob158" | |
cpp_param { | |
type: "ReLU" | |
} | |
} | |
layer { | |
name: "conv182" | |
type: "Convolution" | |
bottom: "relu_blob158" | |
top: "conv_blob182" | |
convolution_param { | |
num_output: 2 | |
bias_term: true | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
dilation: 1 | |
} | |
} |
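# The network definition ends here; its outputs are the six head blobs
# above (conv_blob172/174, conv_blob176/178, conv_blob180/182). Corner
# decoding (heatmap NMS, top-k selection, embedding matching) happens
# outside this graph. A minimal pycaffe usage sketch, assuming the custom
# CPP layers (ReLU, Upsample, the four corner pools) are compiled into
# your Caffe build; the file names below are hypothetical:
#   import caffe
#   net = caffe.Net('cornernet.prototxt', 'cornernet.caffemodel', caffe.TEST)
#   net.blobs['blob1'].data[0] = preprocessed_image  # 3x511x511, CHW
#   outputs = net.forward()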