Created
July 6, 2018 04:06
-
-
Save RedCarrottt/692156a14e6c23e532b456721f946f75 to your computer and use it in GitHub Desktop.
SqueezeNext-1.0-23-v5
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Network definition header for SqueezeNext-1.0 (2.3x width variant, v5).
name: "SqueezeNext-1.0-2.3-v5"
# Intel Caffe MKL2017 engine selection for optimized CPU primitives.
engine: "MKL2017"
# NOTE(review): a second, conflicting `name: "SqueezeNet 1.1"` line was removed.
# `name` is a singular field of NetParameter; protobuf text format rejects a
# file that assigns a non-repeated field twice, and the stale value appeared to
# be a leftover from a SqueezeNet template this file was derived from.
# Training input: ImageNet LMDB, random 227x227 crop with per-channel mean
# subtraction. (Trailing " | |" scrape artifacts removed -- they break the
# protobuf text-format parser.)
layer {
  name: "data"
  type: "Data"
  top: "data"
  top: "label"
  include {
    phase: TRAIN
  }
  transform_param {
    crop_size: 227
    # Per-channel BGR mean values (the commonly used ImageNet means).
    mean_value: 104
    mean_value: 117
    mean_value: 123
    # NOTE(review): no `mirror: true` here -- confirm against the original
    # training recipe whether horizontal-flip augmentation was intended.
  }
  data_param {
    source: "/localdisk/imagenet/compressed_lmdb/ilsvrc12_train_lmdb"
    batch_size: 32
    backend: LMDB
    prefetch: 2
    shuffle: true
  }
}
# Validation input: ImageNet val LMDB, 227x227 crop with the same mean values
# as the training layer. (Trailing " | |" scrape artifacts removed.)
layer {
  name: "data"
  type: "Data"
  top: "data"
  top: "label"
  include {
    phase: TEST
  }
  transform_param {
    crop_size: 227
    mean_value: 104
    mean_value: 117
    mean_value: 123
  }
  data_param {
    source: "/localdisk/imagenet/compressed_lmdb/ilsvrc12_val_lmdb"
    batch_size: 50
    backend: LMDB
  }
}
# Stem convolution: 5x5, stride 2, 64 outputs, no bias (BatchNorm follows).
# Fixed the fused "}layer {" -- the closing brace of the TEST data layer and
# the opening of this stanza had been jammed onto one line.
layer {
  name: "Convolution1"
  type: "Convolution"
  bottom: "data"
  top: "Convolution1"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    kernel_size: 5
    group: 1
    stride: 2
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
# BatchNorm1: normalizes Convolution1 in place. All three lr_mult/decay_mult
# params are frozen at 0 -- the running mean/variance/scale-factor blobs of a
# Caffe BatchNorm layer are updated by moving average, not by the solver.
layer {
  name: "BatchNorm1"
  type: "BatchNorm"
  bottom: "Convolution1"
  top: "Convolution1"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
# Scale1: learnable per-channel gamma (init 1.0) and beta (init 0.0) that
# complete the affine part of batch normalization.
layer {
  name: "Scale1"
  type: "Scale"
  bottom: "Convolution1"
  top: "Convolution1"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
# NOTE(review): this ReLU layer is named "conv1", which is misleading; it has
# no parameters, so renaming it (e.g. to "relu_conv1") would not affect weight
# loading -- confirm no external tooling references the name before renaming.
layer {
  name: "conv1"
  type: "ReLU"
  bottom: "Convolution1"
  top: "Convolution1"
}
# pool1: 3x3 max pooling, stride 2.
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "Convolution1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer { | |
name: "Convolution2" | |
type: "Convolution" | |
bottom: "pool1" | |
top: "Convolution2" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm2" | |
type: "BatchNorm" | |
bottom: "Convolution2" | |
top: "Convolution2" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale2" | |
type: "Scale" | |
bottom: "Convolution2" | |
top: "Convolution2" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU1" | |
type: "ReLU" | |
bottom: "Convolution2" | |
top: "Convolution2" | |
} | |
layer { | |
name: "Convolution3" | |
type: "Convolution" | |
bottom: "Convolution2" | |
top: "Convolution3" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm3" | |
type: "BatchNorm" | |
bottom: "Convolution3" | |
top: "Convolution3" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale3" | |
type: "Scale" | |
bottom: "Convolution3" | |
top: "Convolution3" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU2" | |
type: "ReLU" | |
bottom: "Convolution3" | |
top: "Convolution3" | |
} | |
layer { | |
name: "Convolution4" | |
type: "Convolution" | |
bottom: "Convolution3" | |
top: "Convolution4" | |
convolution_param { | |
num_output: 8 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm4" | |
type: "BatchNorm" | |
bottom: "Convolution4" | |
top: "Convolution4" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale4" | |
type: "Scale" | |
bottom: "Convolution4" | |
top: "Convolution4" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU3" | |
type: "ReLU" | |
bottom: "Convolution4" | |
top: "Convolution4" | |
} | |
layer { | |
name: "Convolution5" | |
type: "Convolution" | |
bottom: "Convolution4" | |
top: "Convolution5" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
pad_h: 1 | |
pad_w: 0 | |
kernel_h: 3 | |
kernel_w: 1 | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "BatchNorm5" | |
type: "BatchNorm" | |
bottom: "Convolution5" | |
top: "Convolution5" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale5" | |
type: "Scale" | |
bottom: "Convolution5" | |
top: "Convolution5" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU4" | |
type: "ReLU" | |
bottom: "Convolution5" | |
top: "Convolution5" | |
} | |
layer { | |
name: "Convolution6" | |
type: "Convolution" | |
bottom: "Convolution5" | |
top: "Convolution6" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
pad_h: 0 | |
pad_w: 1 | |
kernel_h: 1 | |
kernel_w: 3 | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "BatchNorm6" | |
type: "BatchNorm" | |
bottom: "Convolution6" | |
top: "Convolution6" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale6" | |
type: "Scale" | |
bottom: "Convolution6" | |
top: "Convolution6" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU5" | |
type: "ReLU" | |
bottom: "Convolution6" | |
top: "Convolution6" | |
} | |
layer { | |
name: "Convolution7" | |
type: "Convolution" | |
bottom: "Convolution6" | |
top: "Convolution7" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm7" | |
type: "BatchNorm" | |
bottom: "Convolution7" | |
top: "Convolution7" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale7" | |
type: "Scale" | |
bottom: "Convolution7" | |
top: "Convolution7" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU6" | |
type: "ReLU" | |
bottom: "Convolution7" | |
top: "Convolution7" | |
} | |
layer { | |
name: "Eltwise1" | |
type: "Eltwise" | |
bottom: "Convolution2" | |
bottom: "Convolution7" | |
top: "Eltwise1" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "ReLU7" | |
type: "ReLU" | |
bottom: "Eltwise1" | |
top: "Eltwise1" | |
} | |
layer { | |
name: "Convolution8" | |
type: "Convolution" | |
bottom: "Eltwise1" | |
top: "Convolution8" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm8" | |
type: "BatchNorm" | |
bottom: "Convolution8" | |
top: "Convolution8" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale8" | |
type: "Scale" | |
bottom: "Convolution8" | |
top: "Convolution8" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU8" | |
type: "ReLU" | |
bottom: "Convolution8" | |
top: "Convolution8" | |
} | |
layer { | |
name: "Convolution9" | |
type: "Convolution" | |
bottom: "Convolution8" | |
top: "Convolution9" | |
convolution_param { | |
num_output: 8 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm9" | |
type: "BatchNorm" | |
bottom: "Convolution9" | |
top: "Convolution9" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale9" | |
type: "Scale" | |
bottom: "Convolution9" | |
top: "Convolution9" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU9" | |
type: "ReLU" | |
bottom: "Convolution9" | |
top: "Convolution9" | |
} | |
layer { | |
name: "Convolution10" | |
type: "Convolution" | |
bottom: "Convolution9" | |
top: "Convolution10" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
pad_h: 0 | |
pad_w: 1 | |
kernel_h: 1 | |
kernel_w: 3 | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "BatchNorm10" | |
type: "BatchNorm" | |
bottom: "Convolution10" | |
top: "Convolution10" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale10" | |
type: "Scale" | |
bottom: "Convolution10" | |
top: "Convolution10" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU10" | |
type: "ReLU" | |
bottom: "Convolution10" | |
top: "Convolution10" | |
} | |
layer { | |
name: "Convolution11" | |
type: "Convolution" | |
bottom: "Convolution10" | |
top: "Convolution11" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
pad_h: 1 | |
pad_w: 0 | |
kernel_h: 3 | |
kernel_w: 1 | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "BatchNorm11" | |
type: "BatchNorm" | |
bottom: "Convolution11" | |
top: "Convolution11" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale11" | |
type: "Scale" | |
bottom: "Convolution11" | |
top: "Convolution11" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU11" | |
type: "ReLU" | |
bottom: "Convolution11" | |
top: "Convolution11" | |
} | |
layer { | |
name: "Convolution12" | |
type: "Convolution" | |
bottom: "Convolution11" | |
top: "Convolution12" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm12" | |
type: "BatchNorm" | |
bottom: "Convolution12" | |
top: "Convolution12" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale12" | |
type: "Scale" | |
bottom: "Convolution12" | |
top: "Convolution12" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU12" | |
type: "ReLU" | |
bottom: "Convolution12" | |
top: "Convolution12" | |
} | |
layer { | |
name: "Eltwise2" | |
type: "Eltwise" | |
bottom: "Eltwise1" | |
bottom: "Convolution12" | |
top: "Eltwise2" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "bypass_128_3" | |
type: "ReLU" | |
bottom: "Eltwise2" | |
top: "Eltwise2" | |
} | |
layer { | |
name: "Convolution13" | |
type: "Convolution" | |
bottom: "Eltwise2" | |
top: "Convolution13" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 2 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm13" | |
type: "BatchNorm" | |
bottom: "Convolution13" | |
top: "Convolution13" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale13" | |
type: "Scale" | |
bottom: "Convolution13" | |
top: "Convolution13" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU13" | |
type: "ReLU" | |
bottom: "Convolution13" | |
top: "Convolution13" | |
} | |
layer { | |
name: "Convolution14" | |
type: "Convolution" | |
bottom: "Convolution13" | |
top: "Convolution14" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm14" | |
type: "BatchNorm" | |
bottom: "Convolution14" | |
top: "Convolution14" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale14" | |
type: "Scale" | |
bottom: "Convolution14" | |
top: "Convolution14" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU14" | |
type: "ReLU" | |
bottom: "Convolution14" | |
top: "Convolution14" | |
} | |
layer { | |
name: "Convolution15" | |
type: "Convolution" | |
bottom: "Convolution14" | |
top: "Convolution15" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm15" | |
type: "BatchNorm" | |
bottom: "Convolution15" | |
top: "Convolution15" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale15" | |
type: "Scale" | |
bottom: "Convolution15" | |
top: "Convolution15" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU15" | |
type: "ReLU" | |
bottom: "Convolution15" | |
top: "Convolution15" | |
} | |
layer { | |
name: "Convolution16" | |
type: "Convolution" | |
bottom: "Convolution15" | |
top: "Convolution16" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
pad_h: 1 | |
pad_w: 0 | |
kernel_h: 3 | |
kernel_w: 1 | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "BatchNorm16" | |
type: "BatchNorm" | |
bottom: "Convolution16" | |
top: "Convolution16" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale16" | |
type: "Scale" | |
bottom: "Convolution16" | |
top: "Convolution16" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU16" | |
type: "ReLU" | |
bottom: "Convolution16" | |
top: "Convolution16" | |
} | |
layer { | |
name: "Convolution17" | |
type: "Convolution" | |
bottom: "Convolution16" | |
top: "Convolution17" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
pad_h: 0 | |
pad_w: 1 | |
kernel_h: 1 | |
kernel_w: 3 | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "BatchNorm17" | |
type: "BatchNorm" | |
bottom: "Convolution17" | |
top: "Convolution17" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale17" | |
type: "Scale" | |
bottom: "Convolution17" | |
top: "Convolution17" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU17" | |
type: "ReLU" | |
bottom: "Convolution17" | |
top: "Convolution17" | |
} | |
layer { | |
name: "Convolution18" | |
type: "Convolution" | |
bottom: "Convolution17" | |
top: "Convolution18" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm18" | |
type: "BatchNorm" | |
bottom: "Convolution18" | |
top: "Convolution18" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale18" | |
type: "Scale" | |
bottom: "Convolution18" | |
top: "Convolution18" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU18" | |
type: "ReLU" | |
bottom: "Convolution18" | |
top: "Convolution18" | |
} | |
layer { | |
name: "Eltwise3" | |
type: "Eltwise" | |
bottom: "Convolution13" | |
bottom: "Convolution18" | |
top: "Eltwise3" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "ReLU19" | |
type: "ReLU" | |
bottom: "Eltwise3" | |
top: "Eltwise3" | |
} | |
layer { | |
name: "Convolution19" | |
type: "Convolution" | |
bottom: "Eltwise3" | |
top: "Convolution19" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm19" | |
type: "BatchNorm" | |
bottom: "Convolution19" | |
top: "Convolution19" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale19" | |
type: "Scale" | |
bottom: "Convolution19" | |
top: "Convolution19" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU20" | |
type: "ReLU" | |
bottom: "Convolution19" | |
top: "Convolution19" | |
} | |
layer { | |
name: "Convolution20" | |
type: "Convolution" | |
bottom: "Convolution19" | |
top: "Convolution20" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm20" | |
type: "BatchNorm" | |
bottom: "Convolution20" | |
top: "Convolution20" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale20" | |
type: "Scale" | |
bottom: "Convolution20" | |
top: "Convolution20" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU21" | |
type: "ReLU" | |
bottom: "Convolution20" | |
top: "Convolution20" | |
} | |
layer { | |
name: "Convolution21" | |
type: "Convolution" | |
bottom: "Convolution20" | |
top: "Convolution21" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
pad_h: 0 | |
pad_w: 1 | |
kernel_h: 1 | |
kernel_w: 3 | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "BatchNorm21" | |
type: "BatchNorm" | |
bottom: "Convolution21" | |
top: "Convolution21" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale21" | |
type: "Scale" | |
bottom: "Convolution21" | |
top: "Convolution21" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU22" | |
type: "ReLU" | |
bottom: "Convolution21" | |
top: "Convolution21" | |
} | |
layer { | |
name: "Convolution22" | |
type: "Convolution" | |
bottom: "Convolution21" | |
top: "Convolution22" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
pad_h: 1 | |
pad_w: 0 | |
kernel_h: 3 | |
kernel_w: 1 | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "BatchNorm22" | |
type: "BatchNorm" | |
bottom: "Convolution22" | |
top: "Convolution22" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale22" | |
type: "Scale" | |
bottom: "Convolution22" | |
top: "Convolution22" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU23" | |
type: "ReLU" | |
bottom: "Convolution22" | |
top: "Convolution22" | |
} | |
layer { | |
name: "Convolution23" | |
type: "Convolution" | |
bottom: "Convolution22" | |
top: "Convolution23" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm23" | |
type: "BatchNorm" | |
bottom: "Convolution23" | |
top: "Convolution23" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale23" | |
type: "Scale" | |
bottom: "Convolution23" | |
top: "Convolution23" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU24" | |
type: "ReLU" | |
bottom: "Convolution23" | |
top: "Convolution23" | |
} | |
layer { | |
name: "Eltwise4" | |
type: "Eltwise" | |
bottom: "Eltwise3" | |
bottom: "Convolution23" | |
top: "Eltwise4" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "ReLU25" | |
type: "ReLU" | |
bottom: "Eltwise4" | |
top: "Eltwise4" | |
} | |
layer { | |
name: "Convolution24" | |
type: "Convolution" | |
bottom: "Eltwise4" | |
top: "Convolution24" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm24" | |
type: "BatchNorm" | |
bottom: "Convolution24" | |
top: "Convolution24" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale24" | |
type: "Scale" | |
bottom: "Convolution24" | |
top: "Convolution24" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU26" | |
type: "ReLU" | |
bottom: "Convolution24" | |
top: "Convolution24" | |
} | |
layer { | |
name: "Convolution25" | |
type: "Convolution" | |
bottom: "Convolution24" | |
top: "Convolution25" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm25" | |
type: "BatchNorm" | |
bottom: "Convolution25" | |
top: "Convolution25" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale25" | |
type: "Scale" | |
bottom: "Convolution25" | |
top: "Convolution25" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU27" | |
type: "ReLU" | |
bottom: "Convolution25" | |
top: "Convolution25" | |
} | |
layer { | |
name: "Convolution26" | |
type: "Convolution" | |
bottom: "Convolution25" | |
top: "Convolution26" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
pad_h: 1 | |
pad_w: 0 | |
kernel_h: 3 | |
kernel_w: 1 | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "BatchNorm26" | |
type: "BatchNorm" | |
bottom: "Convolution26" | |
top: "Convolution26" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale26" | |
type: "Scale" | |
bottom: "Convolution26" | |
top: "Convolution26" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU28" | |
type: "ReLU" | |
bottom: "Convolution26" | |
top: "Convolution26" | |
} | |
layer { | |
name: "Convolution27" | |
type: "Convolution" | |
bottom: "Convolution26" | |
top: "Convolution27" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
pad_h: 0 | |
pad_w: 1 | |
kernel_h: 1 | |
kernel_w: 3 | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "BatchNorm27" | |
type: "BatchNorm" | |
bottom: "Convolution27" | |
top: "Convolution27" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale27" | |
type: "Scale" | |
bottom: "Convolution27" | |
top: "Convolution27" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU29" | |
type: "ReLU" | |
bottom: "Convolution27" | |
top: "Convolution27" | |
} | |
layer { | |
name: "Convolution28" | |
type: "Convolution" | |
bottom: "Convolution27" | |
top: "Convolution28" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm28" | |
type: "BatchNorm" | |
bottom: "Convolution28" | |
top: "Convolution28" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale28" | |
type: "Scale" | |
bottom: "Convolution28" | |
top: "Convolution28" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU30" | |
type: "ReLU" | |
bottom: "Convolution28" | |
top: "Convolution28" | |
} | |
layer { | |
name: "Eltwise5" | |
type: "Eltwise" | |
bottom: "Eltwise4" | |
bottom: "Convolution28" | |
top: "Eltwise5" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "ReLU31" | |
type: "ReLU" | |
bottom: "Eltwise5" | |
top: "Eltwise5" | |
} | |
layer { | |
name: "Convolution29" | |
type: "Convolution" | |
bottom: "Eltwise5" | |
top: "Convolution29" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm29" | |
type: "BatchNorm" | |
bottom: "Convolution29" | |
top: "Convolution29" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale29" | |
type: "Scale" | |
bottom: "Convolution29" | |
top: "Convolution29" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU32" | |
type: "ReLU" | |
bottom: "Convolution29" | |
top: "Convolution29" | |
} | |
# Tail of a SqueezeNext bottleneck unit (its leading 1x1 conv, Convolution29,
# is defined above this span). Every conv is bias-free and followed by a
# frozen BatchNorm, a learnable Scale, and an in-place ReLU.
#   Convolution30: 1x1, 16 ch (channel reduce)
#   Convolution31: 1x3, 32 ch \ separable pair replacing a 3x3 conv
#   Convolution32: 3x1, 32 ch /
#   Convolution33: 1x1, 64 ch (channel expand)
#   Eltwise6: residual SUM with the previous unit's output "Eltwise5"
# Fix: removed trailing "| |" scrape artifacts that broke protobuf text parsing.
layer {
  name: "Convolution30"
  type: "Convolution"
  bottom: "Convolution29"
  top: "Convolution30"
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "BatchNorm30"
  type: "BatchNorm"
  bottom: "Convolution30"
  top: "Convolution30"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale30"
  type: "Scale"
  bottom: "Convolution30"
  top: "Convolution30"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU33"
  type: "ReLU"
  bottom: "Convolution30"
  top: "Convolution30"
}
# 1x3 conv: pads only along width (pad_w: 1) so spatial size is preserved.
layer {
  name: "Convolution31"
  type: "Convolution"
  bottom: "Convolution30"
  top: "Convolution31"
  convolution_param {
    num_output: 32
    bias_term: false
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    pad_h: 0
    pad_w: 1
    kernel_h: 1
    kernel_w: 3
    dilation: 1
  }
}
layer {
  name: "BatchNorm31"
  type: "BatchNorm"
  bottom: "Convolution31"
  top: "Convolution31"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale31"
  type: "Scale"
  bottom: "Convolution31"
  top: "Convolution31"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU34"
  type: "ReLU"
  bottom: "Convolution31"
  top: "Convolution31"
}
# 3x1 conv: pads only along height (pad_h: 1), completing the separable pair.
layer {
  name: "Convolution32"
  type: "Convolution"
  bottom: "Convolution31"
  top: "Convolution32"
  convolution_param {
    num_output: 32
    bias_term: false
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    pad_h: 1
    pad_w: 0
    kernel_h: 3
    kernel_w: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm32"
  type: "BatchNorm"
  bottom: "Convolution32"
  top: "Convolution32"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale32"
  type: "Scale"
  bottom: "Convolution32"
  top: "Convolution32"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU35"
  type: "ReLU"
  bottom: "Convolution32"
  top: "Convolution32"
}
# 1x1 expand back to 64 channels so the Eltwise shapes match.
layer {
  name: "Convolution33"
  type: "Convolution"
  bottom: "Convolution32"
  top: "Convolution33"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "BatchNorm33"
  type: "BatchNorm"
  bottom: "Convolution33"
  top: "Convolution33"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale33"
  type: "Scale"
  bottom: "Convolution33"
  top: "Convolution33"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU36"
  type: "ReLU"
  bottom: "Convolution33"
  top: "Convolution33"
}
# Residual connection: identity shortcut from the previous unit's output.
layer {
  name: "Eltwise6"
  type: "Eltwise"
  bottom: "Eltwise5"
  bottom: "Convolution33"
  top: "Eltwise6"
  eltwise_param {
    operation: SUM
  }
}
# NOTE(review): every analogous post-Eltwise ReLU in this file is named
# "ReLU<N>"; this one is "bypass_256_3". ReLU has no learned blobs so the
# name only affects logging, but it looks like leftover generator naming —
# confirm before renaming (layer names appear in solver/debug output).
layer {
  name: "bypass_256_3"
  type: "ReLU"
  bottom: "Eltwise6"
  top: "Eltwise6"
}
# Stage-transition SqueezeNext unit (spatial downsample, channel widen).
# Convolution34 (1x1, 128 ch, stride 2) plays a dual role: its output both
# starts the main path (via Convolution35) AND is the shortcut input of
# Eltwise7 — note Eltwise7's bottoms are Convolution34 and Convolution39.
#   Convolution34: 1x1, 128 ch, stride 2 (downsample + widen)
#   Convolution35: 1x1, 64 ch   Convolution36: 1x1, 32 ch (reduce)
#   Convolution37: 3x1, 64 ch   Convolution38: 1x3, 64 ch (separable pair)
#   Convolution39: 1x1, 128 ch (expand)
# Fix: removed trailing "| |" scrape artifacts that broke protobuf text parsing.
layer {
  name: "Convolution34"
  type: "Convolution"
  bottom: "Eltwise6"
  top: "Convolution34"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 2
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "BatchNorm34"
  type: "BatchNorm"
  bottom: "Convolution34"
  top: "Convolution34"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale34"
  type: "Scale"
  bottom: "Convolution34"
  top: "Convolution34"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU37"
  type: "ReLU"
  bottom: "Convolution34"
  top: "Convolution34"
}
layer {
  name: "Convolution35"
  type: "Convolution"
  bottom: "Convolution34"
  top: "Convolution35"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "BatchNorm35"
  type: "BatchNorm"
  bottom: "Convolution35"
  top: "Convolution35"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale35"
  type: "Scale"
  bottom: "Convolution35"
  top: "Convolution35"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU38"
  type: "ReLU"
  bottom: "Convolution35"
  top: "Convolution35"
}
layer {
  name: "Convolution36"
  type: "Convolution"
  bottom: "Convolution35"
  top: "Convolution36"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "BatchNorm36"
  type: "BatchNorm"
  bottom: "Convolution36"
  top: "Convolution36"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale36"
  type: "Scale"
  bottom: "Convolution36"
  top: "Convolution36"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU39"
  type: "ReLU"
  bottom: "Convolution36"
  top: "Convolution36"
}
# 3x1 conv (height padding only); this unit orders the pair 3x1 -> 1x3.
layer {
  name: "Convolution37"
  type: "Convolution"
  bottom: "Convolution36"
  top: "Convolution37"
  convolution_param {
    num_output: 64
    bias_term: false
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    pad_h: 1
    pad_w: 0
    kernel_h: 3
    kernel_w: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm37"
  type: "BatchNorm"
  bottom: "Convolution37"
  top: "Convolution37"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale37"
  type: "Scale"
  bottom: "Convolution37"
  top: "Convolution37"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU40"
  type: "ReLU"
  bottom: "Convolution37"
  top: "Convolution37"
}
layer {
  name: "Convolution38"
  type: "Convolution"
  bottom: "Convolution37"
  top: "Convolution38"
  convolution_param {
    num_output: 64
    bias_term: false
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    pad_h: 0
    pad_w: 1
    kernel_h: 1
    kernel_w: 3
    dilation: 1
  }
}
layer {
  name: "BatchNorm38"
  type: "BatchNorm"
  bottom: "Convolution38"
  top: "Convolution38"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale38"
  type: "Scale"
  bottom: "Convolution38"
  top: "Convolution38"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU41"
  type: "ReLU"
  bottom: "Convolution38"
  top: "Convolution38"
}
layer {
  name: "Convolution39"
  type: "Convolution"
  bottom: "Convolution38"
  top: "Convolution39"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "BatchNorm39"
  type: "BatchNorm"
  bottom: "Convolution39"
  top: "Convolution39"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale39"
  type: "Scale"
  bottom: "Convolution39"
  top: "Convolution39"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU42"
  type: "ReLU"
  bottom: "Convolution39"
  top: "Convolution39"
}
# Shortcut is Convolution34 (post-BN/Scale/ReLU), not Eltwise6: the raw unit
# input has the wrong stride/channel count to be summed here.
layer {
  name: "Eltwise7"
  type: "Eltwise"
  bottom: "Convolution34"
  bottom: "Convolution39"
  top: "Eltwise7"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "ReLU43"
  type: "ReLU"
  bottom: "Eltwise7"
  top: "Eltwise7"
}
# SqueezeNext bottleneck unit with identity shortcut (input "Eltwise7").
#   Convolution40: 1x1, 64 ch   Convolution41: 1x1, 32 ch (reduce)
#   Convolution42: 1x3, 64 ch   Convolution43: 3x1, 64 ch (separable pair)
#   Convolution44: 1x1, 128 ch (expand)
#   Eltwise8: SUM of Eltwise7 and Convolution44
# Each conv is bias-free and followed by frozen BatchNorm + Scale + ReLU.
# Fix: removed trailing "| |" scrape artifacts that broke protobuf text parsing.
layer {
  name: "Convolution40"
  type: "Convolution"
  bottom: "Eltwise7"
  top: "Convolution40"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "BatchNorm40"
  type: "BatchNorm"
  bottom: "Convolution40"
  top: "Convolution40"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale40"
  type: "Scale"
  bottom: "Convolution40"
  top: "Convolution40"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU44"
  type: "ReLU"
  bottom: "Convolution40"
  top: "Convolution40"
}
layer {
  name: "Convolution41"
  type: "Convolution"
  bottom: "Convolution40"
  top: "Convolution41"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "BatchNorm41"
  type: "BatchNorm"
  bottom: "Convolution41"
  top: "Convolution41"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale41"
  type: "Scale"
  bottom: "Convolution41"
  top: "Convolution41"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU45"
  type: "ReLU"
  bottom: "Convolution41"
  top: "Convolution41"
}
layer {
  name: "Convolution42"
  type: "Convolution"
  bottom: "Convolution41"
  top: "Convolution42"
  convolution_param {
    num_output: 64
    bias_term: false
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    pad_h: 0
    pad_w: 1
    kernel_h: 1
    kernel_w: 3
    dilation: 1
  }
}
layer {
  name: "BatchNorm42"
  type: "BatchNorm"
  bottom: "Convolution42"
  top: "Convolution42"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale42"
  type: "Scale"
  bottom: "Convolution42"
  top: "Convolution42"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU46"
  type: "ReLU"
  bottom: "Convolution42"
  top: "Convolution42"
}
layer {
  name: "Convolution43"
  type: "Convolution"
  bottom: "Convolution42"
  top: "Convolution43"
  convolution_param {
    num_output: 64
    bias_term: false
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    pad_h: 1
    pad_w: 0
    kernel_h: 3
    kernel_w: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm43"
  type: "BatchNorm"
  bottom: "Convolution43"
  top: "Convolution43"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale43"
  type: "Scale"
  bottom: "Convolution43"
  top: "Convolution43"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU47"
  type: "ReLU"
  bottom: "Convolution43"
  top: "Convolution43"
}
layer {
  name: "Convolution44"
  type: "Convolution"
  bottom: "Convolution43"
  top: "Convolution44"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "BatchNorm44"
  type: "BatchNorm"
  bottom: "Convolution44"
  top: "Convolution44"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale44"
  type: "Scale"
  bottom: "Convolution44"
  top: "Convolution44"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU48"
  type: "ReLU"
  bottom: "Convolution44"
  top: "Convolution44"
}
layer {
  name: "Eltwise8"
  type: "Eltwise"
  bottom: "Eltwise7"
  bottom: "Convolution44"
  top: "Eltwise8"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "ReLU49"
  type: "ReLU"
  bottom: "Eltwise8"
  top: "Eltwise8"
}
# SqueezeNext bottleneck unit with identity shortcut (input "Eltwise8").
#   Convolution45: 1x1, 64 ch   Convolution46: 1x1, 32 ch (reduce)
#   Convolution47: 3x1, 64 ch   Convolution48: 1x3, 64 ch (separable pair;
#     note the 3x1/1x3 order alternates between consecutive units)
#   Convolution49: 1x1, 128 ch (expand)
#   Eltwise9: SUM of Eltwise8 and Convolution49
# Each conv is bias-free and followed by frozen BatchNorm + Scale + ReLU.
# Fix: removed trailing "| |" scrape artifacts that broke protobuf text parsing.
layer {
  name: "Convolution45"
  type: "Convolution"
  bottom: "Eltwise8"
  top: "Convolution45"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "BatchNorm45"
  type: "BatchNorm"
  bottom: "Convolution45"
  top: "Convolution45"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale45"
  type: "Scale"
  bottom: "Convolution45"
  top: "Convolution45"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU50"
  type: "ReLU"
  bottom: "Convolution45"
  top: "Convolution45"
}
layer {
  name: "Convolution46"
  type: "Convolution"
  bottom: "Convolution45"
  top: "Convolution46"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "BatchNorm46"
  type: "BatchNorm"
  bottom: "Convolution46"
  top: "Convolution46"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale46"
  type: "Scale"
  bottom: "Convolution46"
  top: "Convolution46"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU51"
  type: "ReLU"
  bottom: "Convolution46"
  top: "Convolution46"
}
layer {
  name: "Convolution47"
  type: "Convolution"
  bottom: "Convolution46"
  top: "Convolution47"
  convolution_param {
    num_output: 64
    bias_term: false
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    pad_h: 1
    pad_w: 0
    kernel_h: 3
    kernel_w: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm47"
  type: "BatchNorm"
  bottom: "Convolution47"
  top: "Convolution47"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale47"
  type: "Scale"
  bottom: "Convolution47"
  top: "Convolution47"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU52"
  type: "ReLU"
  bottom: "Convolution47"
  top: "Convolution47"
}
layer {
  name: "Convolution48"
  type: "Convolution"
  bottom: "Convolution47"
  top: "Convolution48"
  convolution_param {
    num_output: 64
    bias_term: false
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    pad_h: 0
    pad_w: 1
    kernel_h: 1
    kernel_w: 3
    dilation: 1
  }
}
layer {
  name: "BatchNorm48"
  type: "BatchNorm"
  bottom: "Convolution48"
  top: "Convolution48"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale48"
  type: "Scale"
  bottom: "Convolution48"
  top: "Convolution48"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU53"
  type: "ReLU"
  bottom: "Convolution48"
  top: "Convolution48"
}
layer {
  name: "Convolution49"
  type: "Convolution"
  bottom: "Convolution48"
  top: "Convolution49"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "BatchNorm49"
  type: "BatchNorm"
  bottom: "Convolution49"
  top: "Convolution49"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale49"
  type: "Scale"
  bottom: "Convolution49"
  top: "Convolution49"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU54"
  type: "ReLU"
  bottom: "Convolution49"
  top: "Convolution49"
}
layer {
  name: "Eltwise9"
  type: "Eltwise"
  bottom: "Eltwise8"
  bottom: "Convolution49"
  top: "Eltwise9"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "ReLU55"
  type: "ReLU"
  bottom: "Eltwise9"
  top: "Eltwise9"
}
# SqueezeNext bottleneck unit with identity shortcut (input "Eltwise9").
#   Convolution50: 1x1, 64 ch   Convolution51: 1x1, 32 ch (reduce)
#   Convolution52: 1x3, 64 ch   Convolution53: 3x1, 64 ch (separable pair)
#   Convolution54: 1x1, 128 ch (expand)
#   Eltwise10: SUM of Eltwise9 and Convolution54
# Each conv is bias-free and followed by frozen BatchNorm + Scale + ReLU.
# Fix: removed trailing "| |" scrape artifacts that broke protobuf text parsing.
layer {
  name: "Convolution50"
  type: "Convolution"
  bottom: "Eltwise9"
  top: "Convolution50"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "BatchNorm50"
  type: "BatchNorm"
  bottom: "Convolution50"
  top: "Convolution50"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale50"
  type: "Scale"
  bottom: "Convolution50"
  top: "Convolution50"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU56"
  type: "ReLU"
  bottom: "Convolution50"
  top: "Convolution50"
}
layer {
  name: "Convolution51"
  type: "Convolution"
  bottom: "Convolution50"
  top: "Convolution51"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "BatchNorm51"
  type: "BatchNorm"
  bottom: "Convolution51"
  top: "Convolution51"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale51"
  type: "Scale"
  bottom: "Convolution51"
  top: "Convolution51"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU57"
  type: "ReLU"
  bottom: "Convolution51"
  top: "Convolution51"
}
layer {
  name: "Convolution52"
  type: "Convolution"
  bottom: "Convolution51"
  top: "Convolution52"
  convolution_param {
    num_output: 64
    bias_term: false
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    pad_h: 0
    pad_w: 1
    kernel_h: 1
    kernel_w: 3
    dilation: 1
  }
}
layer {
  name: "BatchNorm52"
  type: "BatchNorm"
  bottom: "Convolution52"
  top: "Convolution52"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale52"
  type: "Scale"
  bottom: "Convolution52"
  top: "Convolution52"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU58"
  type: "ReLU"
  bottom: "Convolution52"
  top: "Convolution52"
}
layer {
  name: "Convolution53"
  type: "Convolution"
  bottom: "Convolution52"
  top: "Convolution53"
  convolution_param {
    num_output: 64
    bias_term: false
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    pad_h: 1
    pad_w: 0
    kernel_h: 3
    kernel_w: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm53"
  type: "BatchNorm"
  bottom: "Convolution53"
  top: "Convolution53"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale53"
  type: "Scale"
  bottom: "Convolution53"
  top: "Convolution53"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU59"
  type: "ReLU"
  bottom: "Convolution53"
  top: "Convolution53"
}
layer {
  name: "Convolution54"
  type: "Convolution"
  bottom: "Convolution53"
  top: "Convolution54"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "BatchNorm54"
  type: "BatchNorm"
  bottom: "Convolution54"
  top: "Convolution54"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale54"
  type: "Scale"
  bottom: "Convolution54"
  top: "Convolution54"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU60"
  type: "ReLU"
  bottom: "Convolution54"
  top: "Convolution54"
}
layer {
  name: "Eltwise10"
  type: "Eltwise"
  bottom: "Eltwise9"
  bottom: "Convolution54"
  top: "Eltwise10"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "ReLU61"
  type: "ReLU"
  bottom: "Eltwise10"
  top: "Eltwise10"
}
# SqueezeNext bottleneck unit with identity shortcut (input "Eltwise10").
#   Convolution55: 1x1, 64 ch   Convolution56: 1x1, 32 ch (reduce)
#   Convolution57: 3x1, 64 ch   Convolution58: 1x3, 64 ch (separable pair)
#   Convolution59: 1x1, 128 ch (expand)
#   Eltwise11: SUM of Eltwise10 and Convolution59
# Each conv is bias-free and followed by frozen BatchNorm + Scale + ReLU.
# Fix: removed trailing "| |" scrape artifacts that broke protobuf text parsing.
layer {
  name: "Convolution55"
  type: "Convolution"
  bottom: "Eltwise10"
  top: "Convolution55"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "BatchNorm55"
  type: "BatchNorm"
  bottom: "Convolution55"
  top: "Convolution55"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale55"
  type: "Scale"
  bottom: "Convolution55"
  top: "Convolution55"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU62"
  type: "ReLU"
  bottom: "Convolution55"
  top: "Convolution55"
}
layer {
  name: "Convolution56"
  type: "Convolution"
  bottom: "Convolution55"
  top: "Convolution56"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "BatchNorm56"
  type: "BatchNorm"
  bottom: "Convolution56"
  top: "Convolution56"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale56"
  type: "Scale"
  bottom: "Convolution56"
  top: "Convolution56"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU63"
  type: "ReLU"
  bottom: "Convolution56"
  top: "Convolution56"
}
layer {
  name: "Convolution57"
  type: "Convolution"
  bottom: "Convolution56"
  top: "Convolution57"
  convolution_param {
    num_output: 64
    bias_term: false
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    pad_h: 1
    pad_w: 0
    kernel_h: 3
    kernel_w: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm57"
  type: "BatchNorm"
  bottom: "Convolution57"
  top: "Convolution57"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale57"
  type: "Scale"
  bottom: "Convolution57"
  top: "Convolution57"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU64"
  type: "ReLU"
  bottom: "Convolution57"
  top: "Convolution57"
}
layer {
  name: "Convolution58"
  type: "Convolution"
  bottom: "Convolution57"
  top: "Convolution58"
  convolution_param {
    num_output: 64
    bias_term: false
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    pad_h: 0
    pad_w: 1
    kernel_h: 1
    kernel_w: 3
    dilation: 1
  }
}
layer {
  name: "BatchNorm58"
  type: "BatchNorm"
  bottom: "Convolution58"
  top: "Convolution58"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale58"
  type: "Scale"
  bottom: "Convolution58"
  top: "Convolution58"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU65"
  type: "ReLU"
  bottom: "Convolution58"
  top: "Convolution58"
}
layer {
  name: "Convolution59"
  type: "Convolution"
  bottom: "Convolution58"
  top: "Convolution59"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "BatchNorm59"
  type: "BatchNorm"
  bottom: "Convolution59"
  top: "Convolution59"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale59"
  type: "Scale"
  bottom: "Convolution59"
  top: "Convolution59"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU66"
  type: "ReLU"
  bottom: "Convolution59"
  top: "Convolution59"
}
layer {
  name: "Eltwise11"
  type: "Eltwise"
  bottom: "Eltwise10"
  bottom: "Convolution59"
  top: "Eltwise11"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "ReLU67"
  type: "ReLU"
  bottom: "Eltwise11"
  top: "Eltwise11"
}
# Leading portion of the next SqueezeNext bottleneck unit (input "Eltwise11").
#   Convolution60: 1x1, 64 ch   Convolution61: 1x1, 32 ch (reduce)
#   Convolution62: 1x3, 64 ch   Convolution63: 3x1, 64 ch (separable pair)
#   Convolution64: 1x1, 128 ch (expand)
# The unit's Scale64 / ReLU / Eltwise layers continue below this span.
# Each conv is bias-free and followed by frozen BatchNorm + Scale + ReLU.
# Fix: removed trailing "| |" scrape artifacts that broke protobuf text parsing.
layer {
  name: "Convolution60"
  type: "Convolution"
  bottom: "Eltwise11"
  top: "Convolution60"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "BatchNorm60"
  type: "BatchNorm"
  bottom: "Convolution60"
  top: "Convolution60"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale60"
  type: "Scale"
  bottom: "Convolution60"
  top: "Convolution60"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU68"
  type: "ReLU"
  bottom: "Convolution60"
  top: "Convolution60"
}
layer {
  name: "Convolution61"
  type: "Convolution"
  bottom: "Convolution60"
  top: "Convolution61"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "BatchNorm61"
  type: "BatchNorm"
  bottom: "Convolution61"
  top: "Convolution61"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale61"
  type: "Scale"
  bottom: "Convolution61"
  top: "Convolution61"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU69"
  type: "ReLU"
  bottom: "Convolution61"
  top: "Convolution61"
}
layer {
  name: "Convolution62"
  type: "Convolution"
  bottom: "Convolution61"
  top: "Convolution62"
  convolution_param {
    num_output: 64
    bias_term: false
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    pad_h: 0
    pad_w: 1
    kernel_h: 1
    kernel_w: 3
    dilation: 1
  }
}
layer {
  name: "BatchNorm62"
  type: "BatchNorm"
  bottom: "Convolution62"
  top: "Convolution62"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale62"
  type: "Scale"
  bottom: "Convolution62"
  top: "Convolution62"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU70"
  type: "ReLU"
  bottom: "Convolution62"
  top: "Convolution62"
}
layer {
  name: "Convolution63"
  type: "Convolution"
  bottom: "Convolution62"
  top: "Convolution63"
  convolution_param {
    num_output: 64
    bias_term: false
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
    pad_h: 1
    pad_w: 0
    kernel_h: 3
    kernel_w: 1
    dilation: 1
  }
}
layer {
  name: "BatchNorm63"
  type: "BatchNorm"
  bottom: "Convolution63"
  top: "Convolution63"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer {
  name: "Scale63"
  type: "Scale"
  bottom: "Convolution63"
  top: "Convolution63"
  scale_param {
    filler {
      value: 1.0
    }
    bias_term: true
    bias_filler {
      value: 0.0
    }
  }
}
layer {
  name: "ReLU71"
  type: "ReLU"
  bottom: "Convolution63"
  top: "Convolution63"
}
layer {
  name: "Convolution64"
  type: "Convolution"
  bottom: "Convolution63"
  top: "Convolution64"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 0
    kernel_size: 1
    group: 1
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layer {
  name: "BatchNorm64"
  type: "BatchNorm"
  bottom: "Convolution64"
  top: "Convolution64"
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
  param {
    lr_mult: 0.0
    decay_mult: 0.0
  }
}
layer { | |
name: "Scale64" | |
type: "Scale" | |
bottom: "Convolution64" | |
top: "Convolution64" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU72" | |
type: "ReLU" | |
bottom: "Convolution64" | |
top: "Convolution64" | |
} | |
layer { | |
name: "Eltwise12" | |
type: "Eltwise" | |
bottom: "Eltwise11" | |
bottom: "Convolution64" | |
top: "Eltwise12" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "ReLU73" | |
type: "ReLU" | |
bottom: "Eltwise12" | |
top: "Eltwise12" | |
} | |
layer { | |
name: "Convolution65" | |
type: "Convolution" | |
bottom: "Eltwise12" | |
top: "Convolution65" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm65" | |
type: "BatchNorm" | |
bottom: "Convolution65" | |
top: "Convolution65" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale65" | |
type: "Scale" | |
bottom: "Convolution65" | |
top: "Convolution65" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU74" | |
type: "ReLU" | |
bottom: "Convolution65" | |
top: "Convolution65" | |
} | |
layer { | |
name: "Convolution66" | |
type: "Convolution" | |
bottom: "Convolution65" | |
top: "Convolution66" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm66" | |
type: "BatchNorm" | |
bottom: "Convolution66" | |
top: "Convolution66" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale66" | |
type: "Scale" | |
bottom: "Convolution66" | |
top: "Convolution66" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU75" | |
type: "ReLU" | |
bottom: "Convolution66" | |
top: "Convolution66" | |
} | |
layer { | |
name: "Convolution67" | |
type: "Convolution" | |
bottom: "Convolution66" | |
top: "Convolution67" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
pad_h: 1 | |
pad_w: 0 | |
kernel_h: 3 | |
kernel_w: 1 | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "BatchNorm67" | |
type: "BatchNorm" | |
bottom: "Convolution67" | |
top: "Convolution67" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale67" | |
type: "Scale" | |
bottom: "Convolution67" | |
top: "Convolution67" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU76" | |
type: "ReLU" | |
bottom: "Convolution67" | |
top: "Convolution67" | |
} | |
layer { | |
name: "Convolution68" | |
type: "Convolution" | |
bottom: "Convolution67" | |
top: "Convolution68" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
pad_h: 0 | |
pad_w: 1 | |
kernel_h: 1 | |
kernel_w: 3 | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "BatchNorm68" | |
type: "BatchNorm" | |
bottom: "Convolution68" | |
top: "Convolution68" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale68" | |
type: "Scale" | |
bottom: "Convolution68" | |
top: "Convolution68" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU77" | |
type: "ReLU" | |
bottom: "Convolution68" | |
top: "Convolution68" | |
} | |
layer { | |
name: "Convolution69" | |
type: "Convolution" | |
bottom: "Convolution68" | |
top: "Convolution69" | |
convolution_param { | |
num_output: 128 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm69" | |
type: "BatchNorm" | |
bottom: "Convolution69" | |
top: "Convolution69" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale69" | |
type: "Scale" | |
bottom: "Convolution69" | |
top: "Convolution69" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU78" | |
type: "ReLU" | |
bottom: "Convolution69" | |
top: "Convolution69" | |
} | |
layer { | |
name: "Eltwise13" | |
type: "Eltwise" | |
bottom: "Eltwise12" | |
bottom: "Convolution69" | |
top: "Eltwise13" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "ReLU79" | |
type: "ReLU" | |
bottom: "Eltwise13" | |
top: "Eltwise13" | |
} | |
layer { | |
name: "Convolution70" | |
type: "Convolution" | |
bottom: "Eltwise13" | |
top: "Convolution70" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm70" | |
type: "BatchNorm" | |
bottom: "Convolution70" | |
top: "Convolution70" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale70" | |
type: "Scale" | |
bottom: "Convolution70" | |
top: "Convolution70" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU80" | |
type: "ReLU" | |
bottom: "Convolution70" | |
top: "Convolution70" | |
} | |
layer { | |
name: "Convolution71" | |
type: "Convolution" | |
bottom: "Convolution70" | |
top: "Convolution71" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm71" | |
type: "BatchNorm" | |
bottom: "Convolution71" | |
top: "Convolution71" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale71" | |
type: "Scale" | |
bottom: "Convolution71" | |
top: "Convolution71" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU81" | |
type: "ReLU" | |
bottom: "Convolution71" | |
top: "Convolution71" | |
} | |
layer { | |
name: "Convolution72" | |
type: "Convolution" | |
bottom: "Convolution71" | |
top: "Convolution72" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
pad_h: 0 | |
pad_w: 1 | |
kernel_h: 1 | |
kernel_w: 3 | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "BatchNorm72" | |
type: "BatchNorm" | |
bottom: "Convolution72" | |
top: "Convolution72" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale72" | |
type: "Scale" | |
bottom: "Convolution72" | |
top: "Convolution72" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU82" | |
type: "ReLU" | |
bottom: "Convolution72" | |
top: "Convolution72" | |
} | |
layer { | |
name: "Convolution73" | |
type: "Convolution" | |
bottom: "Convolution72" | |
top: "Convolution73" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
pad_h: 1 | |
pad_w: 0 | |
kernel_h: 3 | |
kernel_w: 1 | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "BatchNorm73" | |
type: "BatchNorm" | |
bottom: "Convolution73" | |
top: "Convolution73" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale73" | |
type: "Scale" | |
bottom: "Convolution73" | |
top: "Convolution73" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU83" | |
type: "ReLU" | |
bottom: "Convolution73" | |
top: "Convolution73" | |
} | |
layer { | |
name: "Convolution74" | |
type: "Convolution" | |
bottom: "Convolution73" | |
top: "Convolution74" | |
convolution_param { | |
num_output: 128 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm74" | |
type: "BatchNorm" | |
bottom: "Convolution74" | |
top: "Convolution74" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale74" | |
type: "Scale" | |
bottom: "Convolution74" | |
top: "Convolution74" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU84" | |
type: "ReLU" | |
bottom: "Convolution74" | |
top: "Convolution74" | |
} | |
layer { | |
name: "Eltwise14" | |
type: "Eltwise" | |
bottom: "Eltwise13" | |
bottom: "Convolution74" | |
top: "Eltwise14" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "ReLU85" | |
type: "ReLU" | |
bottom: "Eltwise14" | |
top: "Eltwise14" | |
} | |
layer { | |
name: "Convolution75" | |
type: "Convolution" | |
bottom: "Eltwise14" | |
top: "Convolution75" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm75" | |
type: "BatchNorm" | |
bottom: "Convolution75" | |
top: "Convolution75" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale75" | |
type: "Scale" | |
bottom: "Convolution75" | |
top: "Convolution75" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU86" | |
type: "ReLU" | |
bottom: "Convolution75" | |
top: "Convolution75" | |
} | |
layer { | |
name: "Convolution76" | |
type: "Convolution" | |
bottom: "Convolution75" | |
top: "Convolution76" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm76" | |
type: "BatchNorm" | |
bottom: "Convolution76" | |
top: "Convolution76" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale76" | |
type: "Scale" | |
bottom: "Convolution76" | |
top: "Convolution76" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU87" | |
type: "ReLU" | |
bottom: "Convolution76" | |
top: "Convolution76" | |
} | |
layer { | |
name: "Convolution77" | |
type: "Convolution" | |
bottom: "Convolution76" | |
top: "Convolution77" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
pad_h: 1 | |
pad_w: 0 | |
kernel_h: 3 | |
kernel_w: 1 | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "BatchNorm77" | |
type: "BatchNorm" | |
bottom: "Convolution77" | |
top: "Convolution77" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale77" | |
type: "Scale" | |
bottom: "Convolution77" | |
top: "Convolution77" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU88" | |
type: "ReLU" | |
bottom: "Convolution77" | |
top: "Convolution77" | |
} | |
layer { | |
name: "Convolution78" | |
type: "Convolution" | |
bottom: "Convolution77" | |
top: "Convolution78" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
pad_h: 0 | |
pad_w: 1 | |
kernel_h: 1 | |
kernel_w: 3 | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "BatchNorm78" | |
type: "BatchNorm" | |
bottom: "Convolution78" | |
top: "Convolution78" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale78" | |
type: "Scale" | |
bottom: "Convolution78" | |
top: "Convolution78" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU89" | |
type: "ReLU" | |
bottom: "Convolution78" | |
top: "Convolution78" | |
} | |
layer { | |
name: "Convolution79" | |
type: "Convolution" | |
bottom: "Convolution78" | |
top: "Convolution79" | |
convolution_param { | |
num_output: 128 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm79" | |
type: "BatchNorm" | |
bottom: "Convolution79" | |
top: "Convolution79" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale79" | |
type: "Scale" | |
bottom: "Convolution79" | |
top: "Convolution79" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU90" | |
type: "ReLU" | |
bottom: "Convolution79" | |
top: "Convolution79" | |
} | |
layer { | |
name: "Eltwise15" | |
type: "Eltwise" | |
bottom: "Eltwise14" | |
bottom: "Convolution79" | |
top: "Eltwise15" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "ReLU91" | |
type: "ReLU" | |
bottom: "Eltwise15" | |
top: "Eltwise15" | |
} | |
layer { | |
name: "Convolution80" | |
type: "Convolution" | |
bottom: "Eltwise15" | |
top: "Convolution80" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm80" | |
type: "BatchNorm" | |
bottom: "Convolution80" | |
top: "Convolution80" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale80" | |
type: "Scale" | |
bottom: "Convolution80" | |
top: "Convolution80" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU92" | |
type: "ReLU" | |
bottom: "Convolution80" | |
top: "Convolution80" | |
} | |
layer { | |
name: "Convolution81" | |
type: "Convolution" | |
bottom: "Convolution80" | |
top: "Convolution81" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm81" | |
type: "BatchNorm" | |
bottom: "Convolution81" | |
top: "Convolution81" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale81" | |
type: "Scale" | |
bottom: "Convolution81" | |
top: "Convolution81" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU93" | |
type: "ReLU" | |
bottom: "Convolution81" | |
top: "Convolution81" | |
} | |
layer { | |
name: "Convolution82" | |
type: "Convolution" | |
bottom: "Convolution81" | |
top: "Convolution82" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
pad_h: 0 | |
pad_w: 1 | |
kernel_h: 1 | |
kernel_w: 3 | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "BatchNorm82" | |
type: "BatchNorm" | |
bottom: "Convolution82" | |
top: "Convolution82" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale82" | |
type: "Scale" | |
bottom: "Convolution82" | |
top: "Convolution82" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU94" | |
type: "ReLU" | |
bottom: "Convolution82" | |
top: "Convolution82" | |
} | |
layer { | |
name: "Convolution83" | |
type: "Convolution" | |
bottom: "Convolution82" | |
top: "Convolution83" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
pad_h: 1 | |
pad_w: 0 | |
kernel_h: 3 | |
kernel_w: 1 | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "BatchNorm83" | |
type: "BatchNorm" | |
bottom: "Convolution83" | |
top: "Convolution83" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale83" | |
type: "Scale" | |
bottom: "Convolution83" | |
top: "Convolution83" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU95" | |
type: "ReLU" | |
bottom: "Convolution83" | |
top: "Convolution83" | |
} | |
layer { | |
name: "Convolution84" | |
type: "Convolution" | |
bottom: "Convolution83" | |
top: "Convolution84" | |
convolution_param { | |
num_output: 128 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm84" | |
type: "BatchNorm" | |
bottom: "Convolution84" | |
top: "Convolution84" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale84" | |
type: "Scale" | |
bottom: "Convolution84" | |
top: "Convolution84" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU96" | |
type: "ReLU" | |
bottom: "Convolution84" | |
top: "Convolution84" | |
} | |
layer { | |
name: "Eltwise16" | |
type: "Eltwise" | |
bottom: "Eltwise15" | |
bottom: "Convolution84" | |
top: "Eltwise16" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "ReLU97" | |
type: "ReLU" | |
bottom: "Eltwise16" | |
top: "Eltwise16" | |
} | |
layer { | |
name: "Convolution85" | |
type: "Convolution" | |
bottom: "Eltwise16" | |
top: "Convolution85" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm85" | |
type: "BatchNorm" | |
bottom: "Convolution85" | |
top: "Convolution85" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale85" | |
type: "Scale" | |
bottom: "Convolution85" | |
top: "Convolution85" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU98" | |
type: "ReLU" | |
bottom: "Convolution85" | |
top: "Convolution85" | |
} | |
layer { | |
name: "Convolution86" | |
type: "Convolution" | |
bottom: "Convolution85" | |
top: "Convolution86" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm86" | |
type: "BatchNorm" | |
bottom: "Convolution86" | |
top: "Convolution86" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale86" | |
type: "Scale" | |
bottom: "Convolution86" | |
top: "Convolution86" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU99" | |
type: "ReLU" | |
bottom: "Convolution86" | |
top: "Convolution86" | |
} | |
layer { | |
name: "Convolution87" | |
type: "Convolution" | |
bottom: "Convolution86" | |
top: "Convolution87" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
pad_h: 1 | |
pad_w: 0 | |
kernel_h: 3 | |
kernel_w: 1 | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "BatchNorm87" | |
type: "BatchNorm" | |
bottom: "Convolution87" | |
top: "Convolution87" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale87" | |
type: "Scale" | |
bottom: "Convolution87" | |
top: "Convolution87" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU100" | |
type: "ReLU" | |
bottom: "Convolution87" | |
top: "Convolution87" | |
} | |
layer { | |
name: "Convolution88" | |
type: "Convolution" | |
bottom: "Convolution87" | |
top: "Convolution88" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
pad_h: 0 | |
pad_w: 1 | |
kernel_h: 1 | |
kernel_w: 3 | |
dilation: 1 | |
} | |
} | |
layer { | |
name: "BatchNorm88" | |
type: "BatchNorm" | |
bottom: "Convolution88" | |
top: "Convolution88" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale88" | |
type: "Scale" | |
bottom: "Convolution88" | |
top: "Convolution88" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU101" | |
type: "ReLU" | |
bottom: "Convolution88" | |
top: "Convolution88" | |
} | |
layer { | |
name: "Convolution89" | |
type: "Convolution" | |
bottom: "Convolution88" | |
top: "Convolution89" | |
convolution_param { | |
num_output: 128 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm89" | |
type: "BatchNorm" | |
bottom: "Convolution89" | |
top: "Convolution89" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale89" | |
type: "Scale" | |
bottom: "Convolution89" | |
top: "Convolution89" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU102" | |
type: "ReLU" | |
bottom: "Convolution89" | |
top: "Convolution89" | |
} | |
layer { | |
name: "Eltwise17" | |
type: "Eltwise" | |
bottom: "Eltwise16" | |
bottom: "Convolution89" | |
top: "Eltwise17" | |
eltwise_param { | |
operation: SUM | |
} | |
} | |
layer { | |
name: "ReLU103" | |
type: "ReLU" | |
bottom: "Eltwise17" | |
top: "Eltwise17" | |
} | |
layer { | |
name: "Convolution90" | |
type: "Convolution" | |
bottom: "Eltwise17" | |
top: "Convolution90" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm90" | |
type: "BatchNorm" | |
bottom: "Convolution90" | |
top: "Convolution90" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale90" | |
type: "Scale" | |
bottom: "Convolution90" | |
top: "Convolution90" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
layer { | |
name: "ReLU104" | |
type: "ReLU" | |
bottom: "Convolution90" | |
top: "Convolution90" | |
} | |
layer { | |
name: "Convolution91" | |
type: "Convolution" | |
bottom: "Convolution90" | |
top: "Convolution91" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
group: 1 | |
stride: 1 | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
} | |
} | |
} | |
layer { | |
name: "BatchNorm91" | |
type: "BatchNorm" | |
bottom: "Convolution91" | |
top: "Convolution91" | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
param { | |
lr_mult: 0.0 | |
decay_mult: 0.0 | |
} | |
} | |
layer { | |
name: "Scale91" | |
type: "Scale" | |
bottom: "Convolution91" | |
top: "Convolution91" | |
scale_param { | |
filler { | |
value: 1.0 | |
} | |
bias_term: true | |
bias_filler { | |
value: 0.0 | |
} | |
} | |
} | |
# Tail of a SqueezeNext bottleneck unit: separable 1x3 -> 3x1 convolutions,
# then a 1x1 expansion back to 128 channels, summed with the unit input
# ("Eltwise17") to form the residual output "Eltwise18".
# Cleanup: stripped the "| |" HTML-extraction residue (it breaks protobuf
# text-format parsing), removed inert bias_filler on bias-free convolutions,
# and dropped default-valued fields (group: 1, dilation: 1, pad: 0, stride: 1).
layer { name: "ReLU105" type: "ReLU" bottom: "Convolution91" top: "Convolution91" }
# 1x3 half of the separable 3x3 convolution (64 channels).
layer {
  name: "Convolution92"
  type: "Convolution"
  bottom: "Convolution91"
  top: "Convolution92"
  convolution_param {
    num_output: 64
    bias_term: false
    pad_h: 0
    pad_w: 1
    kernel_h: 1
    kernel_w: 3
    weight_filler { type: "xavier" }
  }
}
# BatchNorm statistics are frozen (lr_mult/decay_mult 0 on all three blobs:
# mean, variance, moving-average factor); the learned affine transform lives
# in the following Scale layer.
layer {
  name: "BatchNorm92"
  type: "BatchNorm"
  bottom: "Convolution92"
  top: "Convolution92"
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
}
layer {
  name: "Scale92"
  type: "Scale"
  bottom: "Convolution92"
  top: "Convolution92"
  scale_param { filler { value: 1.0 } bias_term: true bias_filler { value: 0.0 } }
}
layer { name: "ReLU106" type: "ReLU" bottom: "Convolution92" top: "Convolution92" }
# 3x1 half of the separable convolution (64 channels).
layer {
  name: "Convolution93"
  type: "Convolution"
  bottom: "Convolution92"
  top: "Convolution93"
  convolution_param {
    num_output: 64
    bias_term: false
    pad_h: 1
    pad_w: 0
    kernel_h: 3
    kernel_w: 1
    weight_filler { type: "xavier" }
  }
}
layer {
  name: "BatchNorm93"
  type: "BatchNorm"
  bottom: "Convolution93"
  top: "Convolution93"
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
}
layer {
  name: "Scale93"
  type: "Scale"
  bottom: "Convolution93"
  top: "Convolution93"
  scale_param { filler { value: 1.0 } bias_term: true bias_filler { value: 0.0 } }
}
layer { name: "ReLU107" type: "ReLU" bottom: "Convolution93" top: "Convolution93" }
# 1x1 expansion back to the unit's output width (128 channels).
layer {
  name: "Convolution94"
  type: "Convolution"
  bottom: "Convolution93"
  top: "Convolution94"
  convolution_param {
    num_output: 128
    bias_term: false
    kernel_size: 1
    weight_filler { type: "xavier" }
  }
}
layer {
  name: "BatchNorm94"
  type: "BatchNorm"
  bottom: "Convolution94"
  top: "Convolution94"
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
}
layer {
  name: "Scale94"
  type: "Scale"
  bottom: "Convolution94"
  top: "Convolution94"
  scale_param { filler { value: 1.0 } bias_term: true bias_filler { value: 0.0 } }
}
layer { name: "ReLU108" type: "ReLU" bottom: "Convolution94" top: "Convolution94" }
# Residual sum with the unit input.
layer {
  name: "Eltwise18"
  type: "Eltwise"
  bottom: "Eltwise17"
  bottom: "Convolution94"
  top: "Eltwise18"
  eltwise_param { operation: SUM }
}
# SqueezeNext bottleneck unit on "Eltwise18":
# 1x1 reduce (64) -> 1x1 reduce (32) -> 3x1 (64) -> 1x3 (64) -> 1x1 expand
# (128), residual-summed into "Eltwise19".  Identity shortcut (no projection:
# input and output are both 128 channels at the same resolution).
# Cleanup: stripped "| |" extraction residue and inert/default fields;
# effective parameters are unchanged.
layer { name: "ReLU109" type: "ReLU" bottom: "Eltwise18" top: "Eltwise18" }
layer {
  name: "Convolution95"
  type: "Convolution"
  bottom: "Eltwise18"
  top: "Convolution95"
  convolution_param {
    num_output: 64
    bias_term: false
    kernel_size: 1
    weight_filler { type: "xavier" }
  }
}
# BatchNorm statistics frozen; affine transform learned by the Scale layer.
layer {
  name: "BatchNorm95"
  type: "BatchNorm"
  bottom: "Convolution95"
  top: "Convolution95"
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
}
layer {
  name: "Scale95"
  type: "Scale"
  bottom: "Convolution95"
  top: "Convolution95"
  scale_param { filler { value: 1.0 } bias_term: true bias_filler { value: 0.0 } }
}
layer { name: "ReLU110" type: "ReLU" bottom: "Convolution95" top: "Convolution95" }
layer {
  name: "Convolution96"
  type: "Convolution"
  bottom: "Convolution95"
  top: "Convolution96"
  convolution_param {
    num_output: 32
    bias_term: false
    kernel_size: 1
    weight_filler { type: "xavier" }
  }
}
layer {
  name: "BatchNorm96"
  type: "BatchNorm"
  bottom: "Convolution96"
  top: "Convolution96"
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
}
layer {
  name: "Scale96"
  type: "Scale"
  bottom: "Convolution96"
  top: "Convolution96"
  scale_param { filler { value: 1.0 } bias_term: true bias_filler { value: 0.0 } }
}
layer { name: "ReLU111" type: "ReLU" bottom: "Convolution96" top: "Convolution96" }
# 3x1 half of the separable convolution (this unit runs 3x1 before 1x3).
layer {
  name: "Convolution97"
  type: "Convolution"
  bottom: "Convolution96"
  top: "Convolution97"
  convolution_param {
    num_output: 64
    bias_term: false
    pad_h: 1
    pad_w: 0
    kernel_h: 3
    kernel_w: 1
    weight_filler { type: "xavier" }
  }
}
layer {
  name: "BatchNorm97"
  type: "BatchNorm"
  bottom: "Convolution97"
  top: "Convolution97"
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
}
layer {
  name: "Scale97"
  type: "Scale"
  bottom: "Convolution97"
  top: "Convolution97"
  scale_param { filler { value: 1.0 } bias_term: true bias_filler { value: 0.0 } }
}
layer { name: "ReLU112" type: "ReLU" bottom: "Convolution97" top: "Convolution97" }
layer {
  name: "Convolution98"
  type: "Convolution"
  bottom: "Convolution97"
  top: "Convolution98"
  convolution_param {
    num_output: 64
    bias_term: false
    pad_h: 0
    pad_w: 1
    kernel_h: 1
    kernel_w: 3
    weight_filler { type: "xavier" }
  }
}
layer {
  name: "BatchNorm98"
  type: "BatchNorm"
  bottom: "Convolution98"
  top: "Convolution98"
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
}
layer {
  name: "Scale98"
  type: "Scale"
  bottom: "Convolution98"
  top: "Convolution98"
  scale_param { filler { value: 1.0 } bias_term: true bias_filler { value: 0.0 } }
}
layer { name: "ReLU113" type: "ReLU" bottom: "Convolution98" top: "Convolution98" }
# 1x1 expansion back to 128 channels.
layer {
  name: "Convolution99"
  type: "Convolution"
  bottom: "Convolution98"
  top: "Convolution99"
  convolution_param {
    num_output: 128
    bias_term: false
    kernel_size: 1
    weight_filler { type: "xavier" }
  }
}
layer {
  name: "BatchNorm99"
  type: "BatchNorm"
  bottom: "Convolution99"
  top: "Convolution99"
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
}
layer {
  name: "Scale99"
  type: "Scale"
  bottom: "Convolution99"
  top: "Convolution99"
  scale_param { filler { value: 1.0 } bias_term: true bias_filler { value: 0.0 } }
}
layer { name: "ReLU114" type: "ReLU" bottom: "Convolution99" top: "Convolution99" }
layer {
  name: "Eltwise19"
  type: "Eltwise"
  bottom: "Eltwise18"
  bottom: "Convolution99"
  top: "Eltwise19"
  eltwise_param { operation: SUM }
}
# SqueezeNext bottleneck unit on "Eltwise19":
# 1x1 reduce (64) -> 1x1 reduce (32) -> 1x3 (64) -> 3x1 (64) -> 1x1 expand
# (128), residual-summed into "Eltwise20".  Note the separable pair order
# alternates with the previous unit (1x3 first here).  Identity shortcut.
# Cleanup: stripped "| |" extraction residue and inert/default fields;
# effective parameters are unchanged.
layer { name: "ReLU115" type: "ReLU" bottom: "Eltwise19" top: "Eltwise19" }
layer {
  name: "Convolution100"
  type: "Convolution"
  bottom: "Eltwise19"
  top: "Convolution100"
  convolution_param {
    num_output: 64
    bias_term: false
    kernel_size: 1
    weight_filler { type: "xavier" }
  }
}
# BatchNorm statistics frozen; affine transform learned by the Scale layer.
layer {
  name: "BatchNorm100"
  type: "BatchNorm"
  bottom: "Convolution100"
  top: "Convolution100"
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
}
layer {
  name: "Scale100"
  type: "Scale"
  bottom: "Convolution100"
  top: "Convolution100"
  scale_param { filler { value: 1.0 } bias_term: true bias_filler { value: 0.0 } }
}
layer { name: "ReLU116" type: "ReLU" bottom: "Convolution100" top: "Convolution100" }
layer {
  name: "Convolution101"
  type: "Convolution"
  bottom: "Convolution100"
  top: "Convolution101"
  convolution_param {
    num_output: 32
    bias_term: false
    kernel_size: 1
    weight_filler { type: "xavier" }
  }
}
layer {
  name: "BatchNorm101"
  type: "BatchNorm"
  bottom: "Convolution101"
  top: "Convolution101"
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
}
layer {
  name: "Scale101"
  type: "Scale"
  bottom: "Convolution101"
  top: "Convolution101"
  scale_param { filler { value: 1.0 } bias_term: true bias_filler { value: 0.0 } }
}
layer { name: "ReLU117" type: "ReLU" bottom: "Convolution101" top: "Convolution101" }
# 1x3 half of the separable convolution.
layer {
  name: "Convolution102"
  type: "Convolution"
  bottom: "Convolution101"
  top: "Convolution102"
  convolution_param {
    num_output: 64
    bias_term: false
    pad_h: 0
    pad_w: 1
    kernel_h: 1
    kernel_w: 3
    weight_filler { type: "xavier" }
  }
}
layer {
  name: "BatchNorm102"
  type: "BatchNorm"
  bottom: "Convolution102"
  top: "Convolution102"
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
}
layer {
  name: "Scale102"
  type: "Scale"
  bottom: "Convolution102"
  top: "Convolution102"
  scale_param { filler { value: 1.0 } bias_term: true bias_filler { value: 0.0 } }
}
layer { name: "ReLU118" type: "ReLU" bottom: "Convolution102" top: "Convolution102" }
# 3x1 half of the separable convolution.
layer {
  name: "Convolution103"
  type: "Convolution"
  bottom: "Convolution102"
  top: "Convolution103"
  convolution_param {
    num_output: 64
    bias_term: false
    pad_h: 1
    pad_w: 0
    kernel_h: 3
    kernel_w: 1
    weight_filler { type: "xavier" }
  }
}
layer {
  name: "BatchNorm103"
  type: "BatchNorm"
  bottom: "Convolution103"
  top: "Convolution103"
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
}
layer {
  name: "Scale103"
  type: "Scale"
  bottom: "Convolution103"
  top: "Convolution103"
  scale_param { filler { value: 1.0 } bias_term: true bias_filler { value: 0.0 } }
}
layer { name: "ReLU119" type: "ReLU" bottom: "Convolution103" top: "Convolution103" }
# 1x1 expansion back to 128 channels.
layer {
  name: "Convolution104"
  type: "Convolution"
  bottom: "Convolution103"
  top: "Convolution104"
  convolution_param {
    num_output: 128
    bias_term: false
    kernel_size: 1
    weight_filler { type: "xavier" }
  }
}
layer {
  name: "BatchNorm104"
  type: "BatchNorm"
  bottom: "Convolution104"
  top: "Convolution104"
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
}
layer {
  name: "Scale104"
  type: "Scale"
  bottom: "Convolution104"
  top: "Convolution104"
  scale_param { filler { value: 1.0 } bias_term: true bias_filler { value: 0.0 } }
}
layer { name: "ReLU120" type: "ReLU" bottom: "Convolution104" top: "Convolution104" }
layer {
  name: "Eltwise20"
  type: "Eltwise"
  bottom: "Eltwise19"
  bottom: "Convolution104"
  top: "Eltwise20"
  eltwise_param { operation: SUM }
}
# Downsampling transition unit on "Eltwise20" (end of the 128-channel stage).
# Shortcut branch: Convolution105 projects 128 -> 256 channels at stride 2.
# Main branch: 1x1 reduce (128) -> 1x1 reduce (64) -> 3x1 (128) -> 1x3 (128)
# -> 1x1 expand (256); the two branches are summed in "Eltwise21".
# NOTE(review): the main branch reads from Convolution105's output, so the
# stride-2 projection is shared by both branches (resolution drops once).
# Cleanup: stripped "| |" extraction residue and inert/default fields;
# effective parameters are unchanged.
layer { name: "bypass_512_6" type: "ReLU" bottom: "Eltwise20" top: "Eltwise20" }
# Stride-2 1x1 projection shortcut (128 -> 256 channels, halves resolution).
layer {
  name: "Convolution105"
  type: "Convolution"
  bottom: "Eltwise20"
  top: "Convolution105"
  convolution_param {
    num_output: 256
    bias_term: false
    kernel_size: 1
    stride: 2
    weight_filler { type: "xavier" }
  }
}
# BatchNorm statistics frozen; affine transform learned by the Scale layer.
layer {
  name: "BatchNorm105"
  type: "BatchNorm"
  bottom: "Convolution105"
  top: "Convolution105"
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
}
layer {
  name: "Scale105"
  type: "Scale"
  bottom: "Convolution105"
  top: "Convolution105"
  scale_param { filler { value: 1.0 } bias_term: true bias_filler { value: 0.0 } }
}
layer { name: "ReLU121" type: "ReLU" bottom: "Convolution105" top: "Convolution105" }
layer {
  name: "Convolution106"
  type: "Convolution"
  bottom: "Convolution105"
  top: "Convolution106"
  convolution_param {
    num_output: 128
    bias_term: false
    kernel_size: 1
    weight_filler { type: "xavier" }
  }
}
layer {
  name: "BatchNorm106"
  type: "BatchNorm"
  bottom: "Convolution106"
  top: "Convolution106"
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
}
layer {
  name: "Scale106"
  type: "Scale"
  bottom: "Convolution106"
  top: "Convolution106"
  scale_param { filler { value: 1.0 } bias_term: true bias_filler { value: 0.0 } }
}
layer { name: "ReLU122" type: "ReLU" bottom: "Convolution106" top: "Convolution106" }
layer {
  name: "Convolution107"
  type: "Convolution"
  bottom: "Convolution106"
  top: "Convolution107"
  convolution_param {
    num_output: 64
    bias_term: false
    kernel_size: 1
    weight_filler { type: "xavier" }
  }
}
layer {
  name: "BatchNorm107"
  type: "BatchNorm"
  bottom: "Convolution107"
  top: "Convolution107"
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
}
layer {
  name: "Scale107"
  type: "Scale"
  bottom: "Convolution107"
  top: "Convolution107"
  scale_param { filler { value: 1.0 } bias_term: true bias_filler { value: 0.0 } }
}
layer { name: "ReLU123" type: "ReLU" bottom: "Convolution107" top: "Convolution107" }
# 3x1 half of the separable convolution (128 channels).
layer {
  name: "Convolution108"
  type: "Convolution"
  bottom: "Convolution107"
  top: "Convolution108"
  convolution_param {
    num_output: 128
    bias_term: false
    pad_h: 1
    pad_w: 0
    kernel_h: 3
    kernel_w: 1
    weight_filler { type: "xavier" }
  }
}
layer {
  name: "BatchNorm108"
  type: "BatchNorm"
  bottom: "Convolution108"
  top: "Convolution108"
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
}
layer {
  name: "Scale108"
  type: "Scale"
  bottom: "Convolution108"
  top: "Convolution108"
  scale_param { filler { value: 1.0 } bias_term: true bias_filler { value: 0.0 } }
}
layer { name: "ReLU124" type: "ReLU" bottom: "Convolution108" top: "Convolution108" }
# 1x3 half of the separable convolution (128 channels).
layer {
  name: "Convolution109"
  type: "Convolution"
  bottom: "Convolution108"
  top: "Convolution109"
  convolution_param {
    num_output: 128
    bias_term: false
    pad_h: 0
    pad_w: 1
    kernel_h: 1
    kernel_w: 3
    weight_filler { type: "xavier" }
  }
}
layer {
  name: "BatchNorm109"
  type: "BatchNorm"
  bottom: "Convolution109"
  top: "Convolution109"
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
}
layer {
  name: "Scale109"
  type: "Scale"
  bottom: "Convolution109"
  top: "Convolution109"
  scale_param { filler { value: 1.0 } bias_term: true bias_filler { value: 0.0 } }
}
layer { name: "ReLU125" type: "ReLU" bottom: "Convolution109" top: "Convolution109" }
# 1x1 expansion to the new stage width (256 channels).
layer {
  name: "Convolution110"
  type: "Convolution"
  bottom: "Convolution109"
  top: "Convolution110"
  convolution_param {
    num_output: 256
    bias_term: false
    kernel_size: 1
    weight_filler { type: "xavier" }
  }
}
layer {
  name: "BatchNorm110"
  type: "BatchNorm"
  bottom: "Convolution110"
  top: "Convolution110"
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
}
layer {
  name: "Scale110"
  type: "Scale"
  bottom: "Convolution110"
  top: "Convolution110"
  scale_param { filler { value: 1.0 } bias_term: true bias_filler { value: 0.0 } }
}
layer { name: "ReLU126" type: "ReLU" bottom: "Convolution110" top: "Convolution110" }
# Sum of the projection shortcut and the bottleneck branch.
layer {
  name: "Eltwise21"
  type: "Eltwise"
  bottom: "Convolution105"
  bottom: "Convolution110"
  top: "Eltwise21"
  eltwise_param { operation: SUM }
}
# Final 1x1 convolution before the classifier: compresses the 256-channel
# stage output ("Eltwise21") down to 128 channels ("Convolution111").
# Cleanup: stripped "| |" extraction residue and inert/default fields;
# effective parameters are unchanged.
layer { name: "bypass_1024_3" type: "ReLU" bottom: "Eltwise21" top: "Eltwise21" }
layer {
  name: "Convolution111"
  type: "Convolution"
  bottom: "Eltwise21"
  top: "Convolution111"
  convolution_param {
    num_output: 128
    bias_term: false
    kernel_size: 1
    weight_filler { type: "xavier" }
  }
}
# BatchNorm statistics frozen; affine transform learned by the Scale layer.
layer {
  name: "BatchNorm111"
  type: "BatchNorm"
  bottom: "Convolution111"
  top: "Convolution111"
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
  param { lr_mult: 0.0 decay_mult: 0.0 }
}
layer {
  name: "Scale111"
  type: "Scale"
  bottom: "Convolution111"
  top: "Convolution111"
  scale_param { filler { value: 1.0 } bias_term: true bias_filler { value: 0.0 } }
}
layer { name: "conv_128" type: "ReLU" bottom: "Convolution111" top: "Convolution111" }
# Classifier head: global average pooling -> bias-free 1000-way fully
# connected layer -> softmax loss, plus top-1/top-5 accuracy in both phases.
# Cleanup: stripped "| |" extraction residue; removed the inert bias_filler
# on fc1000 (bias_term: false makes it a no-op).
layer {
  name: "pool5"
  type: "Pooling"
  bottom: "Convolution111"
  top: "pool5"
  pooling_param { pool: AVE global_pooling: true }
}
layer {
  name: "fc1000"
  type: "InnerProduct"
  bottom: "pool5"
  top: "fc1000"
  inner_product_param {
    num_output: 1000
    bias_term: false
    weight_filler { type: "xavier" }
  }
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "fc1000"
  bottom: "label"
  top: "loss"
}
# Accuracy layers are evaluation-only (no gradients flow through Accuracy).
layer {
  name: "acc5_test"
  type: "Accuracy"
  bottom: "fc1000"
  bottom: "label"
  top: "acc5_test"
  include { phase: TEST }
  accuracy_param { top_k: 5 }
}
layer {
  name: "acc5_train"
  type: "Accuracy"
  bottom: "fc1000"
  bottom: "label"
  top: "acc5_train"
  include { phase: TRAIN }
  accuracy_param { top_k: 5 }
}
layer {
  name: "acc1_test"
  type: "Accuracy"
  bottom: "fc1000"
  bottom: "label"
  top: "acc1_test"
  include { phase: TEST }
  accuracy_param { top_k: 1 }
}
layer {
  name: "acc1_train"
  type: "Accuracy"
  bottom: "fc1000"
  bottom: "label"
  top: "acc1_train"
  include { phase: TRAIN }
  accuracy_param { top_k: 1 }
}
# End of SqueezeNext-1.0-23-v5 prototxt.
# (Removed non-file GitHub gist page footer text that was captured during
# extraction; it is not valid protobuf text format.)