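# ResNet-101 defined in Caffe prototxt, deploy style: "input"/"input_shape" declarations
# stand in for a data layer. Alongside the image blob "data" (1 x 3 x 224 x 224), a
# "rois" blob is declared, the usual arrangement when the backbone feeds an
# ROI-pooling detection head (Fast/Faster R-CNN style).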
name: "ResNet-101" | |
input: "data" | |
input_shape{ | |
dim: 1 | |
dim: 3 | |
dim: 224 | |
dim: 224 | |
} | |
input: "rois" | |
input_shape{ | |
dim: 1 | |
dim: 1 | |
dim: 1 | |
dim: 5 | |
} | |
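# Each ROI is a row of 5 values; in the common Fast/Faster R-CNN convention these are
# [batch_index, x1, y1, x2, y2] in input-image coordinates. The shapes above (one image,
# one ROI) are placeholders and are normally reshaped at run time.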
layer { | |
bottom: "data" | |
top: "conv1" | |
name: "conv1" | |
type: "Convolution" | |
convolution_param { | |
num_output: 64 | |
kernel_size: 7 | |
pad: 3 | |
stride: 2 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "conv1" | |
top: "conv1" | |
name: "bn_conv1" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "conv1" | |
top: "conv1" | |
name: "scale_conv1" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "conv1" | |
bottom: "conv1" | |
name: "conv1_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "conv1" | |
top: "pool1" | |
name: "pool1" | |
type: "Pooling" | |
pooling_param { | |
kernel_size: 3 | |
stride: 2 | |
pool: MAX | |
} | |
} | |
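# Stem: 7x7/stride-2 convolution plus 3x3/stride-2 max pooling, so "pool1" sits at 1/4 of
# the input resolution. Every convolution in this network has bias_term: false and is
# followed by a BatchNorm layer frozen to its stored statistics (use_global_stats: true)
# and a Scale layer providing the learned per-channel gamma and beta.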
layer { | |
bottom: "pool1" | |
top: "res2a_branch1" | |
name: "res2a_branch1" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res2a_branch1" | |
top: "res2a_branch1" | |
name: "bn2a_branch1" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res2a_branch1" | |
top: "res2a_branch1" | |
name: "scale2a_branch1" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "pool1" | |
top: "res2a_branch2a" | |
name: "res2a_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 64 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res2a_branch2a" | |
top: "res2a_branch2a" | |
name: "bn2a_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res2a_branch2a" | |
top: "res2a_branch2a" | |
name: "scale2a_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res2a_branch2a" | |
bottom: "res2a_branch2a" | |
name: "res2a_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res2a_branch2a" | |
top: "res2a_branch2b" | |
name: "res2a_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 64 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res2a_branch2b" | |
top: "res2a_branch2b" | |
name: "bn2a_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res2a_branch2b" | |
top: "res2a_branch2b" | |
name: "scale2a_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res2a_branch2b" | |
bottom: "res2a_branch2b" | |
name: "res2a_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res2a_branch2b" | |
top: "res2a_branch2c" | |
name: "res2a_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res2a_branch2c" | |
top: "res2a_branch2c" | |
name: "bn2a_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res2a_branch2c" | |
top: "res2a_branch2c" | |
name: "scale2a_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res2a_branch1" | |
bottom: "res2a_branch2c" | |
top: "res2a" | |
name: "res2a" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res2a" | |
top: "res2a" | |
name: "res2a_relu" | |
type: "ReLU" | |
} | |
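# res2a above is the first bottleneck block: 1x1 reduction (64), 3x3 convolution (64),
# 1x1 expansion (256), summed elementwise with a 1x1 projection shortcut
# (res2a_branch1). The remaining blocks of the stage (res2b, res2c) repeat the pattern
# with an identity shortcut, so they carry no branch1 convolution.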
layer { | |
bottom: "res2a" | |
top: "res2b_branch2a" | |
name: "res2b_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 64 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res2b_branch2a" | |
top: "res2b_branch2a" | |
name: "bn2b_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res2b_branch2a" | |
top: "res2b_branch2a" | |
name: "scale2b_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res2b_branch2a" | |
bottom: "res2b_branch2a" | |
name: "res2b_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res2b_branch2a" | |
top: "res2b_branch2b" | |
name: "res2b_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 64 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res2b_branch2b" | |
top: "res2b_branch2b" | |
name: "bn2b_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res2b_branch2b" | |
top: "res2b_branch2b" | |
name: "scale2b_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res2b_branch2b" | |
bottom: "res2b_branch2b" | |
name: "res2b_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res2b_branch2b" | |
top: "res2b_branch2c" | |
name: "res2b_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res2b_branch2c" | |
top: "res2b_branch2c" | |
name: "bn2b_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res2b_branch2c" | |
top: "res2b_branch2c" | |
name: "scale2b_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res2a" | |
bottom: "res2b_branch2c" | |
top: "res2b" | |
name: "res2b" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res2b" | |
top: "res2b" | |
name: "res2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res2b" | |
top: "res2c_branch2a" | |
name: "res2c_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 64 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res2c_branch2a" | |
top: "res2c_branch2a" | |
name: "bn2c_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res2c_branch2a" | |
top: "res2c_branch2a" | |
name: "scale2c_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res2c_branch2a" | |
bottom: "res2c_branch2a" | |
name: "res2c_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res2c_branch2a" | |
top: "res2c_branch2b" | |
name: "res2c_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 64 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res2c_branch2b" | |
top: "res2c_branch2b" | |
name: "bn2c_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res2c_branch2b" | |
top: "res2c_branch2b" | |
name: "scale2c_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res2c_branch2b" | |
bottom: "res2c_branch2b" | |
name: "res2c_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res2c_branch2b" | |
top: "res2c_branch2c" | |
name: "res2c_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res2c_branch2c" | |
top: "res2c_branch2c" | |
name: "bn2c_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res2c_branch2c" | |
top: "res2c_branch2c" | |
name: "scale2c_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res2b" | |
bottom: "res2c_branch2c" | |
top: "res2c" | |
name: "res2c" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res2c" | |
top: "res2c" | |
name: "res2c_relu" | |
type: "ReLU" | |
} | |
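# Stage 3 (res3a .. res3b3): four bottleneck blocks with 128/128/512 channels. The first
# block downsamples, applying stride 2 in both branch1 and branch2a, which halves the
# feature map to 1/8 of the input resolution.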
layer { | |
bottom: "res2c" | |
top: "res3a_branch1" | |
name: "res3a_branch1" | |
type: "Convolution" | |
convolution_param { | |
num_output: 512 | |
kernel_size: 1 | |
pad: 0 | |
stride: 2 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res3a_branch1" | |
top: "res3a_branch1" | |
name: "bn3a_branch1" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res3a_branch1" | |
top: "res3a_branch1" | |
name: "scale3a_branch1" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res2c" | |
top: "res3a_branch2a" | |
name: "res3a_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 128 | |
kernel_size: 1 | |
pad: 0 | |
stride: 2 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res3a_branch2a" | |
top: "res3a_branch2a" | |
name: "bn3a_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res3a_branch2a" | |
top: "res3a_branch2a" | |
name: "scale3a_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res3a_branch2a" | |
bottom: "res3a_branch2a" | |
name: "res3a_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res3a_branch2a" | |
top: "res3a_branch2b" | |
name: "res3a_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 128 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res3a_branch2b" | |
top: "res3a_branch2b" | |
name: "bn3a_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res3a_branch2b" | |
top: "res3a_branch2b" | |
name: "scale3a_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res3a_branch2b" | |
bottom: "res3a_branch2b" | |
name: "res3a_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res3a_branch2b" | |
top: "res3a_branch2c" | |
name: "res3a_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 512 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res3a_branch2c" | |
top: "res3a_branch2c" | |
name: "bn3a_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res3a_branch2c" | |
top: "res3a_branch2c" | |
name: "scale3a_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res3a_branch1" | |
bottom: "res3a_branch2c" | |
top: "res3a" | |
name: "res3a" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res3a" | |
top: "res3a" | |
name: "res3a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res3a" | |
top: "res3b1_branch2a" | |
name: "res3b1_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 128 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res3b1_branch2a" | |
top: "res3b1_branch2a" | |
name: "bn3b1_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res3b1_branch2a" | |
top: "res3b1_branch2a" | |
name: "scale3b1_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res3b1_branch2a" | |
bottom: "res3b1_branch2a" | |
name: "res3b1_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res3b1_branch2a" | |
top: "res3b1_branch2b" | |
name: "res3b1_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 128 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res3b1_branch2b" | |
top: "res3b1_branch2b" | |
name: "bn3b1_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res3b1_branch2b" | |
top: "res3b1_branch2b" | |
name: "scale3b1_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res3b1_branch2b" | |
bottom: "res3b1_branch2b" | |
name: "res3b1_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res3b1_branch2b" | |
top: "res3b1_branch2c" | |
name: "res3b1_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 512 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res3b1_branch2c" | |
top: "res3b1_branch2c" | |
name: "bn3b1_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res3b1_branch2c" | |
top: "res3b1_branch2c" | |
name: "scale3b1_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res3a" | |
bottom: "res3b1_branch2c" | |
top: "res3b1" | |
name: "res3b1" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res3b1" | |
top: "res3b1" | |
name: "res3b1_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res3b1" | |
top: "res3b2_branch2a" | |
name: "res3b2_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 128 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res3b2_branch2a" | |
top: "res3b2_branch2a" | |
name: "bn3b2_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res3b2_branch2a" | |
top: "res3b2_branch2a" | |
name: "scale3b2_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res3b2_branch2a" | |
bottom: "res3b2_branch2a" | |
name: "res3b2_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res3b2_branch2a" | |
top: "res3b2_branch2b" | |
name: "res3b2_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 128 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res3b2_branch2b" | |
top: "res3b2_branch2b" | |
name: "bn3b2_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res3b2_branch2b" | |
top: "res3b2_branch2b" | |
name: "scale3b2_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res3b2_branch2b" | |
bottom: "res3b2_branch2b" | |
name: "res3b2_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res3b2_branch2b" | |
top: "res3b2_branch2c" | |
name: "res3b2_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 512 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res3b2_branch2c" | |
top: "res3b2_branch2c" | |
name: "bn3b2_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res3b2_branch2c" | |
top: "res3b2_branch2c" | |
name: "scale3b2_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res3b1" | |
bottom: "res3b2_branch2c" | |
top: "res3b2" | |
name: "res3b2" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res3b2" | |
top: "res3b2" | |
name: "res3b2_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res3b2" | |
top: "res3b3_branch2a" | |
name: "res3b3_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 128 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res3b3_branch2a" | |
top: "res3b3_branch2a" | |
name: "bn3b3_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res3b3_branch2a" | |
top: "res3b3_branch2a" | |
name: "scale3b3_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res3b3_branch2a" | |
bottom: "res3b3_branch2a" | |
name: "res3b3_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res3b3_branch2a" | |
top: "res3b3_branch2b" | |
name: "res3b3_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 128 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res3b3_branch2b" | |
top: "res3b3_branch2b" | |
name: "bn3b3_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res3b3_branch2b" | |
top: "res3b3_branch2b" | |
name: "scale3b3_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res3b3_branch2b" | |
bottom: "res3b3_branch2b" | |
name: "res3b3_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res3b3_branch2b" | |
top: "res3b3_branch2c" | |
name: "res3b3_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 512 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res3b3_branch2c" | |
top: "res3b3_branch2c" | |
name: "bn3b3_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res3b3_branch2c" | |
top: "res3b3_branch2c" | |
name: "scale3b3_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res3b2" | |
bottom: "res3b3_branch2c" | |
top: "res3b3" | |
name: "res3b3" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res3b3" | |
top: "res3b3" | |
name: "res3b3_relu" | |
type: "ReLU" | |
} | |
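# Stage 4 (res4a onward): bottleneck blocks with 256/256/1024 channels; in the full
# ResNet-101 this stage has 23 blocks (res4a plus res4b1 .. res4b22). As before, the
# first block applies stride 2 in branch1 and branch2a, giving 1/16 resolution.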
layer { | |
bottom: "res3b3" | |
top: "res4a_branch1" | |
name: "res4a_branch1" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 2 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4a_branch1" | |
top: "res4a_branch1" | |
name: "bn4a_branch1" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4a_branch1" | |
top: "res4a_branch1" | |
name: "scale4a_branch1" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res3b3" | |
top: "res4a_branch2a" | |
name: "res4a_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 2 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4a_branch2a" | |
top: "res4a_branch2a" | |
name: "bn4a_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4a_branch2a" | |
top: "res4a_branch2a" | |
name: "scale4a_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4a_branch2a" | |
bottom: "res4a_branch2a" | |
name: "res4a_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4a_branch2a" | |
top: "res4a_branch2b" | |
name: "res4a_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4a_branch2b" | |
top: "res4a_branch2b" | |
name: "bn4a_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4a_branch2b" | |
top: "res4a_branch2b" | |
name: "scale4a_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4a_branch2b" | |
bottom: "res4a_branch2b" | |
name: "res4a_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4a_branch2b" | |
top: "res4a_branch2c" | |
name: "res4a_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4a_branch2c" | |
top: "res4a_branch2c" | |
name: "bn4a_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4a_branch2c" | |
top: "res4a_branch2c" | |
name: "scale4a_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4a_branch1" | |
bottom: "res4a_branch2c" | |
top: "res4a" | |
name: "res4a" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res4a" | |
top: "res4a" | |
name: "res4a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4a" | |
top: "res4b1_branch2a" | |
name: "res4b1_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b1_branch2a" | |
top: "res4b1_branch2a" | |
name: "bn4b1_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b1_branch2a" | |
top: "res4b1_branch2a" | |
name: "scale4b1_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b1_branch2a" | |
bottom: "res4b1_branch2a" | |
name: "res4b1_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b1_branch2a" | |
top: "res4b1_branch2b" | |
name: "res4b1_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b1_branch2b" | |
top: "res4b1_branch2b" | |
name: "bn4b1_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b1_branch2b" | |
top: "res4b1_branch2b" | |
name: "scale4b1_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b1_branch2b" | |
bottom: "res4b1_branch2b" | |
name: "res4b1_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b1_branch2b" | |
top: "res4b1_branch2c" | |
name: "res4b1_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b1_branch2c" | |
top: "res4b1_branch2c" | |
name: "bn4b1_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b1_branch2c" | |
top: "res4b1_branch2c" | |
name: "scale4b1_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4a" | |
bottom: "res4b1_branch2c" | |
top: "res4b1" | |
name: "res4b1" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res4b1" | |
top: "res4b1" | |
name: "res4b1_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b1" | |
top: "res4b2_branch2a" | |
name: "res4b2_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b2_branch2a" | |
top: "res4b2_branch2a" | |
name: "bn4b2_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b2_branch2a" | |
top: "res4b2_branch2a" | |
name: "scale4b2_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b2_branch2a" | |
bottom: "res4b2_branch2a" | |
name: "res4b2_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b2_branch2a" | |
top: "res4b2_branch2b" | |
name: "res4b2_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b2_branch2b" | |
top: "res4b2_branch2b" | |
name: "bn4b2_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b2_branch2b" | |
top: "res4b2_branch2b" | |
name: "scale4b2_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b2_branch2b" | |
bottom: "res4b2_branch2b" | |
name: "res4b2_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b2_branch2b" | |
top: "res4b2_branch2c" | |
name: "res4b2_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b2_branch2c" | |
top: "res4b2_branch2c" | |
name: "bn4b2_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b2_branch2c" | |
top: "res4b2_branch2c" | |
name: "scale4b2_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4b1" | |
bottom: "res4b2_branch2c" | |
top: "res4b2" | |
name: "res4b2" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res4b2" | |
top: "res4b2" | |
name: "res4b2_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b2" | |
top: "res4b3_branch2a" | |
name: "res4b3_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b3_branch2a" | |
top: "res4b3_branch2a" | |
name: "bn4b3_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b3_branch2a" | |
top: "res4b3_branch2a" | |
name: "scale4b3_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b3_branch2a" | |
bottom: "res4b3_branch2a" | |
name: "res4b3_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b3_branch2a" | |
top: "res4b3_branch2b" | |
name: "res4b3_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b3_branch2b" | |
top: "res4b3_branch2b" | |
name: "bn4b3_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b3_branch2b" | |
top: "res4b3_branch2b" | |
name: "scale4b3_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b3_branch2b" | |
bottom: "res4b3_branch2b" | |
name: "res4b3_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b3_branch2b" | |
top: "res4b3_branch2c" | |
name: "res4b3_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b3_branch2c" | |
top: "res4b3_branch2c" | |
name: "bn4b3_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b3_branch2c" | |
top: "res4b3_branch2c" | |
name: "scale4b3_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4b2" | |
bottom: "res4b3_branch2c" | |
top: "res4b3" | |
name: "res4b3" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res4b3" | |
top: "res4b3" | |
name: "res4b3_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b3" | |
top: "res4b4_branch2a" | |
name: "res4b4_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b4_branch2a" | |
top: "res4b4_branch2a" | |
name: "bn4b4_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b4_branch2a" | |
top: "res4b4_branch2a" | |
name: "scale4b4_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b4_branch2a" | |
bottom: "res4b4_branch2a" | |
name: "res4b4_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b4_branch2a" | |
top: "res4b4_branch2b" | |
name: "res4b4_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b4_branch2b" | |
top: "res4b4_branch2b" | |
name: "bn4b4_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b4_branch2b" | |
top: "res4b4_branch2b" | |
name: "scale4b4_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b4_branch2b" | |
bottom: "res4b4_branch2b" | |
name: "res4b4_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b4_branch2b" | |
top: "res4b4_branch2c" | |
name: "res4b4_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b4_branch2c" | |
top: "res4b4_branch2c" | |
name: "bn4b4_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b4_branch2c" | |
top: "res4b4_branch2c" | |
name: "scale4b4_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4b3" | |
bottom: "res4b4_branch2c" | |
top: "res4b4" | |
name: "res4b4" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res4b4" | |
top: "res4b4" | |
name: "res4b4_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b4" | |
top: "res4b5_branch2a" | |
name: "res4b5_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b5_branch2a" | |
top: "res4b5_branch2a" | |
name: "bn4b5_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b5_branch2a" | |
top: "res4b5_branch2a" | |
name: "scale4b5_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b5_branch2a" | |
bottom: "res4b5_branch2a" | |
name: "res4b5_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b5_branch2a" | |
top: "res4b5_branch2b" | |
name: "res4b5_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b5_branch2b" | |
top: "res4b5_branch2b" | |
name: "bn4b5_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b5_branch2b" | |
top: "res4b5_branch2b" | |
name: "scale4b5_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b5_branch2b" | |
bottom: "res4b5_branch2b" | |
name: "res4b5_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b5_branch2b" | |
top: "res4b5_branch2c" | |
name: "res4b5_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b5_branch2c" | |
top: "res4b5_branch2c" | |
name: "bn4b5_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b5_branch2c" | |
top: "res4b5_branch2c" | |
name: "scale4b5_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4b4" | |
bottom: "res4b5_branch2c" | |
top: "res4b5" | |
name: "res4b5" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res4b5" | |
top: "res4b5" | |
name: "res4b5_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b5" | |
top: "res4b6_branch2a" | |
name: "res4b6_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b6_branch2a" | |
top: "res4b6_branch2a" | |
name: "bn4b6_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b6_branch2a" | |
top: "res4b6_branch2a" | |
name: "scale4b6_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b6_branch2a" | |
bottom: "res4b6_branch2a" | |
name: "res4b6_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b6_branch2a" | |
top: "res4b6_branch2b" | |
name: "res4b6_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b6_branch2b" | |
top: "res4b6_branch2b" | |
name: "bn4b6_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b6_branch2b" | |
top: "res4b6_branch2b" | |
name: "scale4b6_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b6_branch2b" | |
bottom: "res4b6_branch2b" | |
name: "res4b6_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b6_branch2b" | |
top: "res4b6_branch2c" | |
name: "res4b6_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b6_branch2c" | |
top: "res4b6_branch2c" | |
name: "bn4b6_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b6_branch2c" | |
top: "res4b6_branch2c" | |
name: "scale4b6_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4b5" | |
bottom: "res4b6_branch2c" | |
top: "res4b6" | |
name: "res4b6" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res4b6" | |
top: "res4b6" | |
name: "res4b6_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b6" | |
top: "res4b7_branch2a" | |
name: "res4b7_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b7_branch2a" | |
top: "res4b7_branch2a" | |
name: "bn4b7_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b7_branch2a" | |
top: "res4b7_branch2a" | |
name: "scale4b7_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b7_branch2a" | |
bottom: "res4b7_branch2a" | |
name: "res4b7_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b7_branch2a" | |
top: "res4b7_branch2b" | |
name: "res4b7_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b7_branch2b" | |
top: "res4b7_branch2b" | |
name: "bn4b7_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b7_branch2b" | |
top: "res4b7_branch2b" | |
name: "scale4b7_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b7_branch2b" | |
bottom: "res4b7_branch2b" | |
name: "res4b7_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b7_branch2b" | |
top: "res4b7_branch2c" | |
name: "res4b7_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b7_branch2c" | |
top: "res4b7_branch2c" | |
name: "bn4b7_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b7_branch2c" | |
top: "res4b7_branch2c" | |
name: "scale4b7_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4b6" | |
bottom: "res4b7_branch2c" | |
top: "res4b7" | |
name: "res4b7" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res4b7" | |
top: "res4b7" | |
name: "res4b7_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b7" | |
top: "res4b8_branch2a" | |
name: "res4b8_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b8_branch2a" | |
top: "res4b8_branch2a" | |
name: "bn4b8_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b8_branch2a" | |
top: "res4b8_branch2a" | |
name: "scale4b8_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b8_branch2a" | |
bottom: "res4b8_branch2a" | |
name: "res4b8_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b8_branch2a" | |
top: "res4b8_branch2b" | |
name: "res4b8_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b8_branch2b" | |
top: "res4b8_branch2b" | |
name: "bn4b8_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b8_branch2b" | |
top: "res4b8_branch2b" | |
name: "scale4b8_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b8_branch2b" | |
bottom: "res4b8_branch2b" | |
name: "res4b8_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b8_branch2b" | |
top: "res4b8_branch2c" | |
name: "res4b8_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b8_branch2c" | |
top: "res4b8_branch2c" | |
name: "bn4b8_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b8_branch2c" | |
top: "res4b8_branch2c" | |
name: "scale4b8_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4b7" | |
bottom: "res4b8_branch2c" | |
top: "res4b8" | |
name: "res4b8" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res4b8" | |
top: "res4b8" | |
name: "res4b8_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b8" | |
top: "res4b9_branch2a" | |
name: "res4b9_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b9_branch2a" | |
top: "res4b9_branch2a" | |
name: "bn4b9_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b9_branch2a" | |
top: "res4b9_branch2a" | |
name: "scale4b9_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b9_branch2a" | |
bottom: "res4b9_branch2a" | |
name: "res4b9_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b9_branch2a" | |
top: "res4b9_branch2b" | |
name: "res4b9_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b9_branch2b" | |
top: "res4b9_branch2b" | |
name: "bn4b9_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b9_branch2b" | |
top: "res4b9_branch2b" | |
name: "scale4b9_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b9_branch2b" | |
bottom: "res4b9_branch2b" | |
name: "res4b9_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b9_branch2b" | |
top: "res4b9_branch2c" | |
name: "res4b9_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b9_branch2c" | |
top: "res4b9_branch2c" | |
name: "bn4b9_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b9_branch2c" | |
top: "res4b9_branch2c" | |
name: "scale4b9_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4b8" | |
bottom: "res4b9_branch2c" | |
top: "res4b9" | |
name: "res4b9" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res4b9" | |
top: "res4b9" | |
name: "res4b9_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b9" | |
top: "res4b10_branch2a" | |
name: "res4b10_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b10_branch2a" | |
top: "res4b10_branch2a" | |
name: "bn4b10_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b10_branch2a" | |
top: "res4b10_branch2a" | |
name: "scale4b10_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b10_branch2a" | |
bottom: "res4b10_branch2a" | |
name: "res4b10_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b10_branch2a" | |
top: "res4b10_branch2b" | |
name: "res4b10_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b10_branch2b" | |
top: "res4b10_branch2b" | |
name: "bn4b10_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b10_branch2b" | |
top: "res4b10_branch2b" | |
name: "scale4b10_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b10_branch2b" | |
bottom: "res4b10_branch2b" | |
name: "res4b10_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b10_branch2b" | |
top: "res4b10_branch2c" | |
name: "res4b10_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b10_branch2c" | |
top: "res4b10_branch2c" | |
name: "bn4b10_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b10_branch2c" | |
top: "res4b10_branch2c" | |
name: "scale4b10_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4b9" | |
bottom: "res4b10_branch2c" | |
top: "res4b10" | |
name: "res4b10" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res4b10" | |
top: "res4b10" | |
name: "res4b10_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b10" | |
top: "res4b11_branch2a" | |
name: "res4b11_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b11_branch2a" | |
top: "res4b11_branch2a" | |
name: "bn4b11_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b11_branch2a" | |
top: "res4b11_branch2a" | |
name: "scale4b11_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b11_branch2a" | |
bottom: "res4b11_branch2a" | |
name: "res4b11_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b11_branch2a" | |
top: "res4b11_branch2b" | |
name: "res4b11_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b11_branch2b" | |
top: "res4b11_branch2b" | |
name: "bn4b11_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b11_branch2b" | |
top: "res4b11_branch2b" | |
name: "scale4b11_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b11_branch2b" | |
bottom: "res4b11_branch2b" | |
name: "res4b11_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b11_branch2b" | |
top: "res4b11_branch2c" | |
name: "res4b11_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b11_branch2c" | |
top: "res4b11_branch2c" | |
name: "bn4b11_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b11_branch2c" | |
top: "res4b11_branch2c" | |
name: "scale4b11_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4b10" | |
bottom: "res4b11_branch2c" | |
top: "res4b11" | |
name: "res4b11" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res4b11" | |
top: "res4b11" | |
name: "res4b11_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b11" | |
top: "res4b12_branch2a" | |
name: "res4b12_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b12_branch2a" | |
top: "res4b12_branch2a" | |
name: "bn4b12_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b12_branch2a" | |
top: "res4b12_branch2a" | |
name: "scale4b12_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b12_branch2a" | |
bottom: "res4b12_branch2a" | |
name: "res4b12_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b12_branch2a" | |
top: "res4b12_branch2b" | |
name: "res4b12_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b12_branch2b" | |
top: "res4b12_branch2b" | |
name: "bn4b12_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b12_branch2b" | |
top: "res4b12_branch2b" | |
name: "scale4b12_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b12_branch2b" | |
bottom: "res4b12_branch2b" | |
name: "res4b12_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b12_branch2b" | |
top: "res4b12_branch2c" | |
name: "res4b12_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b12_branch2c" | |
top: "res4b12_branch2c" | |
name: "bn4b12_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b12_branch2c" | |
top: "res4b12_branch2c" | |
name: "scale4b12_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4b11" | |
bottom: "res4b12_branch2c" | |
top: "res4b12" | |
name: "res4b12" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res4b12" | |
top: "res4b12" | |
name: "res4b12_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b12" | |
top: "res4b13_branch2a" | |
name: "res4b13_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b13_branch2a" | |
top: "res4b13_branch2a" | |
name: "bn4b13_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b13_branch2a" | |
top: "res4b13_branch2a" | |
name: "scale4b13_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b13_branch2a" | |
bottom: "res4b13_branch2a" | |
name: "res4b13_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b13_branch2a" | |
top: "res4b13_branch2b" | |
name: "res4b13_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b13_branch2b" | |
top: "res4b13_branch2b" | |
name: "bn4b13_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b13_branch2b" | |
top: "res4b13_branch2b" | |
name: "scale4b13_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b13_branch2b" | |
bottom: "res4b13_branch2b" | |
name: "res4b13_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b13_branch2b" | |
top: "res4b13_branch2c" | |
name: "res4b13_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b13_branch2c" | |
top: "res4b13_branch2c" | |
name: "bn4b13_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b13_branch2c" | |
top: "res4b13_branch2c" | |
name: "scale4b13_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4b12" | |
bottom: "res4b13_branch2c" | |
top: "res4b13" | |
name: "res4b13" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res4b13" | |
top: "res4b13" | |
name: "res4b13_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b13" | |
top: "res4b14_branch2a" | |
name: "res4b14_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b14_branch2a" | |
top: "res4b14_branch2a" | |
name: "bn4b14_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b14_branch2a" | |
top: "res4b14_branch2a" | |
name: "scale4b14_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b14_branch2a" | |
bottom: "res4b14_branch2a" | |
name: "res4b14_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b14_branch2a" | |
top: "res4b14_branch2b" | |
name: "res4b14_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b14_branch2b" | |
top: "res4b14_branch2b" | |
name: "bn4b14_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b14_branch2b" | |
top: "res4b14_branch2b" | |
name: "scale4b14_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b14_branch2b" | |
bottom: "res4b14_branch2b" | |
name: "res4b14_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b14_branch2b" | |
top: "res4b14_branch2c" | |
name: "res4b14_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b14_branch2c" | |
top: "res4b14_branch2c" | |
name: "bn4b14_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b14_branch2c" | |
top: "res4b14_branch2c" | |
name: "scale4b14_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4b13" | |
bottom: "res4b14_branch2c" | |
top: "res4b14" | |
name: "res4b14" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res4b14" | |
top: "res4b14" | |
name: "res4b14_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b14" | |
top: "res4b15_branch2a" | |
name: "res4b15_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b15_branch2a" | |
top: "res4b15_branch2a" | |
name: "bn4b15_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b15_branch2a" | |
top: "res4b15_branch2a" | |
name: "scale4b15_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b15_branch2a" | |
bottom: "res4b15_branch2a" | |
name: "res4b15_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b15_branch2a" | |
top: "res4b15_branch2b" | |
name: "res4b15_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b15_branch2b" | |
top: "res4b15_branch2b" | |
name: "bn4b15_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b15_branch2b" | |
top: "res4b15_branch2b" | |
name: "scale4b15_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b15_branch2b" | |
bottom: "res4b15_branch2b" | |
name: "res4b15_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b15_branch2b" | |
top: "res4b15_branch2c" | |
name: "res4b15_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b15_branch2c" | |
top: "res4b15_branch2c" | |
name: "bn4b15_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b15_branch2c" | |
top: "res4b15_branch2c" | |
name: "scale4b15_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4b14" | |
bottom: "res4b15_branch2c" | |
top: "res4b15" | |
name: "res4b15" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res4b15" | |
top: "res4b15" | |
name: "res4b15_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b15" | |
top: "res4b16_branch2a" | |
name: "res4b16_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b16_branch2a" | |
top: "res4b16_branch2a" | |
name: "bn4b16_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b16_branch2a" | |
top: "res4b16_branch2a" | |
name: "scale4b16_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b16_branch2a" | |
bottom: "res4b16_branch2a" | |
name: "res4b16_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b16_branch2a" | |
top: "res4b16_branch2b" | |
name: "res4b16_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b16_branch2b" | |
top: "res4b16_branch2b" | |
name: "bn4b16_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b16_branch2b" | |
top: "res4b16_branch2b" | |
name: "scale4b16_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b16_branch2b" | |
bottom: "res4b16_branch2b" | |
name: "res4b16_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b16_branch2b" | |
top: "res4b16_branch2c" | |
name: "res4b16_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b16_branch2c" | |
top: "res4b16_branch2c" | |
name: "bn4b16_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b16_branch2c" | |
top: "res4b16_branch2c" | |
name: "scale4b16_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4b15" | |
bottom: "res4b16_branch2c" | |
top: "res4b16" | |
name: "res4b16" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res4b16" | |
top: "res4b16" | |
name: "res4b16_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b16" | |
top: "res4b17_branch2a" | |
name: "res4b17_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b17_branch2a" | |
top: "res4b17_branch2a" | |
name: "bn4b17_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b17_branch2a" | |
top: "res4b17_branch2a" | |
name: "scale4b17_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b17_branch2a" | |
bottom: "res4b17_branch2a" | |
name: "res4b17_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b17_branch2a" | |
top: "res4b17_branch2b" | |
name: "res4b17_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b17_branch2b" | |
top: "res4b17_branch2b" | |
name: "bn4b17_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b17_branch2b" | |
top: "res4b17_branch2b" | |
name: "scale4b17_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b17_branch2b" | |
bottom: "res4b17_branch2b" | |
name: "res4b17_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b17_branch2b" | |
top: "res4b17_branch2c" | |
name: "res4b17_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b17_branch2c" | |
top: "res4b17_branch2c" | |
name: "bn4b17_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b17_branch2c" | |
top: "res4b17_branch2c" | |
name: "scale4b17_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4b16" | |
bottom: "res4b17_branch2c" | |
top: "res4b17" | |
name: "res4b17" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res4b17" | |
top: "res4b17" | |
name: "res4b17_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b17" | |
top: "res4b18_branch2a" | |
name: "res4b18_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b18_branch2a" | |
top: "res4b18_branch2a" | |
name: "bn4b18_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b18_branch2a" | |
top: "res4b18_branch2a" | |
name: "scale4b18_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b18_branch2a" | |
bottom: "res4b18_branch2a" | |
name: "res4b18_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b18_branch2a" | |
top: "res4b18_branch2b" | |
name: "res4b18_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b18_branch2b" | |
top: "res4b18_branch2b" | |
name: "bn4b18_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b18_branch2b" | |
top: "res4b18_branch2b" | |
name: "scale4b18_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b18_branch2b" | |
bottom: "res4b18_branch2b" | |
name: "res4b18_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b18_branch2b" | |
top: "res4b18_branch2c" | |
name: "res4b18_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b18_branch2c" | |
top: "res4b18_branch2c" | |
name: "bn4b18_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b18_branch2c" | |
top: "res4b18_branch2c" | |
name: "scale4b18_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4b17" | |
bottom: "res4b18_branch2c" | |
top: "res4b18" | |
name: "res4b18" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res4b18" | |
top: "res4b18" | |
name: "res4b18_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b18" | |
top: "res4b19_branch2a" | |
name: "res4b19_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b19_branch2a" | |
top: "res4b19_branch2a" | |
name: "bn4b19_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b19_branch2a" | |
top: "res4b19_branch2a" | |
name: "scale4b19_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b19_branch2a" | |
bottom: "res4b19_branch2a" | |
name: "res4b19_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b19_branch2a" | |
top: "res4b19_branch2b" | |
name: "res4b19_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b19_branch2b" | |
top: "res4b19_branch2b" | |
name: "bn4b19_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b19_branch2b" | |
top: "res4b19_branch2b" | |
name: "scale4b19_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b19_branch2b" | |
bottom: "res4b19_branch2b" | |
name: "res4b19_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b19_branch2b" | |
top: "res4b19_branch2c" | |
name: "res4b19_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b19_branch2c" | |
top: "res4b19_branch2c" | |
name: "bn4b19_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b19_branch2c" | |
top: "res4b19_branch2c" | |
name: "scale4b19_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4b18" | |
bottom: "res4b19_branch2c" | |
top: "res4b19" | |
name: "res4b19" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res4b19" | |
top: "res4b19" | |
name: "res4b19_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b19" | |
top: "res4b20_branch2a" | |
name: "res4b20_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b20_branch2a" | |
top: "res4b20_branch2a" | |
name: "bn4b20_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b20_branch2a" | |
top: "res4b20_branch2a" | |
name: "scale4b20_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b20_branch2a" | |
bottom: "res4b20_branch2a" | |
name: "res4b20_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b20_branch2a" | |
top: "res4b20_branch2b" | |
name: "res4b20_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b20_branch2b" | |
top: "res4b20_branch2b" | |
name: "bn4b20_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b20_branch2b" | |
top: "res4b20_branch2b" | |
name: "scale4b20_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b20_branch2b" | |
bottom: "res4b20_branch2b" | |
name: "res4b20_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b20_branch2b" | |
top: "res4b20_branch2c" | |
name: "res4b20_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b20_branch2c" | |
top: "res4b20_branch2c" | |
name: "bn4b20_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b20_branch2c" | |
top: "res4b20_branch2c" | |
name: "scale4b20_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4b19" | |
bottom: "res4b20_branch2c" | |
top: "res4b20" | |
name: "res4b20" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res4b20" | |
top: "res4b20" | |
name: "res4b20_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b20" | |
top: "res4b21_branch2a" | |
name: "res4b21_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b21_branch2a" | |
top: "res4b21_branch2a" | |
name: "bn4b21_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b21_branch2a" | |
top: "res4b21_branch2a" | |
name: "scale4b21_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b21_branch2a" | |
bottom: "res4b21_branch2a" | |
name: "res4b21_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b21_branch2a" | |
top: "res4b21_branch2b" | |
name: "res4b21_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b21_branch2b" | |
top: "res4b21_branch2b" | |
name: "bn4b21_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b21_branch2b" | |
top: "res4b21_branch2b" | |
name: "scale4b21_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b21_branch2b" | |
bottom: "res4b21_branch2b" | |
name: "res4b21_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b21_branch2b" | |
top: "res4b21_branch2c" | |
name: "res4b21_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b21_branch2c" | |
top: "res4b21_branch2c" | |
name: "bn4b21_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b21_branch2c" | |
top: "res4b21_branch2c" | |
name: "scale4b21_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4b20" | |
bottom: "res4b21_branch2c" | |
top: "res4b21" | |
name: "res4b21" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res4b21" | |
top: "res4b21" | |
name: "res4b21_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b21" | |
top: "res4b22_branch2a" | |
name: "res4b22_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b22_branch2a" | |
top: "res4b22_branch2a" | |
name: "bn4b22_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b22_branch2a" | |
top: "res4b22_branch2a" | |
name: "scale4b22_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b22_branch2a" | |
bottom: "res4b22_branch2a" | |
name: "res4b22_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b22_branch2a" | |
top: "res4b22_branch2b" | |
name: "res4b22_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 256 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b22_branch2b" | |
top: "res4b22_branch2b" | |
name: "bn4b22_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b22_branch2b" | |
top: "res4b22_branch2b" | |
name: "scale4b22_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res4b22_branch2b" | |
bottom: "res4b22_branch2b" | |
name: "res4b22_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res4b22_branch2b" | |
top: "res4b22_branch2c" | |
name: "res4b22_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 1024 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res4b22_branch2c" | |
top: "res4b22_branch2c" | |
name: "bn4b22_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res4b22_branch2c" | |
top: "res4b22_branch2c" | |
name: "scale4b22_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4b21" | |
bottom: "res4b22_branch2c" | |
top: "res4b22" | |
name: "res4b22" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res4b22" | |
top: "res4b22" | |
name: "res4b22_relu" | |
type: "ReLU" | |
} | |
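# End of the conv4_x stage (res4a plus res4b1-res4b22: the 23 bottleneck blocks of ResNet-101). | |
# The conv5_x stage below downsamples once more: both res5a_branch1 and res5a_branch2a use stride 2, | |
# so res5a-res5c run at 1/32 of the input resolution with 2048 output channels. | |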
layer { | |
bottom: "res4b22" | |
top: "res5a_branch1" | |
name: "res5a_branch1" | |
type: "Convolution" | |
convolution_param { | |
num_output: 2048 | |
kernel_size: 1 | |
pad: 0 | |
stride: 2 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res5a_branch1" | |
top: "res5a_branch1" | |
name: "bn5a_branch1" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res5a_branch1" | |
top: "res5a_branch1" | |
name: "scale5a_branch1" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res4b22" | |
top: "res5a_branch2a" | |
name: "res5a_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 512 | |
kernel_size: 1 | |
pad: 0 | |
stride: 2 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res5a_branch2a" | |
top: "res5a_branch2a" | |
name: "bn5a_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res5a_branch2a" | |
top: "res5a_branch2a" | |
name: "scale5a_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res5a_branch2a" | |
bottom: "res5a_branch2a" | |
name: "res5a_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res5a_branch2a" | |
top: "res5a_branch2b" | |
name: "res5a_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 512 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res5a_branch2b" | |
top: "res5a_branch2b" | |
name: "bn5a_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res5a_branch2b" | |
top: "res5a_branch2b" | |
name: "scale5a_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res5a_branch2b" | |
bottom: "res5a_branch2b" | |
name: "res5a_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res5a_branch2b" | |
top: "res5a_branch2c" | |
name: "res5a_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 2048 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res5a_branch2c" | |
top: "res5a_branch2c" | |
name: "bn5a_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res5a_branch2c" | |
top: "res5a_branch2c" | |
name: "scale5a_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res5a_branch1" | |
bottom: "res5a_branch2c" | |
top: "res5a" | |
name: "res5a" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res5a" | |
top: "res5a" | |
name: "res5a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res5a" | |
top: "res5b_branch2a" | |
name: "res5b_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 512 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res5b_branch2a" | |
top: "res5b_branch2a" | |
name: "bn5b_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res5b_branch2a" | |
top: "res5b_branch2a" | |
name: "scale5b_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res5b_branch2a" | |
bottom: "res5b_branch2a" | |
name: "res5b_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res5b_branch2a" | |
top: "res5b_branch2b" | |
name: "res5b_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 512 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res5b_branch2b" | |
top: "res5b_branch2b" | |
name: "bn5b_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res5b_branch2b" | |
top: "res5b_branch2b" | |
name: "scale5b_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res5b_branch2b" | |
bottom: "res5b_branch2b" | |
name: "res5b_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res5b_branch2b" | |
top: "res5b_branch2c" | |
name: "res5b_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 2048 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res5b_branch2c" | |
top: "res5b_branch2c" | |
name: "bn5b_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res5b_branch2c" | |
top: "res5b_branch2c" | |
name: "scale5b_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res5a" | |
bottom: "res5b_branch2c" | |
top: "res5b" | |
name: "res5b" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res5b" | |
top: "res5b" | |
name: "res5b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res5b" | |
top: "res5c_branch2a" | |
name: "res5c_branch2a" | |
type: "Convolution" | |
convolution_param { | |
num_output: 512 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res5c_branch2a" | |
top: "res5c_branch2a" | |
name: "bn5c_branch2a" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res5c_branch2a" | |
top: "res5c_branch2a" | |
name: "scale5c_branch2a" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res5c_branch2a" | |
bottom: "res5c_branch2a" | |
name: "res5c_branch2a_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res5c_branch2a" | |
top: "res5c_branch2b" | |
name: "res5c_branch2b" | |
type: "Convolution" | |
convolution_param { | |
num_output: 512 | |
kernel_size: 3 | |
pad: 1 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res5c_branch2b" | |
top: "res5c_branch2b" | |
name: "bn5c_branch2b" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res5c_branch2b" | |
top: "res5c_branch2b" | |
name: "scale5c_branch2b" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
top: "res5c_branch2b" | |
bottom: "res5c_branch2b" | |
name: "res5c_branch2b_relu" | |
type: "ReLU" | |
} | |
layer { | |
bottom: "res5c_branch2b" | |
top: "res5c_branch2c" | |
name: "res5c_branch2c" | |
type: "Convolution" | |
convolution_param { | |
num_output: 2048 | |
kernel_size: 1 | |
pad: 0 | |
stride: 1 | |
bias_term: false | |
} | |
} | |
layer { | |
bottom: "res5c_branch2c" | |
top: "res5c_branch2c" | |
name: "bn5c_branch2c" | |
type: "BatchNorm" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
bottom: "res5c_branch2c" | |
top: "res5c_branch2c" | |
name: "scale5c_branch2c" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
bottom: "res5b" | |
bottom: "res5c_branch2c" | |
top: "res5c" | |
name: "res5c" | |
type: "Eltwise" | |
} | |
layer { | |
bottom: "res5c" | |
top: "res5c" | |
name: "res5c_relu" | |
type: "ReLU" | |
} | |
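# res5c is the final convolutional feature map: 2048 channels at 1/32 of the input resolution | |
# (7 x 7 for a 224 x 224 input). Everything below is the R-MAC aggregation head. | |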
## Get the R-MAC region descriptors with an ROIPooling layer. With a batch size of 1 we end up with N_regions x D x pooled_h x pooled_w. | |
layer { | |
name: "pooled_rois" | |
type: "ROIPooling" | |
bottom: "res5c" | |
bottom: "rois" | |
top: "pooled_rois" | |
roi_pooling_param { | |
pooled_w: 1 | |
pooled_h: 1 | |
spatial_scale: 0.03125 # 1/32 | |
} | |
} | |
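# The "rois" bottom follows the Fast R-CNN convention: one row per region, laid out as | |
# [batch_index, x1, y1, x2, y2] in input-image pixel coordinates. spatial_scale maps those | |
# coordinates onto res5c (stride 32), and with pooled_w = pooled_h = 1 the layer returns the | |
# per-region max over each region's footprint, i.e. one 2048-D MAC vector per region. The | |
# 1 x 1 x 1 x 5 "rois" input_shape declared at the top of the file is just a placeholder and is | |
# presumably reshaped to one row per region at run time. | |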
layer { | |
name: "pooled_rois/normalized" | |
type: "Normalize" | |
bottom: "pooled_rois" | |
top: "pooled_rois/normalized" | |
} | |
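# The Normalize layer performs channel-wise L2 normalization, so each region's 2048-D MAC vector | |
# is scaled to unit length: the first step of the L2 -> center -> PCA -> L2 -> sum -> L2 chain | |
# that makes up R-MAC. | |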
layer { | |
name: "pooled_rois/normalized_flat" | |
type: "Reshape" | |
reshape_param{ | |
shape{ | |
dim: 0 | |
dim: 2048 # feat x spx (1) x spy (1) | |
dim: 1 | |
dim: 1 | |
} | |
} | |
bottom: "pooled_rois/normalized" | |
top: "pooled_rois/normalized_flat" | |
} | |
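# In a Caffe Reshape layer, dim: 0 copies the corresponding dimension from the bottom blob, so the | |
# output here is N_regions x 2048 x 1 x 1: one explicit column descriptor per region for the Scale | |
# and InnerProduct layers below. | |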
# Mean centering is done with a Scale layer: the scale is fixed at 1 and the shift (bias) does the centering. The shift values need to be copied into the model weights. | |
layer { | |
name: "pooled_rois/centered" | |
bottom: "pooled_rois/normalized_flat" | |
top: "pooled_rois/centered" | |
type: "Scale" | |
scale_param { | |
bias_term: true | |
filler { | |
type: "constant" | |
value: 1 | |
} | |
bias_filler { | |
type: "constant" | |
value: 0 | |
} | |
} | |
} | |
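# To turn this layer into an actual mean subtraction, the scale blob stays at 1 and the bias blob | |
# holds the negative of the dataset mean of the L2-normalized region descriptors, computed offline. | |
# A rough pycaffe sketch, assuming the usual Scale-layer blob layout (params[0] = scale, | |
# params[1] = bias) and a hypothetical 2048-D numpy array mean_vec: | |
#   net.params['pooled_rois/centered'][0].data[...] = 1.0 | |
#   net.params['pooled_rois/centered'][1].data[...] = -mean_vec | |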
# Then the PCA projection, implemented as another fully connected (InnerProduct) layer. | |
layer { | |
name: "pooled_rois/pca" | |
type: "InnerProduct" | |
inner_product_param { | |
num_output: 2048 # output PCA dimensionality | |
weight_filler { | |
type: "xavier" | |
} | |
bias_filler { | |
type: "constant" | |
value: 0 | |
} | |
} | |
bottom: "pooled_rois/centered" | |
top: "pooled_rois/pca" | |
} | |
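# The PCA weights are also loaded offline: params[0] of this InnerProduct holds the | |
# num_output x 2048 projection matrix (rows = principal components, optionally divided by the | |
# square roots of the eigenvalues for whitening), and params[1] the bias, which can stay at 0 | |
# because centering is already handled by the Scale layer above. Hypothetical pycaffe sketch | |
# (pca_matrix computed offline): | |
#   net.params['pooled_rois/pca'][0].data[...] = pca_matrix   # shape (2048, 2048) | |
#   net.params['pooled_rois/pca'][1].data[...] = 0.0 | |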
# After the InnerProduct layer the blob is N_regions x d. We reshape it to N_regions x d x 1 x 1 so we can L2-normalize again. | |
layer { | |
name: "pooled_rois/pca/reshaped" | |
type: "Reshape" | |
reshape_param{ | |
shape{ | |
dim: 0 | |
dim: 0 | |
dim: 1 | |
dim: 1 | |
} | |
} | |
bottom: "pooled_rois/pca" | |
top: "pooled_rois/pca/reshaped" | |
} | |
layer { | |
name: "pooled_rois/pca/normalized" | |
type: "Normalize" | |
bottom: "pooled_rois/pca/reshaped" | |
top: "pooled_rois/pca/normalized" | |
} | |
layer { | |
name: "rmac" | |
type: "Python" | |
bottom: "pooled_rois/pca/normalized" | |
top: "rmac" | |
python_param { | |
module: 'custom_layers' | |
layer: 'AggregateLayer' | |
param_str: "{}" | |
} | |
} | |
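# 'custom_layers.AggregateLayer' is not included in this gist. In the R-MAC pipeline this step | |
# sums the PCA-transformed, L2-normalized region descriptors into a single image descriptor. A | |
# minimal sketch of what such a Python layer could look like (an assumption, not the original | |
# implementation): | |
#   import caffe | |
#   class AggregateLayer(caffe.Layer): | |
#       def setup(self, bottom, top): | |
#           pass                                    # no parameters to parse | |
#       def reshape(self, bottom, top): | |
#           top[0].reshape(1, bottom[0].data.shape[1], 1, 1) | |
#       def forward(self, bottom, top): | |
#           # sum over the region axis, keeping a 1 x D x 1 x 1 output | |
#           top[0].data[...] = bottom[0].data.sum(axis=0, keepdims=True) | |
#       def backward(self, top, propagate_down, bottom): | |
#           pass                                    # inference only, no gradients needed | |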
## L2-normalize one last time. | |
layer { | |
name: "rmac/normalized" | |
type: "Normalize" | |
bottom: "rmac" | |
top: "rmac/normalized" | |
} |
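# The final 'rmac/normalized' top is the image-level R-MAC descriptor (1 x 2048 x 1 x 1), ready to | |
# be compared against other images with a dot product. A rough pycaffe usage sketch; the file names, | |
# the example region list and the exact shape fed into 'rois' are assumptions: | |
#   import numpy as np | |
#   import caffe | |
#   net = caffe.Net('resnet101_rmac.prototxt', 'resnet101_rmac.caffemodel', caffe.TEST) | |
#   img = np.zeros((1, 3, 224, 224), dtype=np.float32)        # replace with a preprocessed image | |
#   boxes = np.array([[0, 0, 224, 224], [0, 0, 112, 112]],    # example regions as (x1, y1, x2, y2) | |
#                    dtype=np.float32) | |
#   rois = np.hstack([np.zeros((len(boxes), 1), dtype=np.float32), boxes])   # prepend batch index 0 | |
#   net.blobs['data'].data[...] = img | |
#   net.blobs['rois'].reshape(len(rois), 5) | |
#   net.blobs['rois'].data[...] = rois | |
#   descriptor = net.forward()['rmac/normalized'].squeeze()   # 2048-D, unit L2 norm | |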