Last active
March 1, 2018 20:13
-
-
Save zhreshold/461897faa8748cd890ee7c9637645182 to your computer and use it in GitHub Desktop.
parallel18_6x_CIFAR10 prototxt
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
name: "Parallel18_6x_CIFAR10"
layer {
  name: "data"
  type: "Input"
  top: "data"
  input_param {
    shape {
      dim: 1
      dim: 3
      dim: 32
      dim: 32
    }
  }
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 1
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "bn_conv1"
  type: "BatchNorm"
  bottom: "conv1"
  top: "conv1"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale_conv1"
  type: "Scale"
  bottom: "conv1"
  top: "conv1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "conv1_relu"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1_relu"
}
layer {
  name: "res2a_0_branch1"
  type: "Convolution"
  bottom: "conv1_relu"
  top: "res2a_0_branch1"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
  }
}
layer {
  name: "bn2a_0_branch1"
  type: "BatchNorm"
  bottom: "res2a_0_branch1"
  top: "res2a_0_branch1"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale2a_0_branch1"
  type: "Scale"
  bottom: "res2a_0_branch1"
  top: "res2a_0_branch1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res2a_0_branch2a"
  type: "Convolution"
  bottom: "conv1_relu"
  top: "res2a_0_branch2a"
  convolution_param {
    num_output: 8
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
  }
}
layer {
  name: "bn2a_0_branch2a"
  type: "BatchNorm"
  bottom: "res2a_0_branch2a"
  top: "res2a_0_branch2a"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale2a_0_branch2a"
  type: "Scale"
  bottom: "res2a_0_branch2a"
  top: "res2a_0_branch2a"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res2a_0_branch2a_relu"
  type: "ReLU"
  bottom: "res2a_0_branch2a"
  top: "res2a_0_branch2a"
}
layer {
  name: "res2a_0_branch2b"
  type: "Convolution"
  bottom: "res2a_0_branch2a"
  top: "res2a_0_branch2b"
  convolution_param {
    num_output: 8
    bias_term: false
    pad: 1
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "bn2a_0_branch2b"
  type: "BatchNorm"
  bottom: "res2a_0_branch2b"
  top: "res2a_0_branch2b"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale2a_0_branch2b"
  type: "Scale"
  bottom: "res2a_0_branch2b"
  top: "res2a_0_branch2b"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res2a_0_branch2b_relu"
  type: "ReLU"
  bottom: "res2a_0_branch2b"
  top: "res2a_0_branch2b"
}
layer {
  name: "res2a_0_branch2c"
  type: "Convolution"
  bottom: "res2a_0_branch2b"
  top: "res2a_0_branch2c"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
  }
}
layer {
  name: "bn2a_0_branch2c"
  type: "BatchNorm"
  bottom: "res2a_0_branch2c"
  top: "res2a_0_branch2c"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale2a_0_branch2c"
  type: "Scale"
  bottom: "res2a_0_branch2c"
  top: "res2a_0_branch2c"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res2a_5_branch1"
  type: "Convolution"
  bottom: "conv1_relu"
  top: "res2a_5_branch1"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
  }
}
layer {
  name: "bn2a_5_branch1"
  type: "BatchNorm"
  bottom: "res2a_5_branch1"
  top: "res2a_5_branch1"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale2a_5_branch1"
  type: "Scale"
  bottom: "res2a_5_branch1"
  top: "res2a_5_branch1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res2a_5_branch2a"
  type: "Convolution"
  bottom: "conv1_relu"
  top: "res2a_5_branch2a"
  convolution_param {
    num_output: 8
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
  }
}
layer {
  name: "bn2a_5_branch2a"
  type: "BatchNorm"
  bottom: "res2a_5_branch2a"
  top: "res2a_5_branch2a"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale2a_5_branch2a"
  type: "Scale"
  bottom: "res2a_5_branch2a"
  top: "res2a_5_branch2a"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res2a_5_branch2a_relu"
  type: "ReLU"
  bottom: "res2a_5_branch2a"
  top: "res2a_5_branch2a"
}
layer {
  name: "res2a_5_branch2b"
  type: "Convolution"
  bottom: "res2a_5_branch2a"
  top: "res2a_5_branch2b"
  convolution_param {
    num_output: 8
    bias_term: false
    pad: 1
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "bn2a_5_branch2b"
  type: "BatchNorm"
  bottom: "res2a_5_branch2b"
  top: "res2a_5_branch2b"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale2a_5_branch2b"
  type: "Scale"
  bottom: "res2a_5_branch2b"
  top: "res2a_5_branch2b"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res2a_5_branch2b_relu"
  type: "ReLU"
  bottom: "res2a_5_branch2b"
  top: "res2a_5_branch2b"
}
layer {
  name: "res2a_5_branch2c"
  type: "Convolution"
  bottom: "res2a_5_branch2b"
  top: "res2a_5_branch2c"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
  }
}
layer {
  name: "bn2a_5_branch2c"
  type: "BatchNorm"
  bottom: "res2a_5_branch2c"
  top: "res2a_5_branch2c"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale2a_5_branch2c"
  type: "Scale"
  bottom: "res2a_5_branch2c"
  top: "res2a_5_branch2c"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res2a"
  type: "Eltwise"
  bottom: "res2a_5_branch1"
  bottom: "res2a_5_branch2c"
  top: "res2a"
}
layer {
  name: "Eltwise1"
  type: "Eltwise"
  bottom: "res2a_0_branch1"
  bottom: "res2a_0_branch2c"
  top: "Eltwise1"
}
layer {
  name: "res2a_0_relu"
  type: "ReLU"
  bottom: "Eltwise1"
  top: "Eltwise1"
}
layer {
  name: "res2b_0_branch2a"
  type: "Convolution"
  bottom: "Eltwise1"
  top: "res2b_0_branch2a"
  convolution_param {
    num_output: 8
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
  }
}
layer {
  name: "bn2b_0_branch2a"
  type: "BatchNorm"
  bottom: "res2b_0_branch2a"
  top: "res2b_0_branch2a"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale2b_0_branch2a"
  type: "Scale"
  bottom: "res2b_0_branch2a"
  top: "res2b_0_branch2a"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res2b_0_branch2a_relu"
  type: "ReLU"
  bottom: "res2b_0_branch2a"
  top: "res2b_0_branch2a"
}
layer {
  name: "res2b_0_branch2b"
  type: "Convolution"
  bottom: "res2b_0_branch2a"
  top: "res2b_0_branch2b"
  convolution_param {
    num_output: 8
    bias_term: false
    pad: 1
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "bn2b_0_branch2b"
  type: "BatchNorm"
  bottom: "res2b_0_branch2b"
  top: "res2b_0_branch2b"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale2b_0_branch2b"
  type: "Scale"
  bottom: "res2b_0_branch2b"
  top: "res2b_0_branch2b"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res2b_0_branch2b_relu"
  type: "ReLU"
  bottom: "res2b_0_branch2b"
  top: "res2b_0_branch2b"
}
layer {
  name: "res2b_0_branch2c"
  type: "Convolution"
  bottom: "res2b_0_branch2b"
  top: "res2b_0_branch2c"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
  }
}
layer {
  name: "bn2b_0_branch2c"
  type: "BatchNorm"
  bottom: "res2b_0_branch2c"
  top: "res2b_0_branch2c"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale2b_0_branch2c"
  type: "Scale"
  bottom: "res2b_0_branch2c"
  top: "res2b_0_branch2c"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res2a_5_relu"
  type: "ReLU"
  bottom: "res2a"
  top: "res2a"
}
layer {
  name: "res2b_5_branch2a"
  type: "Convolution"
  bottom: "res2a"
  top: "res2b_5_branch2a"
  convolution_param {
    num_output: 8
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
  }
}
layer {
  name: "bn2b_5_branch2a"
  type: "BatchNorm"
  bottom: "res2b_5_branch2a"
  top: "res2b_5_branch2a"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale2b_5_branch2a"
  type: "Scale"
  bottom: "res2b_5_branch2a"
  top: "res2b_5_branch2a"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res2b_5_branch2a_relu"
  type: "ReLU"
  bottom: "res2b_5_branch2a"
  top: "res2b_5_branch2a"
}
layer {
  name: "res2b_5_branch2b"
  type: "Convolution"
  bottom: "res2b_5_branch2a"
  top: "res2b_5_branch2b"
  convolution_param {
    num_output: 8
    bias_term: false
    pad: 1
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "bn2b_5_branch2b"
  type: "BatchNorm"
  bottom: "res2b_5_branch2b"
  top: "res2b_5_branch2b"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale2b_5_branch2b"
  type: "Scale"
  bottom: "res2b_5_branch2b"
  top: "res2b_5_branch2b"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res2b_5_branch2b_relu"
  type: "ReLU"
  bottom: "res2b_5_branch2b"
  top: "res2b_5_branch2b"
}
layer {
  name: "res2b_5_branch2c"
  type: "Convolution"
  bottom: "res2b_5_branch2b"
  top: "res2b_5_branch2c"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
  }
}
layer {
  name: "bn2b_5_branch2c"
  type: "BatchNorm"
  bottom: "res2b_5_branch2c"
  top: "res2b_5_branch2c"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale2b_5_branch2c"
  type: "Scale"
  bottom: "res2b_5_branch2c"
  top: "res2b_5_branch2c"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res2b"
  type: "Eltwise"
  bottom: "res2a"
  bottom: "res2b_5_branch2c"
  top: "res2b"
}
layer {
  name: "Eltwise2"
  type: "Eltwise"
  bottom: "Eltwise1"
  bottom: "res2b_0_branch2c"
  top: "Eltwise2"
}
layer {
  name: "res2b_0_relu"
  type: "ReLU"
  bottom: "Eltwise2"
  top: "Eltwise2"
}
layer {
  name: "res3a_0_branch1"
  type: "Convolution"
  bottom: "Eltwise2"
  top: "res3a_0_branch1"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 2
  }
}
layer {
  name: "bn3a_0_branch1"
  type: "BatchNorm"
  bottom: "res3a_0_branch1"
  top: "res3a_0_branch1"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale3a_0_branch1"
  type: "Scale"
  bottom: "res3a_0_branch1"
  top: "res3a_0_branch1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res3a_0_branch2a"
  type: "Convolution"
  bottom: "Eltwise2"
  top: "res3a_0_branch2a"
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 2
  }
}
layer {
  name: "bn3a_0_branch2a"
  type: "BatchNorm"
  bottom: "res3a_0_branch2a"
  top: "res3a_0_branch2a"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale3a_0_branch2a"
  type: "Scale"
  bottom: "res3a_0_branch2a"
  top: "res3a_0_branch2a"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res3a_0_branch2a_relu"
  type: "ReLU"
  bottom: "res3a_0_branch2a"
  top: "res3a_0_branch2a"
}
layer {
  name: "res3a_0_branch2b"
  type: "Convolution"
  bottom: "res3a_0_branch2a"
  top: "res3a_0_branch2b"
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "bn3a_0_branch2b"
  type: "BatchNorm"
  bottom: "res3a_0_branch2b"
  top: "res3a_0_branch2b"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale3a_0_branch2b"
  type: "Scale"
  bottom: "res3a_0_branch2b"
  top: "res3a_0_branch2b"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res3a_0_branch2b_relu"
  type: "ReLU"
  bottom: "res3a_0_branch2b"
  top: "res3a_0_branch2b"
}
layer {
  name: "res3a_0_branch2c"
  type: "Convolution"
  bottom: "res3a_0_branch2b"
  top: "res3a_0_branch2c"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
  }
}
layer {
  name: "bn3a_0_branch2c"
  type: "BatchNorm"
  bottom: "res3a_0_branch2c"
  top: "res3a_0_branch2c"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale3a_0_branch2c"
  type: "Scale"
  bottom: "res3a_0_branch2c"
  top: "res3a_0_branch2c"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res2b_5_relu"
  type: "ReLU"
  bottom: "res2b"
  top: "res2b"
}
layer {
  name: "res3a_5_branch1"
  type: "Convolution"
  bottom: "res2b"
  top: "res3a_5_branch1"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 2
  }
}
layer {
  name: "bn3a_5_branch1"
  type: "BatchNorm"
  bottom: "res3a_5_branch1"
  top: "res3a_5_branch1"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale3a_5_branch1"
  type: "Scale"
  bottom: "res3a_5_branch1"
  top: "res3a_5_branch1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res3a_5_branch2a"
  type: "Convolution"
  bottom: "res2b"
  top: "res3a_5_branch2a"
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 2
  }
}
layer {
  name: "bn3a_5_branch2a"
  type: "BatchNorm"
  bottom: "res3a_5_branch2a"
  top: "res3a_5_branch2a"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale3a_5_branch2a"
  type: "Scale"
  bottom: "res3a_5_branch2a"
  top: "res3a_5_branch2a"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res3a_5_branch2a_relu"
  type: "ReLU"
  bottom: "res3a_5_branch2a"
  top: "res3a_5_branch2a"
}
layer {
  name: "res3a_5_branch2b"
  type: "Convolution"
  bottom: "res3a_5_branch2a"
  top: "res3a_5_branch2b"
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "bn3a_5_branch2b"
  type: "BatchNorm"
  bottom: "res3a_5_branch2b"
  top: "res3a_5_branch2b"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale3a_5_branch2b"
  type: "Scale"
  bottom: "res3a_5_branch2b"
  top: "res3a_5_branch2b"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res3a_5_branch2b_relu"
  type: "ReLU"
  bottom: "res3a_5_branch2b"
  top: "res3a_5_branch2b"
}
layer {
  name: "res3a_5_branch2c"
  type: "Convolution"
  bottom: "res3a_5_branch2b"
  top: "res3a_5_branch2c"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
  }
}
layer {
  name: "bn3a_5_branch2c"
  type: "BatchNorm"
  bottom: "res3a_5_branch2c"
  top: "res3a_5_branch2c"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale3a_5_branch2c"
  type: "Scale"
  bottom: "res3a_5_branch2c"
  top: "res3a_5_branch2c"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res3a"
  type: "Eltwise"
  bottom: "res3a_5_branch1"
  bottom: "res3a_5_branch2c"
  top: "res3a"
}
layer {
  name: "Eltwise3"
  type: "Eltwise"
  bottom: "res3a_0_branch1"
  bottom: "res3a_0_branch2c"
  top: "Eltwise3"
}
layer {
  name: "res3a_0_relu"
  type: "ReLU"
  bottom: "Eltwise3"
  top: "Eltwise3"
}
layer {
  name: "res3b_0_branch2a"
  type: "Convolution"
  bottom: "Eltwise3"
  top: "res3b_0_branch2a"
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
  }
}
layer {
  name: "bn3b_0_branch2a"
  type: "BatchNorm"
  bottom: "res3b_0_branch2a"
  top: "res3b_0_branch2a"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale3b_0_branch2a"
  type: "Scale"
  bottom: "res3b_0_branch2a"
  top: "res3b_0_branch2a"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res3b_0_branch2a_relu"
  type: "ReLU"
  bottom: "res3b_0_branch2a"
  top: "res3b_0_branch2a"
}
layer {
  name: "res3b_0_branch2b"
  type: "Convolution"
  bottom: "res3b_0_branch2a"
  top: "res3b_0_branch2b"
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "bn3b_0_branch2b"
  type: "BatchNorm"
  bottom: "res3b_0_branch2b"
  top: "res3b_0_branch2b"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale3b_0_branch2b"
  type: "Scale"
  bottom: "res3b_0_branch2b"
  top: "res3b_0_branch2b"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res3b_0_branch2b_relu"
  type: "ReLU"
  bottom: "res3b_0_branch2b"
  top: "res3b_0_branch2b"
}
layer {
  name: "res3b_0_branch2c"
  type: "Convolution"
  bottom: "res3b_0_branch2b"
  top: "res3b_0_branch2c"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
  }
}
layer {
  name: "bn3b_0_branch2c"
  type: "BatchNorm"
  bottom: "res3b_0_branch2c"
  top: "res3b_0_branch2c"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale3b_0_branch2c"
  type: "Scale"
  bottom: "res3b_0_branch2c"
  top: "res3b_0_branch2c"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res3a_5_relu"
  type: "ReLU"
  bottom: "res3a"
  top: "res3a"
}
layer {
  name: "res3b_5_branch2a"
  type: "Convolution"
  bottom: "res3a"
  top: "res3b_5_branch2a"
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
  }
}
layer {
  name: "bn3b_5_branch2a"
  type: "BatchNorm"
  bottom: "res3b_5_branch2a"
  top: "res3b_5_branch2a"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale3b_5_branch2a"
  type: "Scale"
  bottom: "res3b_5_branch2a"
  top: "res3b_5_branch2a"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res3b_5_branch2a_relu"
  type: "ReLU"
  bottom: "res3b_5_branch2a"
  top: "res3b_5_branch2a"
}
layer {
  name: "res3b_5_branch2b"
  type: "Convolution"
  bottom: "res3b_5_branch2a"
  top: "res3b_5_branch2b"
  convolution_param {
    num_output: 16
    bias_term: false
    pad: 1
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "bn3b_5_branch2b"
  type: "BatchNorm"
  bottom: "res3b_5_branch2b"
  top: "res3b_5_branch2b"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale3b_5_branch2b"
  type: "Scale"
  bottom: "res3b_5_branch2b"
  top: "res3b_5_branch2b"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res3b_5_branch2b_relu"
  type: "ReLU"
  bottom: "res3b_5_branch2b"
  top: "res3b_5_branch2b"
}
layer {
  name: "res3b_5_branch2c"
  type: "Convolution"
  bottom: "res3b_5_branch2b"
  top: "res3b_5_branch2c"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
  }
}
layer {
  name: "bn3b_5_branch2c"
  type: "BatchNorm"
  bottom: "res3b_5_branch2c"
  top: "res3b_5_branch2c"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale3b_5_branch2c"
  type: "Scale"
  bottom: "res3b_5_branch2c"
  top: "res3b_5_branch2c"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res3b"
  type: "Eltwise"
  bottom: "res3a"
  bottom: "res3b_5_branch2c"
  top: "res3b"
}
layer {
  name: "Eltwise4"
  type: "Eltwise"
  bottom: "Eltwise3"
  bottom: "res3b_0_branch2c"
  top: "Eltwise4"
}
layer {
  name: "res3b_0_relu"
  type: "ReLU"
  bottom: "Eltwise4"
  top: "Eltwise4"
}
layer {
  name: "res4a_0_branch1"
  type: "Convolution"
  bottom: "Eltwise4"
  top: "res4a_0_branch1"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 2
  }
}
layer {
  name: "bn4a_0_branch1"
  type: "BatchNorm"
  bottom: "res4a_0_branch1"
  top: "res4a_0_branch1"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale4a_0_branch1"
  type: "Scale"
  bottom: "res4a_0_branch1"
  top: "res4a_0_branch1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res4a_0_branch2a"
  type: "Convolution"
  bottom: "Eltwise4"
  top: "res4a_0_branch2a"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 2
  }
}
layer {
  name: "bn4a_0_branch2a"
  type: "BatchNorm"
  bottom: "res4a_0_branch2a"
  top: "res4a_0_branch2a"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale4a_0_branch2a"
  type: "Scale"
  bottom: "res4a_0_branch2a"
  top: "res4a_0_branch2a"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res4a_0_branch2a_relu"
  type: "ReLU"
  bottom: "res4a_0_branch2a"
  top: "res4a_0_branch2a"
}
layer {
  name: "res4a_0_branch2b"
  type: "Convolution"
  bottom: "res4a_0_branch2a"
  top: "res4a_0_branch2b"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 1
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "bn4a_0_branch2b"
  type: "BatchNorm"
  bottom: "res4a_0_branch2b"
  top: "res4a_0_branch2b"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale4a_0_branch2b"
  type: "Scale"
  bottom: "res4a_0_branch2b"
  top: "res4a_0_branch2b"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res4a_0_branch2b_relu"
  type: "ReLU"
  bottom: "res4a_0_branch2b"
  top: "res4a_0_branch2b"
}
layer {
  name: "res4a_0_branch2c"
  type: "Convolution"
  bottom: "res4a_0_branch2b"
  top: "res4a_0_branch2c"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
  }
}
layer {
  name: "bn4a_0_branch2c"
  type: "BatchNorm"
  bottom: "res4a_0_branch2c"
  top: "res4a_0_branch2c"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale4a_0_branch2c"
  type: "Scale"
  bottom: "res4a_0_branch2c"
  top: "res4a_0_branch2c"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res3b_5_relu"
  type: "ReLU"
  bottom: "res3b"
  top: "res3b"
}
layer {
  name: "res4a_5_branch1"
  type: "Convolution"
  bottom: "res3b"
  top: "res4a_5_branch1"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 2
  }
}
layer {
  name: "bn4a_5_branch1"
  type: "BatchNorm"
  bottom: "res4a_5_branch1"
  top: "res4a_5_branch1"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale4a_5_branch1"
  type: "Scale"
  bottom: "res4a_5_branch1"
  top: "res4a_5_branch1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res4a_5_branch2a"
  type: "Convolution"
  bottom: "res3b"
  top: "res4a_5_branch2a"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 2
  }
}
layer {
  name: "bn4a_5_branch2a"
  type: "BatchNorm"
  bottom: "res4a_5_branch2a"
  top: "res4a_5_branch2a"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale4a_5_branch2a"
  type: "Scale"
  bottom: "res4a_5_branch2a"
  top: "res4a_5_branch2a"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res4a_5_branch2a_relu"
  type: "ReLU"
  bottom: "res4a_5_branch2a"
  top: "res4a_5_branch2a"
}
layer {
  name: "res4a_5_branch2b"
  type: "Convolution"
  bottom: "res4a_5_branch2a"
  top: "res4a_5_branch2b"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 1
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "bn4a_5_branch2b"
  type: "BatchNorm"
  bottom: "res4a_5_branch2b"
  top: "res4a_5_branch2b"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale4a_5_branch2b"
  type: "Scale"
  bottom: "res4a_5_branch2b"
  top: "res4a_5_branch2b"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res4a_5_branch2b_relu"
  type: "ReLU"
  bottom: "res4a_5_branch2b"
  top: "res4a_5_branch2b"
}
layer {
  name: "res4a_5_branch2c"
  type: "Convolution"
  bottom: "res4a_5_branch2b"
  top: "res4a_5_branch2c"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
  }
}
layer {
  name: "bn4a_5_branch2c"
  type: "BatchNorm"
  bottom: "res4a_5_branch2c"
  top: "res4a_5_branch2c"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale4a_5_branch2c"
  type: "Scale"
  bottom: "res4a_5_branch2c"
  top: "res4a_5_branch2c"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res4a"
  type: "Eltwise"
  bottom: "res4a_5_branch1"
  bottom: "res4a_5_branch2c"
  top: "res4a"
}
layer {
  name: "Eltwise5"
  type: "Eltwise"
  bottom: "res4a_0_branch1"
  bottom: "res4a_0_branch2c"
  top: "Eltwise5"
}
layer {
  name: "res4a_0_relu"
  type: "ReLU"
  bottom: "Eltwise5"
  top: "Eltwise5"
}
layer {
  name: "res4b_0_branch2a"
  type: "Convolution"
  bottom: "Eltwise5"
  top: "res4b_0_branch2a"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
  }
}
layer {
  name: "bn4b_0_branch2a"
  type: "BatchNorm"
  bottom: "res4b_0_branch2a"
  top: "res4b_0_branch2a"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale4b_0_branch2a"
  type: "Scale"
  bottom: "res4b_0_branch2a"
  top: "res4b_0_branch2a"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res4b_0_branch2a_relu"
  type: "ReLU"
  bottom: "res4b_0_branch2a"
  top: "res4b_0_branch2a"
}
layer {
  name: "res4b_0_branch2b"
  type: "Convolution"
  bottom: "res4b_0_branch2a"
  top: "res4b_0_branch2b"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 1
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "bn4b_0_branch2b"
  type: "BatchNorm"
  bottom: "res4b_0_branch2b"
  top: "res4b_0_branch2b"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale4b_0_branch2b"
  type: "Scale"
  bottom: "res4b_0_branch2b"
  top: "res4b_0_branch2b"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res4b_0_branch2b_relu"
  type: "ReLU"
  bottom: "res4b_0_branch2b"
  top: "res4b_0_branch2b"
}
layer {
  name: "res4b_0_branch2c"
  type: "Convolution"
  bottom: "res4b_0_branch2b"
  top: "res4b_0_branch2c"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
  }
}
layer {
  name: "bn4b_0_branch2c"
  type: "BatchNorm"
  bottom: "res4b_0_branch2c"
  top: "res4b_0_branch2c"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale4b_0_branch2c"
  type: "Scale"
  bottom: "res4b_0_branch2c"
  top: "res4b_0_branch2c"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res4a_5_relu"
  type: "ReLU"
  bottom: "res4a"
  top: "res4a"
}
layer {
  name: "res4b_5_branch2a"
  type: "Convolution"
  bottom: "res4a"
  top: "res4b_5_branch2a"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
  }
}
layer {
  name: "bn4b_5_branch2a"
  type: "BatchNorm"
  bottom: "res4b_5_branch2a"
  top: "res4b_5_branch2a"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale4b_5_branch2a"
  type: "Scale"
  bottom: "res4b_5_branch2a"
  top: "res4b_5_branch2a"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res4b_5_branch2a_relu"
  type: "ReLU"
  bottom: "res4b_5_branch2a"
  top: "res4b_5_branch2a"
}
layer {
  name: "res4b_5_branch2b"
  type: "Convolution"
  bottom: "res4b_5_branch2a"
  top: "res4b_5_branch2b"
  convolution_param {
    num_output: 32
    bias_term: false
    pad: 1
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "bn4b_5_branch2b"
  type: "BatchNorm"
  bottom: "res4b_5_branch2b"
  top: "res4b_5_branch2b"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale4b_5_branch2b"
  type: "Scale"
  bottom: "res4b_5_branch2b"
  top: "res4b_5_branch2b"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res4b_5_branch2b_relu"
  type: "ReLU"
  bottom: "res4b_5_branch2b"
  top: "res4b_5_branch2b"
}
layer {
  name: "res4b_5_branch2c"
  type: "Convolution"
  bottom: "res4b_5_branch2b"
  top: "res4b_5_branch2c"
  convolution_param {
    num_output: 128
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 1
  }
}
layer {
  name: "bn4b_5_branch2c"
  type: "BatchNorm"
  bottom: "res4b_5_branch2c"
  top: "res4b_5_branch2c"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale4b_5_branch2c"
  type: "Scale"
  bottom: "res4b_5_branch2c"
  top: "res4b_5_branch2c"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res4b"
  type: "Eltwise"
  bottom: "res4a"
  bottom: "res4b_5_branch2c"
  top: "res4b"
}
layer {
  name: "Eltwise6"
  type: "Eltwise"
  bottom: "Eltwise5"
  bottom: "res4b_0_branch2c"
  top: "Eltwise6"
}
layer {
  name: "res4b_0_relu"
  type: "ReLU"
  bottom: "Eltwise6"
  top: "Eltwise6"
}
layer {
  name: "res5a_0_branch1"
  type: "Convolution"
  bottom: "Eltwise6"
  top: "res5a_0_branch1"
  convolution_param {
    num_output: 256
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 2
  }
}
layer {
  name: "bn5a_0_branch1"
  type: "BatchNorm"
  bottom: "res5a_0_branch1"
  top: "res5a_0_branch1"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale5a_0_branch1"
  type: "Scale"
  bottom: "res5a_0_branch1"
  top: "res5a_0_branch1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res5a_0_branch2a"
  type: "Convolution"
  bottom: "Eltwise6"
  top: "res5a_0_branch2a"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 0
    kernel_size: 1
    stride: 2
  }
}
layer {
  name: "bn5a_0_branch2a"
  type: "BatchNorm"
  bottom: "res5a_0_branch2a"
  top: "res5a_0_branch2a"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale5a_0_branch2a"
  type: "Scale"
  bottom: "res5a_0_branch2a"
  top: "res5a_0_branch2a"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res5a_0_branch2a_relu"
  type: "ReLU"
  bottom: "res5a_0_branch2a"
  top: "res5a_0_branch2a"
}
layer {
  name: "res5a_0_branch2b"
  type: "Convolution"
  bottom: "res5a_0_branch2a"
  top: "res5a_0_branch2b"
  convolution_param {
    num_output: 64
    bias_term: false
    pad: 1
    kernel_size: 3
    stride: 1
  }
}
layer {
  name: "bn5a_0_branch2b"
  type: "BatchNorm"
  bottom: "res5a_0_branch2b"
  top: "res5a_0_branch2b"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale5a_0_branch2b"
  type: "Scale"
  bottom: "res5a_0_branch2b"
  top: "res5a_0_branch2b"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "res5a_0_branch2b_relu"
  type: "ReLU"
  bottom: "res5a_0_branch2b"
  top: "res5a_0_branch2b"
}
layer {
  name: "res5a_0_branch2c"
  type: "Convolution"
  bottom: "res5a_0_branch2b"
  top: "res5a_0_branch2c"
  convolution_param {
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5a_0_branch2c" | |
type: "BatchNorm" | |
bottom: "res5a_0_branch2c" | |
top: "res5a_0_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5a_0_branch2c" | |
type: "Scale" | |
bottom: "res5a_0_branch2c" | |
top: "res5a_0_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4b_5_relu" | |
type: "ReLU" | |
bottom: "res4b" | |
top: "res4b" | |
} | |
layer { | |
name: "res5a_5_branch1" | |
type: "Convolution" | |
bottom: "res4b" | |
top: "res5a_5_branch1" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn5a_5_branch1" | |
type: "BatchNorm" | |
bottom: "res5a_5_branch1" | |
top: "res5a_5_branch1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5a_5_branch1" | |
type: "Scale" | |
bottom: "res5a_5_branch1" | |
top: "res5a_5_branch1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5a_5_branch2a" | |
type: "Convolution" | |
bottom: "res4b" | |
top: "res5a_5_branch2a" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn5a_5_branch2a" | |
type: "BatchNorm" | |
bottom: "res5a_5_branch2a" | |
top: "res5a_5_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5a_5_branch2a" | |
type: "Scale" | |
bottom: "res5a_5_branch2a" | |
top: "res5a_5_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5a_5_branch2a_relu" | |
type: "ReLU" | |
bottom: "res5a_5_branch2a" | |
top: "res5a_5_branch2a" | |
} | |
layer { | |
name: "res5a_5_branch2b" | |
type: "Convolution" | |
bottom: "res5a_5_branch2a" | |
top: "res5a_5_branch2b" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5a_5_branch2b" | |
type: "BatchNorm" | |
bottom: "res5a_5_branch2b" | |
top: "res5a_5_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5a_5_branch2b" | |
type: "Scale" | |
bottom: "res5a_5_branch2b" | |
top: "res5a_5_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5a_5_branch2b_relu" | |
type: "ReLU" | |
bottom: "res5a_5_branch2b" | |
top: "res5a_5_branch2b" | |
} | |
layer { | |
name: "res5a_5_branch2c" | |
type: "Convolution" | |
bottom: "res5a_5_branch2b" | |
top: "res5a_5_branch2c" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5a_5_branch2c" | |
type: "BatchNorm" | |
bottom: "res5a_5_branch2c" | |
top: "res5a_5_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5a_5_branch2c" | |
type: "Scale" | |
bottom: "res5a_5_branch2c" | |
top: "res5a_5_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
# Stage-5 residual merge for the "_5" path: projection shortcut
# (res5a_5_branch1, 1x1 stride-2 conv) + bottleneck branch. Eltwise
# defaults to SUM.
layer { | |
name: "res5a" | |
type: "Eltwise" | |
bottom: "res5a_5_branch1" | |
bottom: "res5a_5_branch2c" | |
top: "res5a" | |
} | |
# Stage-5 residual merge for the "_0" path: projection shortcut + branch.
layer { | |
name: "Eltwise7" | |
type: "Eltwise" | |
bottom: "res5a_0_branch1" | |
bottom: "res5a_0_branch2c" | |
top: "Eltwise7" | |
} | |
# Post-merge activation for the _0 path; in-place (bottom == top).
layer { | |
name: "res5a_0_relu" | |
type: "ReLU" | |
bottom: "Eltwise7" | |
top: "Eltwise7" | |
} | |
layer { | |
name: "res5b_0_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise7" | |
top: "res5b_0_branch2a" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5b_0_branch2a" | |
type: "BatchNorm" | |
bottom: "res5b_0_branch2a" | |
top: "res5b_0_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5b_0_branch2a" | |
type: "Scale" | |
bottom: "res5b_0_branch2a" | |
top: "res5b_0_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5b_0_branch2a_relu" | |
type: "ReLU" | |
bottom: "res5b_0_branch2a" | |
top: "res5b_0_branch2a" | |
} | |
layer { | |
name: "res5b_0_branch2b" | |
type: "Convolution" | |
bottom: "res5b_0_branch2a" | |
top: "res5b_0_branch2b" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5b_0_branch2b" | |
type: "BatchNorm" | |
bottom: "res5b_0_branch2b" | |
top: "res5b_0_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5b_0_branch2b" | |
type: "Scale" | |
bottom: "res5b_0_branch2b" | |
top: "res5b_0_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5b_0_branch2b_relu" | |
type: "ReLU" | |
bottom: "res5b_0_branch2b" | |
top: "res5b_0_branch2b" | |
} | |
layer { | |
name: "res5b_0_branch2c" | |
type: "Convolution" | |
bottom: "res5b_0_branch2b" | |
top: "res5b_0_branch2c" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5b_0_branch2c" | |
type: "BatchNorm" | |
bottom: "res5b_0_branch2c" | |
top: "res5b_0_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5b_0_branch2c" | |
type: "Scale" | |
bottom: "res5b_0_branch2c" | |
top: "res5b_0_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5a_5_relu" | |
type: "ReLU" | |
bottom: "res5a" | |
top: "res5a" | |
} | |
layer { | |
name: "res5b_5_branch2a" | |
type: "Convolution" | |
bottom: "res5a" | |
top: "res5b_5_branch2a" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5b_5_branch2a" | |
type: "BatchNorm" | |
bottom: "res5b_5_branch2a" | |
top: "res5b_5_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5b_5_branch2a" | |
type: "Scale" | |
bottom: "res5b_5_branch2a" | |
top: "res5b_5_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5b_5_branch2a_relu" | |
type: "ReLU" | |
bottom: "res5b_5_branch2a" | |
top: "res5b_5_branch2a" | |
} | |
layer { | |
name: "res5b_5_branch2b" | |
type: "Convolution" | |
bottom: "res5b_5_branch2a" | |
top: "res5b_5_branch2b" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5b_5_branch2b" | |
type: "BatchNorm" | |
bottom: "res5b_5_branch2b" | |
top: "res5b_5_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5b_5_branch2b" | |
type: "Scale" | |
bottom: "res5b_5_branch2b" | |
top: "res5b_5_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5b_5_branch2b_relu" | |
type: "ReLU" | |
bottom: "res5b_5_branch2b" | |
top: "res5b_5_branch2b" | |
} | |
layer { | |
name: "res5b_5_branch2c" | |
type: "Convolution" | |
bottom: "res5b_5_branch2b" | |
top: "res5b_5_branch2c" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5b_5_branch2c" | |
type: "BatchNorm" | |
bottom: "res5b_5_branch2c" | |
top: "res5b_5_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5b_5_branch2c" | |
type: "Scale" | |
bottom: "res5b_5_branch2c" | |
top: "res5b_5_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
# Final stage-5 residual merge for the "_5" path: identity shortcut
# "res5a" + bottleneck branch (Eltwise defaults to SUM).
layer { | |
name: "res5b" | |
type: "Eltwise" | |
bottom: "res5a" | |
bottom: "res5b_5_branch2c" | |
top: "res5b" | |
} | |
# Final stage-5 residual merge for the "_0" path: identity shortcut
# "Eltwise7" + bottleneck branch.
layer { | |
name: "Eltwise8" | |
type: "Eltwise" | |
bottom: "Eltwise7" | |
bottom: "res5b_0_branch2c" | |
top: "Eltwise8" | |
} | |
# Post-merge activation for the _0 path; in-place (bottom == top).
layer { | |
name: "res5b_0_relu" | |
type: "ReLU" | |
bottom: "Eltwise8" | |
top: "Eltwise8" | |
} | |
# Readout of the "_0" path: 4x4 average pool. With the 32x32 CIFAR input
# and three stride-2 stages (32 -> 16 -> 8 -> 4), kernel_size 4 covers the
# whole remaining feature map, i.e. this acts as global average pooling.
layer { | |
name: "gpool_0" | |
type: "Pooling" | |
bottom: "Eltwise8" | |
top: "gpool_0" | |
pooling_param { | |
pool: AVE | |
kernel_size: 4 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "res2a_1_branch1" | |
type: "Convolution" | |
bottom: "conv1_relu" | |
top: "res2a_1_branch1" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2a_1_branch1" | |
type: "BatchNorm" | |
bottom: "res2a_1_branch1" | |
top: "res2a_1_branch1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2a_1_branch1" | |
type: "Scale" | |
bottom: "res2a_1_branch1" | |
top: "res2a_1_branch1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res2a_1_branch2a" | |
type: "Convolution" | |
bottom: "conv1_relu" | |
top: "res2a_1_branch2a" | |
convolution_param { | |
num_output: 8 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2a_1_branch2a" | |
type: "BatchNorm" | |
bottom: "res2a_1_branch2a" | |
top: "res2a_1_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2a_1_branch2a" | |
type: "Scale" | |
bottom: "res2a_1_branch2a" | |
top: "res2a_1_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res2a_1_branch2a_relu" | |
type: "ReLU" | |
bottom: "res2a_1_branch2a" | |
top: "res2a_1_branch2a" | |
} | |
layer { | |
name: "res2a_1_branch2b" | |
type: "Convolution" | |
bottom: "res2a_1_branch2a" | |
top: "res2a_1_branch2b" | |
convolution_param { | |
num_output: 8 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2a_1_branch2b" | |
type: "BatchNorm" | |
bottom: "res2a_1_branch2b" | |
top: "res2a_1_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2a_1_branch2b" | |
type: "Scale" | |
bottom: "res2a_1_branch2b" | |
top: "res2a_1_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res2a_1_branch2b_relu" | |
type: "ReLU" | |
bottom: "res2a_1_branch2b" | |
top: "res2a_1_branch2b" | |
} | |
layer { | |
name: "res2a_1_branch2c" | |
type: "Convolution" | |
bottom: "res2a_1_branch2b" | |
top: "res2a_1_branch2c" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2a_1_branch2c" | |
type: "BatchNorm" | |
bottom: "res2a_1_branch2c" | |
top: "res2a_1_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2a_1_branch2c" | |
type: "Scale" | |
bottom: "res2a_1_branch2c" | |
top: "res2a_1_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise9" | |
type: "Eltwise" | |
bottom: "res2a_1_branch1" | |
bottom: "res2a_1_branch2c" | |
top: "Eltwise9" | |
} | |
layer { | |
name: "res2a_1_relu" | |
type: "ReLU" | |
bottom: "Eltwise9" | |
top: "Eltwise9" | |
} | |
layer { | |
name: "res2b_1_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise9" | |
top: "res2b_1_branch2a" | |
convolution_param { | |
num_output: 8 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2b_1_branch2a" | |
type: "BatchNorm" | |
bottom: "res2b_1_branch2a" | |
top: "res2b_1_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2b_1_branch2a" | |
type: "Scale" | |
bottom: "res2b_1_branch2a" | |
top: "res2b_1_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res2b_1_branch2a_relu" | |
type: "ReLU" | |
bottom: "res2b_1_branch2a" | |
top: "res2b_1_branch2a" | |
} | |
layer { | |
name: "res2b_1_branch2b" | |
type: "Convolution" | |
bottom: "res2b_1_branch2a" | |
top: "res2b_1_branch2b" | |
convolution_param { | |
num_output: 8 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2b_1_branch2b" | |
type: "BatchNorm" | |
bottom: "res2b_1_branch2b" | |
top: "res2b_1_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2b_1_branch2b" | |
type: "Scale" | |
bottom: "res2b_1_branch2b" | |
top: "res2b_1_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res2b_1_branch2b_relu" | |
type: "ReLU" | |
bottom: "res2b_1_branch2b" | |
top: "res2b_1_branch2b" | |
} | |
layer { | |
name: "res2b_1_branch2c" | |
type: "Convolution" | |
bottom: "res2b_1_branch2b" | |
top: "res2b_1_branch2c" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2b_1_branch2c" | |
type: "BatchNorm" | |
bottom: "res2b_1_branch2c" | |
top: "res2b_1_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2b_1_branch2c" | |
type: "Scale" | |
bottom: "res2b_1_branch2c" | |
top: "res2b_1_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise10" | |
type: "Eltwise" | |
bottom: "Eltwise9" | |
bottom: "res2b_1_branch2c" | |
top: "Eltwise10" | |
} | |
layer { | |
name: "res2b_1_relu" | |
type: "ReLU" | |
bottom: "Eltwise10" | |
top: "Eltwise10" | |
} | |
layer { | |
name: "res3a_1_branch1" | |
type: "Convolution" | |
bottom: "Eltwise10" | |
top: "res3a_1_branch1" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn3a_1_branch1" | |
type: "BatchNorm" | |
bottom: "res3a_1_branch1" | |
top: "res3a_1_branch1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3a_1_branch1" | |
type: "Scale" | |
bottom: "res3a_1_branch1" | |
top: "res3a_1_branch1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res3a_1_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise10" | |
top: "res3a_1_branch2a" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn3a_1_branch2a" | |
type: "BatchNorm" | |
bottom: "res3a_1_branch2a" | |
top: "res3a_1_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3a_1_branch2a" | |
type: "Scale" | |
bottom: "res3a_1_branch2a" | |
top: "res3a_1_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res3a_1_branch2a_relu" | |
type: "ReLU" | |
bottom: "res3a_1_branch2a" | |
top: "res3a_1_branch2a" | |
} | |
layer { | |
name: "res3a_1_branch2b" | |
type: "Convolution" | |
bottom: "res3a_1_branch2a" | |
top: "res3a_1_branch2b" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn3a_1_branch2b" | |
type: "BatchNorm" | |
bottom: "res3a_1_branch2b" | |
top: "res3a_1_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3a_1_branch2b" | |
type: "Scale" | |
bottom: "res3a_1_branch2b" | |
top: "res3a_1_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res3a_1_branch2b_relu" | |
type: "ReLU" | |
bottom: "res3a_1_branch2b" | |
top: "res3a_1_branch2b" | |
} | |
layer { | |
name: "res3a_1_branch2c" | |
type: "Convolution" | |
bottom: "res3a_1_branch2b" | |
top: "res3a_1_branch2c" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn3a_1_branch2c" | |
type: "BatchNorm" | |
bottom: "res3a_1_branch2c" | |
top: "res3a_1_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3a_1_branch2c" | |
type: "Scale" | |
bottom: "res3a_1_branch2c" | |
top: "res3a_1_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise11" | |
type: "Eltwise" | |
bottom: "res3a_1_branch1" | |
bottom: "res3a_1_branch2c" | |
top: "Eltwise11" | |
} | |
layer { | |
name: "res3a_1_relu" | |
type: "ReLU" | |
bottom: "Eltwise11" | |
top: "Eltwise11" | |
} | |
layer { | |
name: "res3b_1_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise11" | |
top: "res3b_1_branch2a" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn3b_1_branch2a" | |
type: "BatchNorm" | |
bottom: "res3b_1_branch2a" | |
top: "res3b_1_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3b_1_branch2a" | |
type: "Scale" | |
bottom: "res3b_1_branch2a" | |
top: "res3b_1_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res3b_1_branch2a_relu" | |
type: "ReLU" | |
bottom: "res3b_1_branch2a" | |
top: "res3b_1_branch2a" | |
} | |
layer { | |
name: "res3b_1_branch2b" | |
type: "Convolution" | |
bottom: "res3b_1_branch2a" | |
top: "res3b_1_branch2b" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn3b_1_branch2b" | |
type: "BatchNorm" | |
bottom: "res3b_1_branch2b" | |
top: "res3b_1_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3b_1_branch2b" | |
type: "Scale" | |
bottom: "res3b_1_branch2b" | |
top: "res3b_1_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res3b_1_branch2b_relu" | |
type: "ReLU" | |
bottom: "res3b_1_branch2b" | |
top: "res3b_1_branch2b" | |
} | |
layer { | |
name: "res3b_1_branch2c" | |
type: "Convolution" | |
bottom: "res3b_1_branch2b" | |
top: "res3b_1_branch2c" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn3b_1_branch2c" | |
type: "BatchNorm" | |
bottom: "res3b_1_branch2c" | |
top: "res3b_1_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3b_1_branch2c" | |
type: "Scale" | |
bottom: "res3b_1_branch2c" | |
top: "res3b_1_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise12" | |
type: "Eltwise" | |
bottom: "Eltwise11" | |
bottom: "res3b_1_branch2c" | |
top: "Eltwise12" | |
} | |
layer { | |
name: "res3b_1_relu" | |
type: "ReLU" | |
bottom: "Eltwise12" | |
top: "Eltwise12" | |
} | |
layer { | |
name: "res4a_1_branch1" | |
type: "Convolution" | |
bottom: "Eltwise12" | |
top: "res4a_1_branch1" | |
convolution_param { | |
num_output: 128 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn4a_1_branch1" | |
type: "BatchNorm" | |
bottom: "res4a_1_branch1" | |
top: "res4a_1_branch1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4a_1_branch1" | |
type: "Scale" | |
bottom: "res4a_1_branch1" | |
top: "res4a_1_branch1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4a_1_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise12" | |
top: "res4a_1_branch2a" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn4a_1_branch2a" | |
type: "BatchNorm" | |
bottom: "res4a_1_branch2a" | |
top: "res4a_1_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4a_1_branch2a" | |
type: "Scale" | |
bottom: "res4a_1_branch2a" | |
top: "res4a_1_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4a_1_branch2a_relu" | |
type: "ReLU" | |
bottom: "res4a_1_branch2a" | |
top: "res4a_1_branch2a" | |
} | |
layer { | |
name: "res4a_1_branch2b" | |
type: "Convolution" | |
bottom: "res4a_1_branch2a" | |
top: "res4a_1_branch2b" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn4a_1_branch2b" | |
type: "BatchNorm" | |
bottom: "res4a_1_branch2b" | |
top: "res4a_1_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4a_1_branch2b" | |
type: "Scale" | |
bottom: "res4a_1_branch2b" | |
top: "res4a_1_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4a_1_branch2b_relu" | |
type: "ReLU" | |
bottom: "res4a_1_branch2b" | |
top: "res4a_1_branch2b" | |
} | |
layer { | |
name: "res4a_1_branch2c" | |
type: "Convolution" | |
bottom: "res4a_1_branch2b" | |
top: "res4a_1_branch2c" | |
convolution_param { | |
num_output: 128 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn4a_1_branch2c" | |
type: "BatchNorm" | |
bottom: "res4a_1_branch2c" | |
top: "res4a_1_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4a_1_branch2c" | |
type: "Scale" | |
bottom: "res4a_1_branch2c" | |
top: "res4a_1_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise13" | |
type: "Eltwise" | |
bottom: "res4a_1_branch1" | |
bottom: "res4a_1_branch2c" | |
top: "Eltwise13" | |
} | |
layer { | |
name: "res4a_1_relu" | |
type: "ReLU" | |
bottom: "Eltwise13" | |
top: "Eltwise13" | |
} | |
layer { | |
name: "res4b_1_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise13" | |
top: "res4b_1_branch2a" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn4b_1_branch2a" | |
type: "BatchNorm" | |
bottom: "res4b_1_branch2a" | |
top: "res4b_1_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4b_1_branch2a" | |
type: "Scale" | |
bottom: "res4b_1_branch2a" | |
top: "res4b_1_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4b_1_branch2a_relu" | |
type: "ReLU" | |
bottom: "res4b_1_branch2a" | |
top: "res4b_1_branch2a" | |
} | |
layer { | |
name: "res4b_1_branch2b" | |
type: "Convolution" | |
bottom: "res4b_1_branch2a" | |
top: "res4b_1_branch2b" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn4b_1_branch2b" | |
type: "BatchNorm" | |
bottom: "res4b_1_branch2b" | |
top: "res4b_1_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4b_1_branch2b" | |
type: "Scale" | |
bottom: "res4b_1_branch2b" | |
top: "res4b_1_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4b_1_branch2b_relu" | |
type: "ReLU" | |
bottom: "res4b_1_branch2b" | |
top: "res4b_1_branch2b" | |
} | |
layer { | |
name: "res4b_1_branch2c" | |
type: "Convolution" | |
bottom: "res4b_1_branch2b" | |
top: "res4b_1_branch2c" | |
convolution_param { | |
num_output: 128 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn4b_1_branch2c" | |
type: "BatchNorm" | |
bottom: "res4b_1_branch2c" | |
top: "res4b_1_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4b_1_branch2c" | |
type: "Scale" | |
bottom: "res4b_1_branch2c" | |
top: "res4b_1_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise14" | |
type: "Eltwise" | |
bottom: "Eltwise13" | |
bottom: "res4b_1_branch2c" | |
top: "Eltwise14" | |
} | |
layer { | |
name: "res4b_1_relu" | |
type: "ReLU" | |
bottom: "Eltwise14" | |
top: "Eltwise14" | |
} | |
layer { | |
name: "res5a_1_branch1" | |
type: "Convolution" | |
bottom: "Eltwise14" | |
top: "res5a_1_branch1" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn5a_1_branch1" | |
type: "BatchNorm" | |
bottom: "res5a_1_branch1" | |
top: "res5a_1_branch1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5a_1_branch1" | |
type: "Scale" | |
bottom: "res5a_1_branch1" | |
top: "res5a_1_branch1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5a_1_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise14" | |
top: "res5a_1_branch2a" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn5a_1_branch2a" | |
type: "BatchNorm" | |
bottom: "res5a_1_branch2a" | |
top: "res5a_1_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5a_1_branch2a" | |
type: "Scale" | |
bottom: "res5a_1_branch2a" | |
top: "res5a_1_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5a_1_branch2a_relu" | |
type: "ReLU" | |
bottom: "res5a_1_branch2a" | |
top: "res5a_1_branch2a" | |
} | |
layer { | |
name: "res5a_1_branch2b" | |
type: "Convolution" | |
bottom: "res5a_1_branch2a" | |
top: "res5a_1_branch2b" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5a_1_branch2b" | |
type: "BatchNorm" | |
bottom: "res5a_1_branch2b" | |
top: "res5a_1_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5a_1_branch2b" | |
type: "Scale" | |
bottom: "res5a_1_branch2b" | |
top: "res5a_1_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5a_1_branch2b_relu" | |
type: "ReLU" | |
bottom: "res5a_1_branch2b" | |
top: "res5a_1_branch2b" | |
} | |
layer { | |
name: "res5a_1_branch2c" | |
type: "Convolution" | |
bottom: "res5a_1_branch2b" | |
top: "res5a_1_branch2c" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5a_1_branch2c" | |
type: "BatchNorm" | |
bottom: "res5a_1_branch2c" | |
top: "res5a_1_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5a_1_branch2c" | |
type: "Scale" | |
bottom: "res5a_1_branch2c" | |
top: "res5a_1_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise15" | |
type: "Eltwise" | |
bottom: "res5a_1_branch1" | |
bottom: "res5a_1_branch2c" | |
top: "Eltwise15" | |
} | |
layer { | |
name: "res5a_1_relu" | |
type: "ReLU" | |
bottom: "Eltwise15" | |
top: "Eltwise15" | |
} | |
layer { | |
name: "res5b_1_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise15" | |
top: "res5b_1_branch2a" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5b_1_branch2a" | |
type: "BatchNorm" | |
bottom: "res5b_1_branch2a" | |
top: "res5b_1_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5b_1_branch2a" | |
type: "Scale" | |
bottom: "res5b_1_branch2a" | |
top: "res5b_1_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5b_1_branch2a_relu" | |
type: "ReLU" | |
bottom: "res5b_1_branch2a" | |
top: "res5b_1_branch2a" | |
} | |
layer { | |
name: "res5b_1_branch2b" | |
type: "Convolution" | |
bottom: "res5b_1_branch2a" | |
top: "res5b_1_branch2b" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5b_1_branch2b" | |
type: "BatchNorm" | |
bottom: "res5b_1_branch2b" | |
top: "res5b_1_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5b_1_branch2b" | |
type: "Scale" | |
bottom: "res5b_1_branch2b" | |
top: "res5b_1_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5b_1_branch2b_relu" | |
type: "ReLU" | |
bottom: "res5b_1_branch2b" | |
top: "res5b_1_branch2b" | |
} | |
layer { | |
name: "res5b_1_branch2c" | |
type: "Convolution" | |
bottom: "res5b_1_branch2b" | |
top: "res5b_1_branch2c" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5b_1_branch2c" | |
type: "BatchNorm" | |
bottom: "res5b_1_branch2c" | |
top: "res5b_1_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5b_1_branch2c" | |
type: "Scale" | |
bottom: "res5b_1_branch2c" | |
top: "res5b_1_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
# Final stage-5 residual merge for the "_1" path: identity shortcut
# "Eltwise15" + bottleneck branch (Eltwise defaults to SUM).
layer { | |
name: "Eltwise16" | |
type: "Eltwise" | |
bottom: "Eltwise15" | |
bottom: "res5b_1_branch2c" | |
top: "Eltwise16" | |
} | |
# Post-merge activation for the _1 path; in-place (bottom == top).
layer { | |
name: "res5b_1_relu" | |
type: "ReLU" | |
bottom: "Eltwise16" | |
top: "Eltwise16" | |
} | |
# Readout of the "_1" path: 4x4 average pool over the 4x4 stage-5 feature
# map (32x32 input downsampled by three stride-2 stages), i.e. global
# average pooling for this parallel branch, mirroring "gpool_0".
layer { | |
name: "gpool_1" | |
type: "Pooling" | |
bottom: "Eltwise16" | |
top: "gpool_1" | |
pooling_param { | |
pool: AVE | |
kernel_size: 4 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "res2a_2_branch1" | |
type: "Convolution" | |
bottom: "conv1_relu" | |
top: "res2a_2_branch1" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2a_2_branch1" | |
type: "BatchNorm" | |
bottom: "res2a_2_branch1" | |
top: "res2a_2_branch1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2a_2_branch1" | |
type: "Scale" | |
bottom: "res2a_2_branch1" | |
top: "res2a_2_branch1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res2a_2_branch2a" | |
type: "Convolution" | |
bottom: "conv1_relu" | |
top: "res2a_2_branch2a" | |
convolution_param { | |
num_output: 8 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2a_2_branch2a" | |
type: "BatchNorm" | |
bottom: "res2a_2_branch2a" | |
top: "res2a_2_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2a_2_branch2a" | |
type: "Scale" | |
bottom: "res2a_2_branch2a" | |
top: "res2a_2_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res2a_2_branch2a_relu" | |
type: "ReLU" | |
bottom: "res2a_2_branch2a" | |
top: "res2a_2_branch2a" | |
} | |
layer { | |
name: "res2a_2_branch2b" | |
type: "Convolution" | |
bottom: "res2a_2_branch2a" | |
top: "res2a_2_branch2b" | |
convolution_param { | |
num_output: 8 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2a_2_branch2b" | |
type: "BatchNorm" | |
bottom: "res2a_2_branch2b" | |
top: "res2a_2_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2a_2_branch2b" | |
type: "Scale" | |
bottom: "res2a_2_branch2b" | |
top: "res2a_2_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res2a_2_branch2b_relu" | |
type: "ReLU" | |
bottom: "res2a_2_branch2b" | |
top: "res2a_2_branch2b" | |
} | |
layer { | |
name: "res2a_2_branch2c" | |
type: "Convolution" | |
bottom: "res2a_2_branch2b" | |
top: "res2a_2_branch2c" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2a_2_branch2c" | |
type: "BatchNorm" | |
bottom: "res2a_2_branch2c" | |
top: "res2a_2_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2a_2_branch2c" | |
type: "Scale" | |
bottom: "res2a_2_branch2c" | |
top: "res2a_2_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise17" | |
type: "Eltwise" | |
bottom: "res2a_2_branch1" | |
bottom: "res2a_2_branch2c" | |
top: "Eltwise17" | |
} | |
layer { | |
name: "res2a_2_relu" | |
type: "ReLU" | |
bottom: "Eltwise17" | |
top: "Eltwise17" | |
} | |
layer { | |
name: "res2b_2_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise17" | |
top: "res2b_2_branch2a" | |
convolution_param { | |
num_output: 8 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2b_2_branch2a" | |
type: "BatchNorm" | |
bottom: "res2b_2_branch2a" | |
top: "res2b_2_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2b_2_branch2a" | |
type: "Scale" | |
bottom: "res2b_2_branch2a" | |
top: "res2b_2_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res2b_2_branch2a_relu" | |
type: "ReLU" | |
bottom: "res2b_2_branch2a" | |
top: "res2b_2_branch2a" | |
} | |
layer { | |
name: "res2b_2_branch2b" | |
type: "Convolution" | |
bottom: "res2b_2_branch2a" | |
top: "res2b_2_branch2b" | |
convolution_param { | |
num_output: 8 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2b_2_branch2b" | |
type: "BatchNorm" | |
bottom: "res2b_2_branch2b" | |
top: "res2b_2_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2b_2_branch2b" | |
type: "Scale" | |
bottom: "res2b_2_branch2b" | |
top: "res2b_2_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res2b_2_branch2b_relu" | |
type: "ReLU" | |
bottom: "res2b_2_branch2b" | |
top: "res2b_2_branch2b" | |
} | |
layer { | |
name: "res2b_2_branch2c" | |
type: "Convolution" | |
bottom: "res2b_2_branch2b" | |
top: "res2b_2_branch2c" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2b_2_branch2c" | |
type: "BatchNorm" | |
bottom: "res2b_2_branch2c" | |
top: "res2b_2_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2b_2_branch2c" | |
type: "Scale" | |
bottom: "res2b_2_branch2c" | |
top: "res2b_2_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise18" | |
type: "Eltwise" | |
bottom: "Eltwise17" | |
bottom: "res2b_2_branch2c" | |
top: "Eltwise18" | |
} | |
layer { | |
name: "res2b_2_relu" | |
type: "ReLU" | |
bottom: "Eltwise18" | |
top: "Eltwise18" | |
} | |
layer { | |
name: "res3a_2_branch1" | |
type: "Convolution" | |
bottom: "Eltwise18" | |
top: "res3a_2_branch1" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn3a_2_branch1" | |
type: "BatchNorm" | |
bottom: "res3a_2_branch1" | |
top: "res3a_2_branch1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3a_2_branch1" | |
type: "Scale" | |
bottom: "res3a_2_branch1" | |
top: "res3a_2_branch1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res3a_2_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise18" | |
top: "res3a_2_branch2a" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn3a_2_branch2a" | |
type: "BatchNorm" | |
bottom: "res3a_2_branch2a" | |
top: "res3a_2_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3a_2_branch2a" | |
type: "Scale" | |
bottom: "res3a_2_branch2a" | |
top: "res3a_2_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res3a_2_branch2a_relu" | |
type: "ReLU" | |
bottom: "res3a_2_branch2a" | |
top: "res3a_2_branch2a" | |
} | |
layer { | |
name: "res3a_2_branch2b" | |
type: "Convolution" | |
bottom: "res3a_2_branch2a" | |
top: "res3a_2_branch2b" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn3a_2_branch2b" | |
type: "BatchNorm" | |
bottom: "res3a_2_branch2b" | |
top: "res3a_2_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3a_2_branch2b" | |
type: "Scale" | |
bottom: "res3a_2_branch2b" | |
top: "res3a_2_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res3a_2_branch2b_relu" | |
type: "ReLU" | |
bottom: "res3a_2_branch2b" | |
top: "res3a_2_branch2b" | |
} | |
layer { | |
name: "res3a_2_branch2c" | |
type: "Convolution" | |
bottom: "res3a_2_branch2b" | |
top: "res3a_2_branch2c" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn3a_2_branch2c" | |
type: "BatchNorm" | |
bottom: "res3a_2_branch2c" | |
top: "res3a_2_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3a_2_branch2c" | |
type: "Scale" | |
bottom: "res3a_2_branch2c" | |
top: "res3a_2_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise19" | |
type: "Eltwise" | |
bottom: "res3a_2_branch1" | |
bottom: "res3a_2_branch2c" | |
top: "Eltwise19" | |
} | |
layer { | |
name: "res3a_2_relu" | |
type: "ReLU" | |
bottom: "Eltwise19" | |
top: "Eltwise19" | |
} | |
layer { | |
name: "res3b_2_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise19" | |
top: "res3b_2_branch2a" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn3b_2_branch2a" | |
type: "BatchNorm" | |
bottom: "res3b_2_branch2a" | |
top: "res3b_2_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3b_2_branch2a" | |
type: "Scale" | |
bottom: "res3b_2_branch2a" | |
top: "res3b_2_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res3b_2_branch2a_relu" | |
type: "ReLU" | |
bottom: "res3b_2_branch2a" | |
top: "res3b_2_branch2a" | |
} | |
layer { | |
name: "res3b_2_branch2b" | |
type: "Convolution" | |
bottom: "res3b_2_branch2a" | |
top: "res3b_2_branch2b" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn3b_2_branch2b" | |
type: "BatchNorm" | |
bottom: "res3b_2_branch2b" | |
top: "res3b_2_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3b_2_branch2b" | |
type: "Scale" | |
bottom: "res3b_2_branch2b" | |
top: "res3b_2_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res3b_2_branch2b_relu" | |
type: "ReLU" | |
bottom: "res3b_2_branch2b" | |
top: "res3b_2_branch2b" | |
} | |
layer { | |
name: "res3b_2_branch2c" | |
type: "Convolution" | |
bottom: "res3b_2_branch2b" | |
top: "res3b_2_branch2c" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn3b_2_branch2c" | |
type: "BatchNorm" | |
bottom: "res3b_2_branch2c" | |
top: "res3b_2_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3b_2_branch2c" | |
type: "Scale" | |
bottom: "res3b_2_branch2c" | |
top: "res3b_2_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise20" | |
type: "Eltwise" | |
bottom: "Eltwise19" | |
bottom: "res3b_2_branch2c" | |
top: "Eltwise20" | |
} | |
layer { | |
name: "res3b_2_relu" | |
type: "ReLU" | |
bottom: "Eltwise20" | |
top: "Eltwise20" | |
} | |
layer { | |
name: "res4a_2_branch1" | |
type: "Convolution" | |
bottom: "Eltwise20" | |
top: "res4a_2_branch1" | |
convolution_param { | |
num_output: 128 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn4a_2_branch1" | |
type: "BatchNorm" | |
bottom: "res4a_2_branch1" | |
top: "res4a_2_branch1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4a_2_branch1" | |
type: "Scale" | |
bottom: "res4a_2_branch1" | |
top: "res4a_2_branch1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4a_2_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise20" | |
top: "res4a_2_branch2a" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn4a_2_branch2a" | |
type: "BatchNorm" | |
bottom: "res4a_2_branch2a" | |
top: "res4a_2_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4a_2_branch2a" | |
type: "Scale" | |
bottom: "res4a_2_branch2a" | |
top: "res4a_2_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4a_2_branch2a_relu" | |
type: "ReLU" | |
bottom: "res4a_2_branch2a" | |
top: "res4a_2_branch2a" | |
} | |
layer { | |
name: "res4a_2_branch2b" | |
type: "Convolution" | |
bottom: "res4a_2_branch2a" | |
top: "res4a_2_branch2b" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn4a_2_branch2b" | |
type: "BatchNorm" | |
bottom: "res4a_2_branch2b" | |
top: "res4a_2_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4a_2_branch2b" | |
type: "Scale" | |
bottom: "res4a_2_branch2b" | |
top: "res4a_2_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4a_2_branch2b_relu" | |
type: "ReLU" | |
bottom: "res4a_2_branch2b" | |
top: "res4a_2_branch2b" | |
} | |
layer { | |
name: "res4a_2_branch2c" | |
type: "Convolution" | |
bottom: "res4a_2_branch2b" | |
top: "res4a_2_branch2c" | |
convolution_param { | |
num_output: 128 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn4a_2_branch2c" | |
type: "BatchNorm" | |
bottom: "res4a_2_branch2c" | |
top: "res4a_2_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4a_2_branch2c" | |
type: "Scale" | |
bottom: "res4a_2_branch2c" | |
top: "res4a_2_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise21" | |
type: "Eltwise" | |
bottom: "res4a_2_branch1" | |
bottom: "res4a_2_branch2c" | |
top: "Eltwise21" | |
} | |
layer { | |
name: "res4a_2_relu" | |
type: "ReLU" | |
bottom: "Eltwise21" | |
top: "Eltwise21" | |
} | |
layer { | |
name: "res4b_2_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise21" | |
top: "res4b_2_branch2a" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn4b_2_branch2a" | |
type: "BatchNorm" | |
bottom: "res4b_2_branch2a" | |
top: "res4b_2_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4b_2_branch2a" | |
type: "Scale" | |
bottom: "res4b_2_branch2a" | |
top: "res4b_2_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4b_2_branch2a_relu" | |
type: "ReLU" | |
bottom: "res4b_2_branch2a" | |
top: "res4b_2_branch2a" | |
} | |
layer { | |
name: "res4b_2_branch2b" | |
type: "Convolution" | |
bottom: "res4b_2_branch2a" | |
top: "res4b_2_branch2b" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn4b_2_branch2b" | |
type: "BatchNorm" | |
bottom: "res4b_2_branch2b" | |
top: "res4b_2_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4b_2_branch2b" | |
type: "Scale" | |
bottom: "res4b_2_branch2b" | |
top: "res4b_2_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4b_2_branch2b_relu" | |
type: "ReLU" | |
bottom: "res4b_2_branch2b" | |
top: "res4b_2_branch2b" | |
} | |
layer { | |
name: "res4b_2_branch2c" | |
type: "Convolution" | |
bottom: "res4b_2_branch2b" | |
top: "res4b_2_branch2c" | |
convolution_param { | |
num_output: 128 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn4b_2_branch2c" | |
type: "BatchNorm" | |
bottom: "res4b_2_branch2c" | |
top: "res4b_2_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4b_2_branch2c" | |
type: "Scale" | |
bottom: "res4b_2_branch2c" | |
top: "res4b_2_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise22" | |
type: "Eltwise" | |
bottom: "Eltwise21" | |
bottom: "res4b_2_branch2c" | |
top: "Eltwise22" | |
} | |
layer { | |
name: "res4b_2_relu" | |
type: "ReLU" | |
bottom: "Eltwise22" | |
top: "Eltwise22" | |
} | |
layer { | |
name: "res5a_2_branch1" | |
type: "Convolution" | |
bottom: "Eltwise22" | |
top: "res5a_2_branch1" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn5a_2_branch1" | |
type: "BatchNorm" | |
bottom: "res5a_2_branch1" | |
top: "res5a_2_branch1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5a_2_branch1" | |
type: "Scale" | |
bottom: "res5a_2_branch1" | |
top: "res5a_2_branch1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5a_2_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise22" | |
top: "res5a_2_branch2a" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn5a_2_branch2a" | |
type: "BatchNorm" | |
bottom: "res5a_2_branch2a" | |
top: "res5a_2_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5a_2_branch2a" | |
type: "Scale" | |
bottom: "res5a_2_branch2a" | |
top: "res5a_2_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5a_2_branch2a_relu" | |
type: "ReLU" | |
bottom: "res5a_2_branch2a" | |
top: "res5a_2_branch2a" | |
} | |
layer { | |
name: "res5a_2_branch2b" | |
type: "Convolution" | |
bottom: "res5a_2_branch2a" | |
top: "res5a_2_branch2b" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5a_2_branch2b" | |
type: "BatchNorm" | |
bottom: "res5a_2_branch2b" | |
top: "res5a_2_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5a_2_branch2b" | |
type: "Scale" | |
bottom: "res5a_2_branch2b" | |
top: "res5a_2_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5a_2_branch2b_relu" | |
type: "ReLU" | |
bottom: "res5a_2_branch2b" | |
top: "res5a_2_branch2b" | |
} | |
layer { | |
name: "res5a_2_branch2c" | |
type: "Convolution" | |
bottom: "res5a_2_branch2b" | |
top: "res5a_2_branch2c" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5a_2_branch2c" | |
type: "BatchNorm" | |
bottom: "res5a_2_branch2c" | |
top: "res5a_2_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5a_2_branch2c" | |
type: "Scale" | |
bottom: "res5a_2_branch2c" | |
top: "res5a_2_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise23" | |
type: "Eltwise" | |
bottom: "res5a_2_branch1" | |
bottom: "res5a_2_branch2c" | |
top: "Eltwise23" | |
} | |
layer { | |
name: "res5a_2_relu" | |
type: "ReLU" | |
bottom: "Eltwise23" | |
top: "Eltwise23" | |
} | |
layer { | |
name: "res5b_2_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise23" | |
top: "res5b_2_branch2a" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5b_2_branch2a" | |
type: "BatchNorm" | |
bottom: "res5b_2_branch2a" | |
top: "res5b_2_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5b_2_branch2a" | |
type: "Scale" | |
bottom: "res5b_2_branch2a" | |
top: "res5b_2_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5b_2_branch2a_relu" | |
type: "ReLU" | |
bottom: "res5b_2_branch2a" | |
top: "res5b_2_branch2a" | |
} | |
layer { | |
name: "res5b_2_branch2b" | |
type: "Convolution" | |
bottom: "res5b_2_branch2a" | |
top: "res5b_2_branch2b" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5b_2_branch2b" | |
type: "BatchNorm" | |
bottom: "res5b_2_branch2b" | |
top: "res5b_2_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5b_2_branch2b" | |
type: "Scale" | |
bottom: "res5b_2_branch2b" | |
top: "res5b_2_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5b_2_branch2b_relu" | |
type: "ReLU" | |
bottom: "res5b_2_branch2b" | |
top: "res5b_2_branch2b" | |
} | |
layer { | |
name: "res5b_2_branch2c" | |
type: "Convolution" | |
bottom: "res5b_2_branch2b" | |
top: "res5b_2_branch2c" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5b_2_branch2c" | |
type: "BatchNorm" | |
bottom: "res5b_2_branch2c" | |
top: "res5b_2_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5b_2_branch2c" | |
type: "Scale" | |
bottom: "res5b_2_branch2c" | |
top: "res5b_2_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise24" | |
type: "Eltwise" | |
bottom: "Eltwise23" | |
bottom: "res5b_2_branch2c" | |
top: "Eltwise24" | |
} | |
layer { | |
name: "res5b_2_relu" | |
type: "ReLU" | |
bottom: "Eltwise24" | |
top: "Eltwise24" | |
} | |
layer { | |
name: "gpool_2" | |
type: "Pooling" | |
bottom: "Eltwise24" | |
top: "gpool_2" | |
pooling_param { | |
pool: AVE | |
kernel_size: 4 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "res2a_3_branch1" | |
type: "Convolution" | |
bottom: "conv1_relu" | |
top: "res2a_3_branch1" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2a_3_branch1" | |
type: "BatchNorm" | |
bottom: "res2a_3_branch1" | |
top: "res2a_3_branch1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2a_3_branch1" | |
type: "Scale" | |
bottom: "res2a_3_branch1" | |
top: "res2a_3_branch1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res2a_3_branch2a" | |
type: "Convolution" | |
bottom: "conv1_relu" | |
top: "res2a_3_branch2a" | |
convolution_param { | |
num_output: 8 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2a_3_branch2a" | |
type: "BatchNorm" | |
bottom: "res2a_3_branch2a" | |
top: "res2a_3_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2a_3_branch2a" | |
type: "Scale" | |
bottom: "res2a_3_branch2a" | |
top: "res2a_3_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res2a_3_branch2a_relu" | |
type: "ReLU" | |
bottom: "res2a_3_branch2a" | |
top: "res2a_3_branch2a" | |
} | |
layer { | |
name: "res2a_3_branch2b" | |
type: "Convolution" | |
bottom: "res2a_3_branch2a" | |
top: "res2a_3_branch2b" | |
convolution_param { | |
num_output: 8 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2a_3_branch2b" | |
type: "BatchNorm" | |
bottom: "res2a_3_branch2b" | |
top: "res2a_3_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2a_3_branch2b" | |
type: "Scale" | |
bottom: "res2a_3_branch2b" | |
top: "res2a_3_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res2a_3_branch2b_relu" | |
type: "ReLU" | |
bottom: "res2a_3_branch2b" | |
top: "res2a_3_branch2b" | |
} | |
layer { | |
name: "res2a_3_branch2c" | |
type: "Convolution" | |
bottom: "res2a_3_branch2b" | |
top: "res2a_3_branch2c" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2a_3_branch2c" | |
type: "BatchNorm" | |
bottom: "res2a_3_branch2c" | |
top: "res2a_3_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2a_3_branch2c" | |
type: "Scale" | |
bottom: "res2a_3_branch2c" | |
top: "res2a_3_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise25" | |
type: "Eltwise" | |
bottom: "res2a_3_branch1" | |
bottom: "res2a_3_branch2c" | |
top: "Eltwise25" | |
} | |
layer { | |
name: "res2a_3_relu" | |
type: "ReLU" | |
bottom: "Eltwise25" | |
top: "Eltwise25" | |
} | |
layer { | |
name: "res2b_3_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise25" | |
top: "res2b_3_branch2a" | |
convolution_param { | |
num_output: 8 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2b_3_branch2a" | |
type: "BatchNorm" | |
bottom: "res2b_3_branch2a" | |
top: "res2b_3_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2b_3_branch2a" | |
type: "Scale" | |
bottom: "res2b_3_branch2a" | |
top: "res2b_3_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res2b_3_branch2a_relu" | |
type: "ReLU" | |
bottom: "res2b_3_branch2a" | |
top: "res2b_3_branch2a" | |
} | |
layer { | |
name: "res2b_3_branch2b" | |
type: "Convolution" | |
bottom: "res2b_3_branch2a" | |
top: "res2b_3_branch2b" | |
convolution_param { | |
num_output: 8 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2b_3_branch2b" | |
type: "BatchNorm" | |
bottom: "res2b_3_branch2b" | |
top: "res2b_3_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2b_3_branch2b" | |
type: "Scale" | |
bottom: "res2b_3_branch2b" | |
top: "res2b_3_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res2b_3_branch2b_relu" | |
type: "ReLU" | |
bottom: "res2b_3_branch2b" | |
top: "res2b_3_branch2b" | |
} | |
layer { | |
name: "res2b_3_branch2c" | |
type: "Convolution" | |
bottom: "res2b_3_branch2b" | |
top: "res2b_3_branch2c" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2b_3_branch2c" | |
type: "BatchNorm" | |
bottom: "res2b_3_branch2c" | |
top: "res2b_3_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2b_3_branch2c" | |
type: "Scale" | |
bottom: "res2b_3_branch2c" | |
top: "res2b_3_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise26" | |
type: "Eltwise" | |
bottom: "Eltwise25" | |
bottom: "res2b_3_branch2c" | |
top: "Eltwise26" | |
} | |
layer { | |
name: "res2b_3_relu" | |
type: "ReLU" | |
bottom: "Eltwise26" | |
top: "Eltwise26" | |
} | |
layer { | |
name: "res3a_3_branch1" | |
type: "Convolution" | |
bottom: "Eltwise26" | |
top: "res3a_3_branch1" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn3a_3_branch1" | |
type: "BatchNorm" | |
bottom: "res3a_3_branch1" | |
top: "res3a_3_branch1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3a_3_branch1" | |
type: "Scale" | |
bottom: "res3a_3_branch1" | |
top: "res3a_3_branch1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res3a_3_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise26" | |
top: "res3a_3_branch2a" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn3a_3_branch2a" | |
type: "BatchNorm" | |
bottom: "res3a_3_branch2a" | |
top: "res3a_3_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3a_3_branch2a" | |
type: "Scale" | |
bottom: "res3a_3_branch2a" | |
top: "res3a_3_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res3a_3_branch2a_relu" | |
type: "ReLU" | |
bottom: "res3a_3_branch2a" | |
top: "res3a_3_branch2a" | |
} | |
layer { | |
name: "res3a_3_branch2b" | |
type: "Convolution" | |
bottom: "res3a_3_branch2a" | |
top: "res3a_3_branch2b" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn3a_3_branch2b" | |
type: "BatchNorm" | |
bottom: "res3a_3_branch2b" | |
top: "res3a_3_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3a_3_branch2b" | |
type: "Scale" | |
bottom: "res3a_3_branch2b" | |
top: "res3a_3_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res3a_3_branch2b_relu" | |
type: "ReLU" | |
bottom: "res3a_3_branch2b" | |
top: "res3a_3_branch2b" | |
} | |
layer { | |
name: "res3a_3_branch2c" | |
type: "Convolution" | |
bottom: "res3a_3_branch2b" | |
top: "res3a_3_branch2c" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn3a_3_branch2c" | |
type: "BatchNorm" | |
bottom: "res3a_3_branch2c" | |
top: "res3a_3_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3a_3_branch2c" | |
type: "Scale" | |
bottom: "res3a_3_branch2c" | |
top: "res3a_3_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise27" | |
type: "Eltwise" | |
bottom: "res3a_3_branch1" | |
bottom: "res3a_3_branch2c" | |
top: "Eltwise27" | |
} | |
layer { | |
name: "res3a_3_relu" | |
type: "ReLU" | |
bottom: "Eltwise27" | |
top: "Eltwise27" | |
} | |
layer { | |
name: "res3b_3_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise27" | |
top: "res3b_3_branch2a" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn3b_3_branch2a" | |
type: "BatchNorm" | |
bottom: "res3b_3_branch2a" | |
top: "res3b_3_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3b_3_branch2a" | |
type: "Scale" | |
bottom: "res3b_3_branch2a" | |
top: "res3b_3_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res3b_3_branch2a_relu" | |
type: "ReLU" | |
bottom: "res3b_3_branch2a" | |
top: "res3b_3_branch2a" | |
} | |
layer { | |
name: "res3b_3_branch2b" | |
type: "Convolution" | |
bottom: "res3b_3_branch2a" | |
top: "res3b_3_branch2b" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn3b_3_branch2b" | |
type: "BatchNorm" | |
bottom: "res3b_3_branch2b" | |
top: "res3b_3_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3b_3_branch2b" | |
type: "Scale" | |
bottom: "res3b_3_branch2b" | |
top: "res3b_3_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res3b_3_branch2b_relu" | |
type: "ReLU" | |
bottom: "res3b_3_branch2b" | |
top: "res3b_3_branch2b" | |
} | |
layer { | |
name: "res3b_3_branch2c" | |
type: "Convolution" | |
bottom: "res3b_3_branch2b" | |
top: "res3b_3_branch2c" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn3b_3_branch2c" | |
type: "BatchNorm" | |
bottom: "res3b_3_branch2c" | |
top: "res3b_3_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3b_3_branch2c" | |
type: "Scale" | |
bottom: "res3b_3_branch2c" | |
top: "res3b_3_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise28" | |
type: "Eltwise" | |
bottom: "Eltwise27" | |
bottom: "res3b_3_branch2c" | |
top: "Eltwise28" | |
} | |
layer { | |
name: "res3b_3_relu" | |
type: "ReLU" | |
bottom: "Eltwise28" | |
top: "Eltwise28" | |
} | |
layer { | |
name: "res4a_3_branch1" | |
type: "Convolution" | |
bottom: "Eltwise28" | |
top: "res4a_3_branch1" | |
convolution_param { | |
num_output: 128 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn4a_3_branch1" | |
type: "BatchNorm" | |
bottom: "res4a_3_branch1" | |
top: "res4a_3_branch1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4a_3_branch1" | |
type: "Scale" | |
bottom: "res4a_3_branch1" | |
top: "res4a_3_branch1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4a_3_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise28" | |
top: "res4a_3_branch2a" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn4a_3_branch2a" | |
type: "BatchNorm" | |
bottom: "res4a_3_branch2a" | |
top: "res4a_3_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4a_3_branch2a" | |
type: "Scale" | |
bottom: "res4a_3_branch2a" | |
top: "res4a_3_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4a_3_branch2a_relu" | |
type: "ReLU" | |
bottom: "res4a_3_branch2a" | |
top: "res4a_3_branch2a" | |
} | |
layer { | |
name: "res4a_3_branch2b" | |
type: "Convolution" | |
bottom: "res4a_3_branch2a" | |
top: "res4a_3_branch2b" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn4a_3_branch2b" | |
type: "BatchNorm" | |
bottom: "res4a_3_branch2b" | |
top: "res4a_3_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4a_3_branch2b" | |
type: "Scale" | |
bottom: "res4a_3_branch2b" | |
top: "res4a_3_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4a_3_branch2b_relu" | |
type: "ReLU" | |
bottom: "res4a_3_branch2b" | |
top: "res4a_3_branch2b" | |
} | |
layer { | |
name: "res4a_3_branch2c" | |
type: "Convolution" | |
bottom: "res4a_3_branch2b" | |
top: "res4a_3_branch2c" | |
convolution_param { | |
num_output: 128 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn4a_3_branch2c" | |
type: "BatchNorm" | |
bottom: "res4a_3_branch2c" | |
top: "res4a_3_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4a_3_branch2c" | |
type: "Scale" | |
bottom: "res4a_3_branch2c" | |
top: "res4a_3_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise29" | |
type: "Eltwise" | |
bottom: "res4a_3_branch1" | |
bottom: "res4a_3_branch2c" | |
top: "Eltwise29" | |
} | |
layer { | |
name: "res4a_3_relu" | |
type: "ReLU" | |
bottom: "Eltwise29" | |
top: "Eltwise29" | |
} | |
layer { | |
name: "res4b_3_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise29" | |
top: "res4b_3_branch2a" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn4b_3_branch2a" | |
type: "BatchNorm" | |
bottom: "res4b_3_branch2a" | |
top: "res4b_3_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4b_3_branch2a" | |
type: "Scale" | |
bottom: "res4b_3_branch2a" | |
top: "res4b_3_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4b_3_branch2a_relu" | |
type: "ReLU" | |
bottom: "res4b_3_branch2a" | |
top: "res4b_3_branch2a" | |
} | |
layer { | |
name: "res4b_3_branch2b" | |
type: "Convolution" | |
bottom: "res4b_3_branch2a" | |
top: "res4b_3_branch2b" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn4b_3_branch2b" | |
type: "BatchNorm" | |
bottom: "res4b_3_branch2b" | |
top: "res4b_3_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4b_3_branch2b" | |
type: "Scale" | |
bottom: "res4b_3_branch2b" | |
top: "res4b_3_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4b_3_branch2b_relu" | |
type: "ReLU" | |
bottom: "res4b_3_branch2b" | |
top: "res4b_3_branch2b" | |
} | |
layer { | |
name: "res4b_3_branch2c" | |
type: "Convolution" | |
bottom: "res4b_3_branch2b" | |
top: "res4b_3_branch2c" | |
convolution_param { | |
num_output: 128 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn4b_3_branch2c" | |
type: "BatchNorm" | |
bottom: "res4b_3_branch2c" | |
top: "res4b_3_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4b_3_branch2c" | |
type: "Scale" | |
bottom: "res4b_3_branch2c" | |
top: "res4b_3_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise30" | |
type: "Eltwise" | |
bottom: "Eltwise29" | |
bottom: "res4b_3_branch2c" | |
top: "Eltwise30" | |
} | |
layer { | |
name: "res4b_3_relu" | |
type: "ReLU" | |
bottom: "Eltwise30" | |
top: "Eltwise30" | |
} | |
layer { | |
name: "res5a_3_branch1" | |
type: "Convolution" | |
bottom: "Eltwise30" | |
top: "res5a_3_branch1" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn5a_3_branch1" | |
type: "BatchNorm" | |
bottom: "res5a_3_branch1" | |
top: "res5a_3_branch1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5a_3_branch1" | |
type: "Scale" | |
bottom: "res5a_3_branch1" | |
top: "res5a_3_branch1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5a_3_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise30" | |
top: "res5a_3_branch2a" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn5a_3_branch2a" | |
type: "BatchNorm" | |
bottom: "res5a_3_branch2a" | |
top: "res5a_3_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5a_3_branch2a" | |
type: "Scale" | |
bottom: "res5a_3_branch2a" | |
top: "res5a_3_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5a_3_branch2a_relu" | |
type: "ReLU" | |
bottom: "res5a_3_branch2a" | |
top: "res5a_3_branch2a" | |
} | |
layer { | |
name: "res5a_3_branch2b" | |
type: "Convolution" | |
bottom: "res5a_3_branch2a" | |
top: "res5a_3_branch2b" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5a_3_branch2b" | |
type: "BatchNorm" | |
bottom: "res5a_3_branch2b" | |
top: "res5a_3_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5a_3_branch2b" | |
type: "Scale" | |
bottom: "res5a_3_branch2b" | |
top: "res5a_3_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5a_3_branch2b_relu" | |
type: "ReLU" | |
bottom: "res5a_3_branch2b" | |
top: "res5a_3_branch2b" | |
} | |
layer { | |
name: "res5a_3_branch2c" | |
type: "Convolution" | |
bottom: "res5a_3_branch2b" | |
top: "res5a_3_branch2c" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5a_3_branch2c" | |
type: "BatchNorm" | |
bottom: "res5a_3_branch2c" | |
top: "res5a_3_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5a_3_branch2c" | |
type: "Scale" | |
bottom: "res5a_3_branch2c" | |
top: "res5a_3_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise31" | |
type: "Eltwise" | |
bottom: "res5a_3_branch1" | |
bottom: "res5a_3_branch2c" | |
top: "Eltwise31" | |
} | |
layer { | |
name: "res5a_3_relu" | |
type: "ReLU" | |
bottom: "Eltwise31" | |
top: "Eltwise31" | |
} | |
layer { | |
name: "res5b_3_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise31" | |
top: "res5b_3_branch2a" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5b_3_branch2a" | |
type: "BatchNorm" | |
bottom: "res5b_3_branch2a" | |
top: "res5b_3_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5b_3_branch2a" | |
type: "Scale" | |
bottom: "res5b_3_branch2a" | |
top: "res5b_3_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5b_3_branch2a_relu" | |
type: "ReLU" | |
bottom: "res5b_3_branch2a" | |
top: "res5b_3_branch2a" | |
} | |
layer { | |
name: "res5b_3_branch2b" | |
type: "Convolution" | |
bottom: "res5b_3_branch2a" | |
top: "res5b_3_branch2b" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5b_3_branch2b" | |
type: "BatchNorm" | |
bottom: "res5b_3_branch2b" | |
top: "res5b_3_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5b_3_branch2b" | |
type: "Scale" | |
bottom: "res5b_3_branch2b" | |
top: "res5b_3_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5b_3_branch2b_relu" | |
type: "ReLU" | |
bottom: "res5b_3_branch2b" | |
top: "res5b_3_branch2b" | |
} | |
layer { | |
name: "res5b_3_branch2c" | |
type: "Convolution" | |
bottom: "res5b_3_branch2b" | |
top: "res5b_3_branch2c" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5b_3_branch2c" | |
type: "BatchNorm" | |
bottom: "res5b_3_branch2c" | |
top: "res5b_3_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5b_3_branch2c" | |
type: "Scale" | |
bottom: "res5b_3_branch2c" | |
top: "res5b_3_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise32" | |
type: "Eltwise" | |
bottom: "Eltwise31" | |
bottom: "res5b_3_branch2c" | |
top: "Eltwise32" | |
} | |
layer { | |
name: "res5b_3_relu" | |
type: "ReLU" | |
bottom: "Eltwise32" | |
top: "Eltwise32" | |
} | |
# Global average pooling for parallel branch 3: AVE pooling with kernel 4
# collapses the final feature map to 1x1 per channel.
# NOTE(review): assumes the incoming map is 4x4 (32x32 input reduced by the
# three stride-2 stages of this branch) -- confirm if the input resolution
# ever changes; global_pooling: true would be resolution-independent.
layer {
name: "gpool_3"
type: "Pooling"
bottom: "Eltwise32"
top: "gpool_3"
pooling_param {
pool: AVE
kernel_size: 4
stride: 1
}
}
layer { | |
name: "res2a_4_branch1" | |
type: "Convolution" | |
bottom: "conv1_relu" | |
top: "res2a_4_branch1" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2a_4_branch1" | |
type: "BatchNorm" | |
bottom: "res2a_4_branch1" | |
top: "res2a_4_branch1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2a_4_branch1" | |
type: "Scale" | |
bottom: "res2a_4_branch1" | |
top: "res2a_4_branch1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res2a_4_branch2a" | |
type: "Convolution" | |
bottom: "conv1_relu" | |
top: "res2a_4_branch2a" | |
convolution_param { | |
num_output: 8 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2a_4_branch2a" | |
type: "BatchNorm" | |
bottom: "res2a_4_branch2a" | |
top: "res2a_4_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2a_4_branch2a" | |
type: "Scale" | |
bottom: "res2a_4_branch2a" | |
top: "res2a_4_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res2a_4_branch2a_relu" | |
type: "ReLU" | |
bottom: "res2a_4_branch2a" | |
top: "res2a_4_branch2a" | |
} | |
layer { | |
name: "res2a_4_branch2b" | |
type: "Convolution" | |
bottom: "res2a_4_branch2a" | |
top: "res2a_4_branch2b" | |
convolution_param { | |
num_output: 8 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2a_4_branch2b" | |
type: "BatchNorm" | |
bottom: "res2a_4_branch2b" | |
top: "res2a_4_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2a_4_branch2b" | |
type: "Scale" | |
bottom: "res2a_4_branch2b" | |
top: "res2a_4_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res2a_4_branch2b_relu" | |
type: "ReLU" | |
bottom: "res2a_4_branch2b" | |
top: "res2a_4_branch2b" | |
} | |
layer { | |
name: "res2a_4_branch2c" | |
type: "Convolution" | |
bottom: "res2a_4_branch2b" | |
top: "res2a_4_branch2c" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2a_4_branch2c" | |
type: "BatchNorm" | |
bottom: "res2a_4_branch2c" | |
top: "res2a_4_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2a_4_branch2c" | |
type: "Scale" | |
bottom: "res2a_4_branch2c" | |
top: "res2a_4_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
# Residual merge for unit res2a_4: elementwise combination of the 1x1
# projection shortcut and the bottleneck output (no operation field is set;
# Caffe's Eltwise defaults to SUM -- confirm against deployment Caffe version),
# followed by an in-place ReLU on the summed blob.
layer {
name: "Eltwise33"
type: "Eltwise"
bottom: "res2a_4_branch1"
bottom: "res2a_4_branch2c"
top: "Eltwise33"
}
layer {
name: "res2a_4_relu"
type: "ReLU"
bottom: "Eltwise33"
top: "Eltwise33"
}
layer { | |
name: "res2b_4_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise33" | |
top: "res2b_4_branch2a" | |
convolution_param { | |
num_output: 8 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2b_4_branch2a" | |
type: "BatchNorm" | |
bottom: "res2b_4_branch2a" | |
top: "res2b_4_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2b_4_branch2a" | |
type: "Scale" | |
bottom: "res2b_4_branch2a" | |
top: "res2b_4_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res2b_4_branch2a_relu" | |
type: "ReLU" | |
bottom: "res2b_4_branch2a" | |
top: "res2b_4_branch2a" | |
} | |
layer { | |
name: "res2b_4_branch2b" | |
type: "Convolution" | |
bottom: "res2b_4_branch2a" | |
top: "res2b_4_branch2b" | |
convolution_param { | |
num_output: 8 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2b_4_branch2b" | |
type: "BatchNorm" | |
bottom: "res2b_4_branch2b" | |
top: "res2b_4_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2b_4_branch2b" | |
type: "Scale" | |
bottom: "res2b_4_branch2b" | |
top: "res2b_4_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res2b_4_branch2b_relu" | |
type: "ReLU" | |
bottom: "res2b_4_branch2b" | |
top: "res2b_4_branch2b" | |
} | |
layer { | |
name: "res2b_4_branch2c" | |
type: "Convolution" | |
bottom: "res2b_4_branch2b" | |
top: "res2b_4_branch2c" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn2b_4_branch2c" | |
type: "BatchNorm" | |
bottom: "res2b_4_branch2c" | |
top: "res2b_4_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale2b_4_branch2c" | |
type: "Scale" | |
bottom: "res2b_4_branch2c" | |
top: "res2b_4_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise34" | |
type: "Eltwise" | |
bottom: "Eltwise33" | |
bottom: "res2b_4_branch2c" | |
top: "Eltwise34" | |
} | |
layer { | |
name: "res2b_4_relu" | |
type: "ReLU" | |
bottom: "Eltwise34" | |
top: "Eltwise34" | |
} | |
layer { | |
name: "res3a_4_branch1" | |
type: "Convolution" | |
bottom: "Eltwise34" | |
top: "res3a_4_branch1" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn3a_4_branch1" | |
type: "BatchNorm" | |
bottom: "res3a_4_branch1" | |
top: "res3a_4_branch1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3a_4_branch1" | |
type: "Scale" | |
bottom: "res3a_4_branch1" | |
top: "res3a_4_branch1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res3a_4_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise34" | |
top: "res3a_4_branch2a" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn3a_4_branch2a" | |
type: "BatchNorm" | |
bottom: "res3a_4_branch2a" | |
top: "res3a_4_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3a_4_branch2a" | |
type: "Scale" | |
bottom: "res3a_4_branch2a" | |
top: "res3a_4_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res3a_4_branch2a_relu" | |
type: "ReLU" | |
bottom: "res3a_4_branch2a" | |
top: "res3a_4_branch2a" | |
} | |
layer { | |
name: "res3a_4_branch2b" | |
type: "Convolution" | |
bottom: "res3a_4_branch2a" | |
top: "res3a_4_branch2b" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn3a_4_branch2b" | |
type: "BatchNorm" | |
bottom: "res3a_4_branch2b" | |
top: "res3a_4_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3a_4_branch2b" | |
type: "Scale" | |
bottom: "res3a_4_branch2b" | |
top: "res3a_4_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res3a_4_branch2b_relu" | |
type: "ReLU" | |
bottom: "res3a_4_branch2b" | |
top: "res3a_4_branch2b" | |
} | |
layer { | |
name: "res3a_4_branch2c" | |
type: "Convolution" | |
bottom: "res3a_4_branch2b" | |
top: "res3a_4_branch2c" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn3a_4_branch2c" | |
type: "BatchNorm" | |
bottom: "res3a_4_branch2c" | |
top: "res3a_4_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3a_4_branch2c" | |
type: "Scale" | |
bottom: "res3a_4_branch2c" | |
top: "res3a_4_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise35" | |
type: "Eltwise" | |
bottom: "res3a_4_branch1" | |
bottom: "res3a_4_branch2c" | |
top: "Eltwise35" | |
} | |
layer { | |
name: "res3a_4_relu" | |
type: "ReLU" | |
bottom: "Eltwise35" | |
top: "Eltwise35" | |
} | |
layer { | |
name: "res3b_4_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise35" | |
top: "res3b_4_branch2a" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn3b_4_branch2a" | |
type: "BatchNorm" | |
bottom: "res3b_4_branch2a" | |
top: "res3b_4_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3b_4_branch2a" | |
type: "Scale" | |
bottom: "res3b_4_branch2a" | |
top: "res3b_4_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res3b_4_branch2a_relu" | |
type: "ReLU" | |
bottom: "res3b_4_branch2a" | |
top: "res3b_4_branch2a" | |
} | |
layer { | |
name: "res3b_4_branch2b" | |
type: "Convolution" | |
bottom: "res3b_4_branch2a" | |
top: "res3b_4_branch2b" | |
convolution_param { | |
num_output: 16 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn3b_4_branch2b" | |
type: "BatchNorm" | |
bottom: "res3b_4_branch2b" | |
top: "res3b_4_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3b_4_branch2b" | |
type: "Scale" | |
bottom: "res3b_4_branch2b" | |
top: "res3b_4_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res3b_4_branch2b_relu" | |
type: "ReLU" | |
bottom: "res3b_4_branch2b" | |
top: "res3b_4_branch2b" | |
} | |
layer { | |
name: "res3b_4_branch2c" | |
type: "Convolution" | |
bottom: "res3b_4_branch2b" | |
top: "res3b_4_branch2c" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn3b_4_branch2c" | |
type: "BatchNorm" | |
bottom: "res3b_4_branch2c" | |
top: "res3b_4_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale3b_4_branch2c" | |
type: "Scale" | |
bottom: "res3b_4_branch2c" | |
top: "res3b_4_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise36" | |
type: "Eltwise" | |
bottom: "Eltwise35" | |
bottom: "res3b_4_branch2c" | |
top: "Eltwise36" | |
} | |
layer { | |
name: "res3b_4_relu" | |
type: "ReLU" | |
bottom: "Eltwise36" | |
top: "Eltwise36" | |
} | |
layer { | |
name: "res4a_4_branch1" | |
type: "Convolution" | |
bottom: "Eltwise36" | |
top: "res4a_4_branch1" | |
convolution_param { | |
num_output: 128 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn4a_4_branch1" | |
type: "BatchNorm" | |
bottom: "res4a_4_branch1" | |
top: "res4a_4_branch1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4a_4_branch1" | |
type: "Scale" | |
bottom: "res4a_4_branch1" | |
top: "res4a_4_branch1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4a_4_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise36" | |
top: "res4a_4_branch2a" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn4a_4_branch2a" | |
type: "BatchNorm" | |
bottom: "res4a_4_branch2a" | |
top: "res4a_4_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4a_4_branch2a" | |
type: "Scale" | |
bottom: "res4a_4_branch2a" | |
top: "res4a_4_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4a_4_branch2a_relu" | |
type: "ReLU" | |
bottom: "res4a_4_branch2a" | |
top: "res4a_4_branch2a" | |
} | |
layer { | |
name: "res4a_4_branch2b" | |
type: "Convolution" | |
bottom: "res4a_4_branch2a" | |
top: "res4a_4_branch2b" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn4a_4_branch2b" | |
type: "BatchNorm" | |
bottom: "res4a_4_branch2b" | |
top: "res4a_4_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4a_4_branch2b" | |
type: "Scale" | |
bottom: "res4a_4_branch2b" | |
top: "res4a_4_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4a_4_branch2b_relu" | |
type: "ReLU" | |
bottom: "res4a_4_branch2b" | |
top: "res4a_4_branch2b" | |
} | |
layer { | |
name: "res4a_4_branch2c" | |
type: "Convolution" | |
bottom: "res4a_4_branch2b" | |
top: "res4a_4_branch2c" | |
convolution_param { | |
num_output: 128 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn4a_4_branch2c" | |
type: "BatchNorm" | |
bottom: "res4a_4_branch2c" | |
top: "res4a_4_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4a_4_branch2c" | |
type: "Scale" | |
bottom: "res4a_4_branch2c" | |
top: "res4a_4_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise37" | |
type: "Eltwise" | |
bottom: "res4a_4_branch1" | |
bottom: "res4a_4_branch2c" | |
top: "Eltwise37" | |
} | |
layer { | |
name: "res4a_4_relu" | |
type: "ReLU" | |
bottom: "Eltwise37" | |
top: "Eltwise37" | |
} | |
layer { | |
name: "res4b_4_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise37" | |
top: "res4b_4_branch2a" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn4b_4_branch2a" | |
type: "BatchNorm" | |
bottom: "res4b_4_branch2a" | |
top: "res4b_4_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4b_4_branch2a" | |
type: "Scale" | |
bottom: "res4b_4_branch2a" | |
top: "res4b_4_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4b_4_branch2a_relu" | |
type: "ReLU" | |
bottom: "res4b_4_branch2a" | |
top: "res4b_4_branch2a" | |
} | |
layer { | |
name: "res4b_4_branch2b" | |
type: "Convolution" | |
bottom: "res4b_4_branch2a" | |
top: "res4b_4_branch2b" | |
convolution_param { | |
num_output: 32 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn4b_4_branch2b" | |
type: "BatchNorm" | |
bottom: "res4b_4_branch2b" | |
top: "res4b_4_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4b_4_branch2b" | |
type: "Scale" | |
bottom: "res4b_4_branch2b" | |
top: "res4b_4_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res4b_4_branch2b_relu" | |
type: "ReLU" | |
bottom: "res4b_4_branch2b" | |
top: "res4b_4_branch2b" | |
} | |
layer { | |
name: "res4b_4_branch2c" | |
type: "Convolution" | |
bottom: "res4b_4_branch2b" | |
top: "res4b_4_branch2c" | |
convolution_param { | |
num_output: 128 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn4b_4_branch2c" | |
type: "BatchNorm" | |
bottom: "res4b_4_branch2c" | |
top: "res4b_4_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale4b_4_branch2c" | |
type: "Scale" | |
bottom: "res4b_4_branch2c" | |
top: "res4b_4_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise38" | |
type: "Eltwise" | |
bottom: "Eltwise37" | |
bottom: "res4b_4_branch2c" | |
top: "Eltwise38" | |
} | |
layer { | |
name: "res4b_4_relu" | |
type: "ReLU" | |
bottom: "Eltwise38" | |
top: "Eltwise38" | |
} | |
layer { | |
name: "res5a_4_branch1" | |
type: "Convolution" | |
bottom: "Eltwise38" | |
top: "res5a_4_branch1" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn5a_4_branch1" | |
type: "BatchNorm" | |
bottom: "res5a_4_branch1" | |
top: "res5a_4_branch1" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5a_4_branch1" | |
type: "Scale" | |
bottom: "res5a_4_branch1" | |
top: "res5a_4_branch1" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5a_4_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise38" | |
top: "res5a_4_branch2a" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 2 | |
} | |
} | |
layer { | |
name: "bn5a_4_branch2a" | |
type: "BatchNorm" | |
bottom: "res5a_4_branch2a" | |
top: "res5a_4_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5a_4_branch2a" | |
type: "Scale" | |
bottom: "res5a_4_branch2a" | |
top: "res5a_4_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5a_4_branch2a_relu" | |
type: "ReLU" | |
bottom: "res5a_4_branch2a" | |
top: "res5a_4_branch2a" | |
} | |
layer { | |
name: "res5a_4_branch2b" | |
type: "Convolution" | |
bottom: "res5a_4_branch2a" | |
top: "res5a_4_branch2b" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5a_4_branch2b" | |
type: "BatchNorm" | |
bottom: "res5a_4_branch2b" | |
top: "res5a_4_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5a_4_branch2b" | |
type: "Scale" | |
bottom: "res5a_4_branch2b" | |
top: "res5a_4_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5a_4_branch2b_relu" | |
type: "ReLU" | |
bottom: "res5a_4_branch2b" | |
top: "res5a_4_branch2b" | |
} | |
layer { | |
name: "res5a_4_branch2c" | |
type: "Convolution" | |
bottom: "res5a_4_branch2b" | |
top: "res5a_4_branch2c" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5a_4_branch2c" | |
type: "BatchNorm" | |
bottom: "res5a_4_branch2c" | |
top: "res5a_4_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5a_4_branch2c" | |
type: "Scale" | |
bottom: "res5a_4_branch2c" | |
top: "res5a_4_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise39" | |
type: "Eltwise" | |
bottom: "res5a_4_branch1" | |
bottom: "res5a_4_branch2c" | |
top: "Eltwise39" | |
} | |
layer { | |
name: "res5a_4_relu" | |
type: "ReLU" | |
bottom: "Eltwise39" | |
top: "Eltwise39" | |
} | |
layer { | |
name: "res5b_4_branch2a" | |
type: "Convolution" | |
bottom: "Eltwise39" | |
top: "res5b_4_branch2a" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5b_4_branch2a" | |
type: "BatchNorm" | |
bottom: "res5b_4_branch2a" | |
top: "res5b_4_branch2a" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5b_4_branch2a" | |
type: "Scale" | |
bottom: "res5b_4_branch2a" | |
top: "res5b_4_branch2a" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5b_4_branch2a_relu" | |
type: "ReLU" | |
bottom: "res5b_4_branch2a" | |
top: "res5b_4_branch2a" | |
} | |
layer { | |
name: "res5b_4_branch2b" | |
type: "Convolution" | |
bottom: "res5b_4_branch2a" | |
top: "res5b_4_branch2b" | |
convolution_param { | |
num_output: 64 | |
bias_term: false | |
pad: 1 | |
kernel_size: 3 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5b_4_branch2b" | |
type: "BatchNorm" | |
bottom: "res5b_4_branch2b" | |
top: "res5b_4_branch2b" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5b_4_branch2b" | |
type: "Scale" | |
bottom: "res5b_4_branch2b" | |
top: "res5b_4_branch2b" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "res5b_4_branch2b_relu" | |
type: "ReLU" | |
bottom: "res5b_4_branch2b" | |
top: "res5b_4_branch2b" | |
} | |
layer { | |
name: "res5b_4_branch2c" | |
type: "Convolution" | |
bottom: "res5b_4_branch2b" | |
top: "res5b_4_branch2c" | |
convolution_param { | |
num_output: 256 | |
bias_term: false | |
pad: 0 | |
kernel_size: 1 | |
stride: 1 | |
} | |
} | |
layer { | |
name: "bn5b_4_branch2c" | |
type: "BatchNorm" | |
bottom: "res5b_4_branch2c" | |
top: "res5b_4_branch2c" | |
batch_norm_param { | |
use_global_stats: true | |
} | |
} | |
layer { | |
name: "scale5b_4_branch2c" | |
type: "Scale" | |
bottom: "res5b_4_branch2c" | |
top: "res5b_4_branch2c" | |
scale_param { | |
bias_term: true | |
} | |
} | |
layer { | |
name: "Eltwise40" | |
type: "Eltwise" | |
bottom: "Eltwise39" | |
bottom: "res5b_4_branch2c" | |
top: "Eltwise40" | |
} | |
layer { | |
name: "res5b_4_relu" | |
type: "ReLU" | |
bottom: "Eltwise40" | |
top: "Eltwise40" | |
} | |
# Global average pooling for parallel branch 4 (kernel 4 -> 1x1 per channel).
# NOTE(review): like gpool_3, assumes a 4x4 input map -- the branch's three
# stride-2 convs (res3a_4/res4a_4/res5a_4, all stride: 2 above) reduce the
# 32x32 input to 4x4.
layer {
name: "gpool_4"
type: "Pooling"
bottom: "Eltwise40"
top: "gpool_4"
pooling_param {
pool: AVE
kernel_size: 4
stride: 1
}
}
# Final activation of parallel branch 5, applied in place.
# NOTE(review): bottom "res5b" does not follow the "_5"-suffixed /
# Eltwise-numbered naming used by branches 3 and 4 (cf. Eltwise32/Eltwise40);
# the producing layer is earlier in the file -- verify the blob exists and
# is really branch 5's output.
layer {
name: "res5b_5_relu"
type: "ReLU"
bottom: "res5b"
top: "res5b"
}
# Global average pooling for branch 5 (kernel 4 -> 1x1 per channel).
# NOTE(review): no feat_5/fc_5/prob_5 head consumes gpool_5 anywhere in the
# visible tail of this file, even though the net is named "6x" -- confirm
# whether a sixth classifier head was intended.
layer {
name: "gpool_5"
type: "Pooling"
bottom: "res5b"
top: "gpool_5"
pooling_param {
pool: AVE
kernel_size: 4
stride: 1
}
}
# --- Classifier head 0: uses only branch 0's pooled features ---
# Concat with a single bottom is a pass-through that renames the blob to
# feat_0, keeping all heads structurally uniform.
layer {
name: "feat_0"
type: "Concat"
bottom: "gpool_0"
top: "feat_0"
concat_param {
axis: 1
}
}
# 10-way linear classifier (CIFAR-10 classes).
layer {
name: "fc_0"
type: "InnerProduct"
bottom: "feat_0"
top: "fc_0"
inner_product_param {
num_output: 10
}
}
# Class-probability output for head 0.
layer {
name: "prob_0"
type: "Softmax"
bottom: "fc_0"
top: "prob_0"
}
# --- Classifier head 1: fuses pooled features of branches 0-1 ---
# Channel-axis (axis: 1) concat of the branch-wise global-pooled blobs.
layer {
name: "feat_1"
type: "Concat"
bottom: "gpool_0"
bottom: "gpool_1"
top: "feat_1"
concat_param {
axis: 1
}
}
# 10-way linear classifier (CIFAR-10 classes).
layer {
name: "fc_1"
type: "InnerProduct"
bottom: "feat_1"
top: "fc_1"
inner_product_param {
num_output: 10
}
}
# Class-probability output for head 1.
layer {
name: "prob_1"
type: "Softmax"
bottom: "fc_1"
top: "prob_1"
}
# --- Classifier head 2: fuses pooled features of branches 0-2 ---
# Channel-axis (axis: 1) concat of the branch-wise global-pooled blobs.
layer {
name: "feat_2"
type: "Concat"
bottom: "gpool_0"
bottom: "gpool_1"
bottom: "gpool_2"
top: "feat_2"
concat_param {
axis: 1
}
}
# 10-way linear classifier (CIFAR-10 classes).
layer {
name: "fc_2"
type: "InnerProduct"
bottom: "feat_2"
top: "fc_2"
inner_product_param {
num_output: 10
}
}
# Class-probability output for head 2.
layer {
name: "prob_2"
type: "Softmax"
bottom: "fc_2"
top: "prob_2"
}
# --- Classifier head 3: fuses pooled features of branches 0-3 ---
# Channel-axis (axis: 1) concat of the branch-wise global-pooled blobs.
layer {
name: "feat_3"
type: "Concat"
bottom: "gpool_0"
bottom: "gpool_1"
bottom: "gpool_2"
bottom: "gpool_3"
top: "feat_3"
concat_param {
axis: 1
}
}
# 10-way linear classifier (CIFAR-10 classes).
layer {
name: "fc_3"
type: "InnerProduct"
bottom: "feat_3"
top: "fc_3"
inner_product_param {
num_output: 10
}
}
# Class-probability output for head 3.
layer {
name: "prob_3"
type: "Softmax"
bottom: "fc_3"
top: "prob_3"
}
# --- Classifier head 4: fuses pooled features of branches 0-4 ---
# Channel-axis (axis: 1) concat of the branch-wise global-pooled blobs.
# NOTE(review): gpool_5 is produced above but never concatenated into any
# head -- confirm whether a feat_5 head was meant to follow this one.
layer {
name: "feat_4"
type: "Concat"
bottom: "gpool_0"
bottom: "gpool_1"
bottom: "gpool_2"
bottom: "gpool_3"
bottom: "gpool_4"
top: "feat_4"
concat_param {
axis: 1
}
}
# 10-way linear classifier (CIFAR-10 classes).
layer {
name: "fc_4"
type: "InnerProduct"
bottom: "feat_4"
top: "fc_4"
inner_product_param {
num_output: 10
}
}
# Class-probability output for head 4.
layer {
name: "prob_4"
type: "Softmax"
bottom: "fc_4"
top: "prob_4"
}
# End of Parallel18_6x_CIFAR10 prototxt.
# (Removed non-prototxt GitHub page residue captured when this gist was
# scraped; it would break prototxt parsing.)