DenseNet
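The prototxt below (apparently exported from Chainer, judging by the Convolution2DFunction / FixedBatchNormalization layer names) defines a small DenseNet-style deploy network for 32x32 RGB inputs in batches of 5: a 16-map stem convolution, three dense blocks of ten 3x3 convolutions each with growth rate 24, 2x2 average-pooling transitions, and a global 8x8 average pool feeding a 10-way linear classifier. The section comments inside the file are annotations added for readability; they are not part of the original export.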
name: "dense" | |
layer { | |
name: "data" | |
type: "Input" | |
top: "data" | |
input_param { shape: { dim: 5 dim: 3 dim: 32 dim: 32 } } | |
} | |
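# Stem: 3x3 convolution producing 16 feature maps from the 3-channel input.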
layer {
  type: "Convolution"
  name: "Convolution2DFunction-0-1"
  bottom: "data"
  top: "Convolution2DFunction-0-1"
  convolution_param {
    num_output: 16
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
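# Dense block 1: ten BN -> Scale -> ReLU -> 3x3 Conv (24 maps) units.
# Each Concat appends the new 24 feature maps to the running feature stack
# (growth rate 24), so channels grow from 16 to 16 + 10*24 = 256.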
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-1-1_bn"
  bottom: "Convolution2DFunction-0-1"
  top: "FixedBatchNormalization-1-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-1-1"
  bottom: "FixedBatchNormalization-1-1_bn"
  top: "FixedBatchNormalization-1-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-2-1"
  bottom: "FixedBatchNormalization-1-1"
  top: "ReLU-2-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-3-1"
  bottom: "ReLU-2-1"
  top: "Convolution2DFunction-3-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-4-1"
  bottom: "Convolution2DFunction-0-1"
  bottom: "Convolution2DFunction-3-1"
  top: "Concat-4-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-5-1_bn"
  bottom: "Concat-4-1"
  top: "FixedBatchNormalization-5-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-5-1"
  bottom: "FixedBatchNormalization-5-1_bn"
  top: "FixedBatchNormalization-5-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-6-1"
  bottom: "FixedBatchNormalization-5-1"
  top: "ReLU-6-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-7-1"
  bottom: "ReLU-6-1"
  top: "Convolution2DFunction-7-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-8-1"
  bottom: "Concat-4-1"
  bottom: "Convolution2DFunction-7-1"
  top: "Concat-8-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-9-1_bn"
  bottom: "Concat-8-1"
  top: "FixedBatchNormalization-9-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-9-1"
  bottom: "FixedBatchNormalization-9-1_bn"
  top: "FixedBatchNormalization-9-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-10-1"
  bottom: "FixedBatchNormalization-9-1"
  top: "ReLU-10-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-11-1"
  bottom: "ReLU-10-1"
  top: "Convolution2DFunction-11-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-12-1"
  bottom: "Concat-8-1"
  bottom: "Convolution2DFunction-11-1"
  top: "Concat-12-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-13-1_bn"
  bottom: "Concat-12-1"
  top: "FixedBatchNormalization-13-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-13-1"
  bottom: "FixedBatchNormalization-13-1_bn"
  top: "FixedBatchNormalization-13-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-14-1"
  bottom: "FixedBatchNormalization-13-1"
  top: "ReLU-14-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-15-1"
  bottom: "ReLU-14-1"
  top: "Convolution2DFunction-15-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-16-1"
  bottom: "Concat-12-1"
  bottom: "Convolution2DFunction-15-1"
  top: "Concat-16-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-17-1_bn"
  bottom: "Concat-16-1"
  top: "FixedBatchNormalization-17-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-17-1"
  bottom: "FixedBatchNormalization-17-1_bn"
  top: "FixedBatchNormalization-17-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-18-1"
  bottom: "FixedBatchNormalization-17-1"
  top: "ReLU-18-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-19-1"
  bottom: "ReLU-18-1"
  top: "Convolution2DFunction-19-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-20-1"
  bottom: "Concat-16-1"
  bottom: "Convolution2DFunction-19-1"
  top: "Concat-20-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-21-1_bn"
  bottom: "Concat-20-1"
  top: "FixedBatchNormalization-21-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-21-1"
  bottom: "FixedBatchNormalization-21-1_bn"
  top: "FixedBatchNormalization-21-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-22-1"
  bottom: "FixedBatchNormalization-21-1"
  top: "ReLU-22-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-23-1"
  bottom: "ReLU-22-1"
  top: "Convolution2DFunction-23-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-24-1"
  bottom: "Concat-20-1"
  bottom: "Convolution2DFunction-23-1"
  top: "Concat-24-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-25-1_bn"
  bottom: "Concat-24-1"
  top: "FixedBatchNormalization-25-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-25-1"
  bottom: "FixedBatchNormalization-25-1_bn"
  top: "FixedBatchNormalization-25-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-26-1"
  bottom: "FixedBatchNormalization-25-1"
  top: "ReLU-26-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-27-1"
  bottom: "ReLU-26-1"
  top: "Convolution2DFunction-27-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-28-1"
  bottom: "Concat-24-1"
  bottom: "Convolution2DFunction-27-1"
  top: "Concat-28-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-29-1_bn"
  bottom: "Concat-28-1"
  top: "FixedBatchNormalization-29-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-29-1"
  bottom: "FixedBatchNormalization-29-1_bn"
  top: "FixedBatchNormalization-29-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-30-1"
  bottom: "FixedBatchNormalization-29-1"
  top: "ReLU-30-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-31-1"
  bottom: "ReLU-30-1"
  top: "Convolution2DFunction-31-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-32-1"
  bottom: "Concat-28-1"
  bottom: "Convolution2DFunction-31-1"
  top: "Concat-32-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-33-1_bn"
  bottom: "Concat-32-1"
  top: "FixedBatchNormalization-33-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-33-1"
  bottom: "FixedBatchNormalization-33-1_bn"
  top: "FixedBatchNormalization-33-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-34-1"
  bottom: "FixedBatchNormalization-33-1"
  top: "ReLU-34-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-35-1"
  bottom: "ReLU-34-1"
  top: "Convolution2DFunction-35-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-36-1"
  bottom: "Concat-32-1"
  bottom: "Convolution2DFunction-35-1"
  top: "Concat-36-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-37-1_bn"
  bottom: "Concat-36-1"
  top: "FixedBatchNormalization-37-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-37-1"
  bottom: "FixedBatchNormalization-37-1_bn"
  top: "FixedBatchNormalization-37-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-38-1"
  bottom: "FixedBatchNormalization-37-1"
  top: "ReLU-38-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-39-1"
  bottom: "ReLU-38-1"
  top: "Convolution2DFunction-39-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-40-1"
  bottom: "Concat-36-1"
  bottom: "Convolution2DFunction-39-1"
  top: "Concat-40-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-41-1_bn"
  bottom: "Concat-40-1"
  top: "FixedBatchNormalization-41-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-41-1"
  bottom: "FixedBatchNormalization-41-1_bn"
  top: "FixedBatchNormalization-41-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-42-1"
  bottom: "FixedBatchNormalization-41-1"
  top: "ReLU-42-1"
}
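# Transition 1: 2x2 average pooling, stride 2 (spatial size 32x32 -> 16x16).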
layer {
  type: "Pooling"
  name: "AveragePooling2D-43-1"
  bottom: "ReLU-42-1"
  top: "AveragePooling2D-43-1"
  pooling_param {
    pool: AVE
    pad_w: 0
    pad_h: 0
    stride_w: 2
    stride_h: 2
    kernel_w: 2
    kernel_h: 2
  }
}
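# Dense block 2: ten more BN -> Scale -> ReLU -> 3x3 Conv (24 maps) units,
# growing channels from 256 to 256 + 10*24 = 496.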
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-44-1_bn"
  bottom: "AveragePooling2D-43-1"
  top: "FixedBatchNormalization-44-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-44-1"
  bottom: "FixedBatchNormalization-44-1_bn"
  top: "FixedBatchNormalization-44-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-45-1"
  bottom: "FixedBatchNormalization-44-1"
  top: "ReLU-45-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-46-1"
  bottom: "ReLU-45-1"
  top: "Convolution2DFunction-46-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-47-1"
  bottom: "AveragePooling2D-43-1"
  bottom: "Convolution2DFunction-46-1"
  top: "Concat-47-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-48-1_bn"
  bottom: "Concat-47-1"
  top: "FixedBatchNormalization-48-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-48-1"
  bottom: "FixedBatchNormalization-48-1_bn"
  top: "FixedBatchNormalization-48-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-49-1"
  bottom: "FixedBatchNormalization-48-1"
  top: "ReLU-49-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-50-1"
  bottom: "ReLU-49-1"
  top: "Convolution2DFunction-50-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-51-1"
  bottom: "Concat-47-1"
  bottom: "Convolution2DFunction-50-1"
  top: "Concat-51-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-52-1_bn"
  bottom: "Concat-51-1"
  top: "FixedBatchNormalization-52-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-52-1"
  bottom: "FixedBatchNormalization-52-1_bn"
  top: "FixedBatchNormalization-52-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-53-1"
  bottom: "FixedBatchNormalization-52-1"
  top: "ReLU-53-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-54-1"
  bottom: "ReLU-53-1"
  top: "Convolution2DFunction-54-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-55-1"
  bottom: "Concat-51-1"
  bottom: "Convolution2DFunction-54-1"
  top: "Concat-55-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-56-1_bn"
  bottom: "Concat-55-1"
  top: "FixedBatchNormalization-56-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-56-1"
  bottom: "FixedBatchNormalization-56-1_bn"
  top: "FixedBatchNormalization-56-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-57-1"
  bottom: "FixedBatchNormalization-56-1"
  top: "ReLU-57-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-58-1"
  bottom: "ReLU-57-1"
  top: "Convolution2DFunction-58-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-59-1"
  bottom: "Concat-55-1"
  bottom: "Convolution2DFunction-58-1"
  top: "Concat-59-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-60-1_bn"
  bottom: "Concat-59-1"
  top: "FixedBatchNormalization-60-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-60-1"
  bottom: "FixedBatchNormalization-60-1_bn"
  top: "FixedBatchNormalization-60-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-61-1"
  bottom: "FixedBatchNormalization-60-1"
  top: "ReLU-61-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-62-1"
  bottom: "ReLU-61-1"
  top: "Convolution2DFunction-62-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-63-1"
  bottom: "Concat-59-1"
  bottom: "Convolution2DFunction-62-1"
  top: "Concat-63-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-64-1_bn"
  bottom: "Concat-63-1"
  top: "FixedBatchNormalization-64-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-64-1"
  bottom: "FixedBatchNormalization-64-1_bn"
  top: "FixedBatchNormalization-64-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-65-1"
  bottom: "FixedBatchNormalization-64-1"
  top: "ReLU-65-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-66-1"
  bottom: "ReLU-65-1"
  top: "Convolution2DFunction-66-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-67-1"
  bottom: "Concat-63-1"
  bottom: "Convolution2DFunction-66-1"
  top: "Concat-67-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-68-1_bn"
  bottom: "Concat-67-1"
  top: "FixedBatchNormalization-68-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-68-1"
  bottom: "FixedBatchNormalization-68-1_bn"
  top: "FixedBatchNormalization-68-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-69-1"
  bottom: "FixedBatchNormalization-68-1"
  top: "ReLU-69-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-70-1"
  bottom: "ReLU-69-1"
  top: "Convolution2DFunction-70-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-71-1"
  bottom: "Concat-67-1"
  bottom: "Convolution2DFunction-70-1"
  top: "Concat-71-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-72-1_bn"
  bottom: "Concat-71-1"
  top: "FixedBatchNormalization-72-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-72-1"
  bottom: "FixedBatchNormalization-72-1_bn"
  top: "FixedBatchNormalization-72-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-73-1"
  bottom: "FixedBatchNormalization-72-1"
  top: "ReLU-73-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-74-1"
  bottom: "ReLU-73-1"
  top: "Convolution2DFunction-74-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-75-1"
  bottom: "Concat-71-1"
  bottom: "Convolution2DFunction-74-1"
  top: "Concat-75-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-76-1_bn"
  bottom: "Concat-75-1"
  top: "FixedBatchNormalization-76-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-76-1"
  bottom: "FixedBatchNormalization-76-1_bn"
  top: "FixedBatchNormalization-76-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-77-1"
  bottom: "FixedBatchNormalization-76-1"
  top: "ReLU-77-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-78-1"
  bottom: "ReLU-77-1"
  top: "Convolution2DFunction-78-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-79-1"
  bottom: "Concat-75-1"
  bottom: "Convolution2DFunction-78-1"
  top: "Concat-79-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-80-1_bn"
  bottom: "Concat-79-1"
  top: "FixedBatchNormalization-80-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-80-1"
  bottom: "FixedBatchNormalization-80-1_bn"
  top: "FixedBatchNormalization-80-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-81-1"
  bottom: "FixedBatchNormalization-80-1"
  top: "ReLU-81-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-82-1"
  bottom: "ReLU-81-1"
  top: "Convolution2DFunction-82-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-83-1"
  bottom: "Concat-79-1"
  bottom: "Convolution2DFunction-82-1"
  top: "Concat-83-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-84-1_bn"
  bottom: "Concat-83-1"
  top: "FixedBatchNormalization-84-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-84-1"
  bottom: "FixedBatchNormalization-84-1_bn"
  top: "FixedBatchNormalization-84-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-85-1"
  bottom: "FixedBatchNormalization-84-1"
  top: "ReLU-85-1"
}
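# Transition 2: 2x2 average pooling, stride 2 (16x16 -> 8x8).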
layer {
  type: "Pooling"
  name: "AveragePooling2D-86-1"
  bottom: "ReLU-85-1"
  top: "AveragePooling2D-86-1"
  pooling_param {
    pool: AVE
    pad_w: 0
    pad_h: 0
    stride_w: 2
    stride_h: 2
    kernel_w: 2
    kernel_h: 2
  }
}
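# Dense block 3: ten more dense units, growing channels from 496 to 496 + 10*24 = 736.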
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-87-1_bn"
  bottom: "AveragePooling2D-86-1"
  top: "FixedBatchNormalization-87-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-87-1"
  bottom: "FixedBatchNormalization-87-1_bn"
  top: "FixedBatchNormalization-87-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-88-1"
  bottom: "FixedBatchNormalization-87-1"
  top: "ReLU-88-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-89-1"
  bottom: "ReLU-88-1"
  top: "Convolution2DFunction-89-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-90-1"
  bottom: "AveragePooling2D-86-1"
  bottom: "Convolution2DFunction-89-1"
  top: "Concat-90-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-91-1_bn"
  bottom: "Concat-90-1"
  top: "FixedBatchNormalization-91-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-91-1"
  bottom: "FixedBatchNormalization-91-1_bn"
  top: "FixedBatchNormalization-91-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-92-1"
  bottom: "FixedBatchNormalization-91-1"
  top: "ReLU-92-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-93-1"
  bottom: "ReLU-92-1"
  top: "Convolution2DFunction-93-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-94-1"
  bottom: "Concat-90-1"
  bottom: "Convolution2DFunction-93-1"
  top: "Concat-94-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-95-1_bn"
  bottom: "Concat-94-1"
  top: "FixedBatchNormalization-95-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-95-1"
  bottom: "FixedBatchNormalization-95-1_bn"
  top: "FixedBatchNormalization-95-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-96-1"
  bottom: "FixedBatchNormalization-95-1"
  top: "ReLU-96-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-97-1"
  bottom: "ReLU-96-1"
  top: "Convolution2DFunction-97-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-98-1"
  bottom: "Concat-94-1"
  bottom: "Convolution2DFunction-97-1"
  top: "Concat-98-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-99-1_bn"
  bottom: "Concat-98-1"
  top: "FixedBatchNormalization-99-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-99-1"
  bottom: "FixedBatchNormalization-99-1_bn"
  top: "FixedBatchNormalization-99-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-100-1"
  bottom: "FixedBatchNormalization-99-1"
  top: "ReLU-100-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-101-1"
  bottom: "ReLU-100-1"
  top: "Convolution2DFunction-101-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-102-1"
  bottom: "Concat-98-1"
  bottom: "Convolution2DFunction-101-1"
  top: "Concat-102-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-103-1_bn"
  bottom: "Concat-102-1"
  top: "FixedBatchNormalization-103-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-103-1"
  bottom: "FixedBatchNormalization-103-1_bn"
  top: "FixedBatchNormalization-103-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-104-1"
  bottom: "FixedBatchNormalization-103-1"
  top: "ReLU-104-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-105-1"
  bottom: "ReLU-104-1"
  top: "Convolution2DFunction-105-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-106-1"
  bottom: "Concat-102-1"
  bottom: "Convolution2DFunction-105-1"
  top: "Concat-106-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-107-1_bn"
  bottom: "Concat-106-1"
  top: "FixedBatchNormalization-107-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-107-1"
  bottom: "FixedBatchNormalization-107-1_bn"
  top: "FixedBatchNormalization-107-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-108-1"
  bottom: "FixedBatchNormalization-107-1"
  top: "ReLU-108-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-109-1"
  bottom: "ReLU-108-1"
  top: "Convolution2DFunction-109-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-110-1"
  bottom: "Concat-106-1"
  bottom: "Convolution2DFunction-109-1"
  top: "Concat-110-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-111-1_bn"
  bottom: "Concat-110-1"
  top: "FixedBatchNormalization-111-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-111-1"
  bottom: "FixedBatchNormalization-111-1_bn"
  top: "FixedBatchNormalization-111-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-112-1"
  bottom: "FixedBatchNormalization-111-1"
  top: "ReLU-112-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-113-1"
  bottom: "ReLU-112-1"
  top: "Convolution2DFunction-113-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-114-1"
  bottom: "Concat-110-1"
  bottom: "Convolution2DFunction-113-1"
  top: "Concat-114-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-115-1_bn"
  bottom: "Concat-114-1"
  top: "FixedBatchNormalization-115-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-115-1"
  bottom: "FixedBatchNormalization-115-1_bn"
  top: "FixedBatchNormalization-115-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-116-1"
  bottom: "FixedBatchNormalization-115-1"
  top: "ReLU-116-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-117-1"
  bottom: "ReLU-116-1"
  top: "Convolution2DFunction-117-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-118-1"
  bottom: "Concat-114-1"
  bottom: "Convolution2DFunction-117-1"
  top: "Concat-118-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-119-1_bn"
  bottom: "Concat-118-1"
  top: "FixedBatchNormalization-119-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-119-1"
  bottom: "FixedBatchNormalization-119-1_bn"
  top: "FixedBatchNormalization-119-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-120-1"
  bottom: "FixedBatchNormalization-119-1"
  top: "ReLU-120-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-121-1"
  bottom: "ReLU-120-1"
  top: "Convolution2DFunction-121-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-122-1"
  bottom: "Concat-118-1"
  bottom: "Convolution2DFunction-121-1"
  top: "Concat-122-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-123-1_bn"
  bottom: "Concat-122-1"
  top: "FixedBatchNormalization-123-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-123-1"
  bottom: "FixedBatchNormalization-123-1_bn"
  top: "FixedBatchNormalization-123-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-124-1"
  bottom: "FixedBatchNormalization-123-1"
  top: "ReLU-124-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-125-1"
  bottom: "ReLU-124-1"
  top: "Convolution2DFunction-125-1"
  convolution_param {
    num_output: 24
    bias_term: true
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Concat"
  name: "Concat-126-1"
  bottom: "Concat-122-1"
  bottom: "Convolution2DFunction-125-1"
  top: "Concat-126-1"
  concat_param {
    axis: 1
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-127-1_bn"
  bottom: "Concat-126-1"
  top: "FixedBatchNormalization-127-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-127-1"
  bottom: "FixedBatchNormalization-127-1_bn"
  top: "FixedBatchNormalization-127-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-128-1"
  bottom: "FixedBatchNormalization-127-1"
  top: "ReLU-128-1"
}
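# Head: global 8x8 average pooling, flatten to (5, 736), and a 10-way linear
# classifier (no softmax layer in this deploy definition).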
layer {
  type: "Pooling"
  name: "AveragePooling2D-129-1"
  bottom: "ReLU-128-1"
  top: "AveragePooling2D-129-1"
  pooling_param {
    pool: AVE
    pad_w: 0
    pad_h: 0
    stride_w: 8
    stride_h: 8
    kernel_w: 8
    kernel_h: 8
  }
}
layer {
  type: "Reshape"
  name: "Reshape-130-1"
  bottom: "AveragePooling2D-129-1"
  top: "Reshape-130-1"
  reshape_param {
    shape {
      dim: 5
      dim: -1
    }
  }
}
layer {
  type: "InnerProduct"
  name: "LinearFunction-131-1"
  bottom: "Reshape-130-1"
  top: "LinearFunction-131-1"
  inner_product_param {
    num_output: 10
    bias_term: true
  }
}
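For reference, a minimal pycaffe sketch of how this deploy definition could be loaded and run. The file names dense.prototxt and dense.caffemodel are hypothetical (the gist only provides the network definition, not trained weights):

import numpy as np
import caffe

caffe.set_mode_cpu()  # or caffe.set_mode_gpu()

# Hypothetical paths: save the prototxt above as dense.prototxt and supply
# a matching trained weights file as dense.caffemodel.
net = caffe.Net('dense.prototxt', 'dense.caffemodel', caffe.TEST)

# The Input layer expects a batch of 5 RGB images at 32x32, in (N, C, H, W) order.
batch = np.random.rand(5, 3, 32, 32).astype(np.float32)
net.blobs['data'].data[...] = batch

# Forward pass; the only unconsumed top is the final InnerProduct layer.
out = net.forward()
scores = out['LinearFunction-131-1']  # shape (5, 10): raw class scores per image
print(scores.argmax(axis=1))

Since the network ends at the InnerProduct layer, the outputs are unnormalized scores; apply a softmax externally if probabilities are needed.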