# wrn_real
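# Wide Residual Network deploy definition for Caffe, apparently exported from
# Chainer (layer names mirror Chainer function names such as
# Convolution2DFunction and FixedBatchNormalization). Inference-only: BatchNorm
# runs with use_global_stats and there is no loss layer. With three groups of
# four pre-activation residual blocks at widths 160/320/640 on a 32x32 input
# and a 10-way output, the layout is consistent with WRN-28-10 on CIFAR-10.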
name: "wrn" | |
layer { | |
name: "data" | |
type: "Input" | |
top: "data" | |
input_param { shape: { dim: 1 dim: 3 dim: 32 dim: 32 } } | |
} | |
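# Stem: a single 3x3 convolution producing 16 channels at full 32x32 resolution.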
layer {
  type: "Convolution"
  name: "Convolution2DFunction-0-1"
  bottom: "data"
  top: "Convolution2DFunction-0-1"
  convolution_param {
    num_output: 16
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
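# Caffe convention used throughout: each FixedBatchNormalization maps to a
# BatchNorm layer (normalization only, with stored mean/variance since
# use_global_stats is set) followed by a Scale layer that supplies the learned
# per-channel gamma and, via bias_term: true, beta.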
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-1-1_bn"
  bottom: "Convolution2DFunction-0-1"
  top: "FixedBatchNormalization-1-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-1-1"
  bottom: "FixedBatchNormalization-1-1_bn"
  top: "FixedBatchNormalization-1-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-2-1"
  bottom: "FixedBatchNormalization-1-1"
  top: "ReLU-2-1"
}
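# Group 1: four residual blocks at 160 channels (16 x k with widen factor
# k = 10), 32x32 resolution, BN-ReLU-Conv (pre-activation) ordering. The first
# block widens 16 -> 160, so its shortcut is a 1x1 projection convolution
# (Convolution2DFunction-3-1) running in parallel with the 3x3 branch.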
layer {
  type: "Convolution"
  name: "Convolution2DFunction-3-1"
  bottom: "ReLU-2-1"
  top: "Convolution2DFunction-3-1"
  convolution_param {
    num_output: 160
    bias_term: false
    pad_w: 0
    pad_h: 0
    stride_w: 1
    stride_h: 1
    kernel_w: 1
    kernel_h: 1
  }
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-3-2"
  bottom: "ReLU-2-1"
  top: "Convolution2DFunction-3-2"
  convolution_param {
    num_output: 160
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-4-1_bn"
  bottom: "Convolution2DFunction-3-2"
  top: "FixedBatchNormalization-4-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-4-1"
  bottom: "FixedBatchNormalization-4-1_bn"
  top: "FixedBatchNormalization-4-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-5-1"
  bottom: "FixedBatchNormalization-4-1"
  top: "ReLU-5-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-6-1"
  bottom: "ReLU-5-1"
  top: "Convolution2DFunction-6-1"
  convolution_param {
    num_output: 160
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Eltwise"
  name: "_ + _-7-1"
  bottom: "Convolution2DFunction-6-1"
  bottom: "Convolution2DFunction-3-1"
  top: "_ + _-7-1"
}
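# Eltwise defaults to SUM, giving the residual addition. The remaining blocks
# in each group keep the channel count, so their shortcuts are plain identity
# connections: the post-activation tensor (e.g. ReLU-9-1 below) feeds the
# Eltwise directly.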
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-8-1_bn"
  bottom: "_ + _-7-1"
  top: "FixedBatchNormalization-8-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-8-1"
  bottom: "FixedBatchNormalization-8-1_bn"
  top: "FixedBatchNormalization-8-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-9-1"
  bottom: "FixedBatchNormalization-8-1"
  top: "ReLU-9-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-10-1"
  bottom: "ReLU-9-1"
  top: "Convolution2DFunction-10-1"
  convolution_param {
    num_output: 160
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-11-1_bn"
  bottom: "Convolution2DFunction-10-1"
  top: "FixedBatchNormalization-11-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-11-1"
  bottom: "FixedBatchNormalization-11-1_bn"
  top: "FixedBatchNormalization-11-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-12-1"
  bottom: "FixedBatchNormalization-11-1"
  top: "ReLU-12-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-13-1"
  bottom: "ReLU-12-1"
  top: "Convolution2DFunction-13-1"
  convolution_param {
    num_output: 160
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Eltwise"
  name: "_ + _-14-1"
  bottom: "Convolution2DFunction-13-1"
  bottom: "ReLU-9-1"
  top: "_ + _-14-1"
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-15-1_bn"
  bottom: "_ + _-14-1"
  top: "FixedBatchNormalization-15-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-15-1"
  bottom: "FixedBatchNormalization-15-1_bn"
  top: "FixedBatchNormalization-15-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-16-1"
  bottom: "FixedBatchNormalization-15-1"
  top: "ReLU-16-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-17-1"
  bottom: "ReLU-16-1"
  top: "Convolution2DFunction-17-1"
  convolution_param {
    num_output: 160
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-18-1_bn"
  bottom: "Convolution2DFunction-17-1"
  top: "FixedBatchNormalization-18-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-18-1"
  bottom: "FixedBatchNormalization-18-1_bn"
  top: "FixedBatchNormalization-18-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-19-1"
  bottom: "FixedBatchNormalization-18-1"
  top: "ReLU-19-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-20-1"
  bottom: "ReLU-19-1"
  top: "Convolution2DFunction-20-1"
  convolution_param {
    num_output: 160
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Eltwise"
  name: "_ + _-21-1"
  bottom: "Convolution2DFunction-20-1"
  bottom: "ReLU-16-1"
  top: "_ + _-21-1"
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-22-1_bn"
  bottom: "_ + _-21-1"
  top: "FixedBatchNormalization-22-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-22-1"
  bottom: "FixedBatchNormalization-22-1_bn"
  top: "FixedBatchNormalization-22-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-23-1"
  bottom: "FixedBatchNormalization-22-1"
  top: "ReLU-23-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-24-1"
  bottom: "ReLU-23-1"
  top: "Convolution2DFunction-24-1"
  convolution_param {
    num_output: 160
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-25-1_bn"
  bottom: "Convolution2DFunction-24-1"
  top: "FixedBatchNormalization-25-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-25-1"
  bottom: "FixedBatchNormalization-25-1_bn"
  top: "FixedBatchNormalization-25-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-26-1"
  bottom: "FixedBatchNormalization-25-1"
  top: "ReLU-26-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-27-1"
  bottom: "ReLU-26-1"
  top: "Convolution2DFunction-27-1"
  convolution_param {
    num_output: 160
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Eltwise"
  name: "_ + _-28-1"
  bottom: "Convolution2DFunction-27-1"
  bottom: "ReLU-23-1"
  top: "_ + _-28-1"
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-29-1_bn"
  bottom: "_ + _-28-1"
  top: "FixedBatchNormalization-29-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-29-1"
  bottom: "FixedBatchNormalization-29-1_bn"
  top: "FixedBatchNormalization-29-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-30-1"
  bottom: "FixedBatchNormalization-29-1"
  top: "ReLU-30-1"
}
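# Group 2: four residual blocks at 320 channels. The transition block
# downsamples 32x32 -> 16x16 with stride-2 convolutions; its shortcut is a
# stride-2 1x1 projection (Convolution2DFunction-31-1).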
layer {
  type: "Convolution"
  name: "Convolution2DFunction-31-1"
  bottom: "ReLU-30-1"
  top: "Convolution2DFunction-31-1"
  convolution_param {
    num_output: 320
    bias_term: false
    pad_w: 0
    pad_h: 0
    stride_w: 2
    stride_h: 2
    kernel_w: 1
    kernel_h: 1
  }
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-31-2"
  bottom: "ReLU-30-1"
  top: "Convolution2DFunction-31-2"
  convolution_param {
    num_output: 320
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 2
    stride_h: 2
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-32-1_bn"
  bottom: "Convolution2DFunction-31-2"
  top: "FixedBatchNormalization-32-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-32-1"
  bottom: "FixedBatchNormalization-32-1_bn"
  top: "FixedBatchNormalization-32-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-33-1"
  bottom: "FixedBatchNormalization-32-1"
  top: "ReLU-33-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-34-1"
  bottom: "ReLU-33-1"
  top: "Convolution2DFunction-34-1"
  convolution_param {
    num_output: 320
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Eltwise"
  name: "_ + _-35-1"
  bottom: "Convolution2DFunction-34-1"
  bottom: "Convolution2DFunction-31-1"
  top: "_ + _-35-1"
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-36-1_bn"
  bottom: "_ + _-35-1"
  top: "FixedBatchNormalization-36-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-36-1"
  bottom: "FixedBatchNormalization-36-1_bn"
  top: "FixedBatchNormalization-36-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-37-1"
  bottom: "FixedBatchNormalization-36-1"
  top: "ReLU-37-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-38-1"
  bottom: "ReLU-37-1"
  top: "Convolution2DFunction-38-1"
  convolution_param {
    num_output: 320
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-39-1_bn"
  bottom: "Convolution2DFunction-38-1"
  top: "FixedBatchNormalization-39-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-39-1"
  bottom: "FixedBatchNormalization-39-1_bn"
  top: "FixedBatchNormalization-39-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-40-1"
  bottom: "FixedBatchNormalization-39-1"
  top: "ReLU-40-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-41-1"
  bottom: "ReLU-40-1"
  top: "Convolution2DFunction-41-1"
  convolution_param {
    num_output: 320
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Eltwise"
  name: "_ + _-42-1"
  bottom: "Convolution2DFunction-41-1"
  bottom: "ReLU-37-1"
  top: "_ + _-42-1"
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-43-1_bn"
  bottom: "_ + _-42-1"
  top: "FixedBatchNormalization-43-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-43-1"
  bottom: "FixedBatchNormalization-43-1_bn"
  top: "FixedBatchNormalization-43-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-44-1"
  bottom: "FixedBatchNormalization-43-1"
  top: "ReLU-44-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-45-1"
  bottom: "ReLU-44-1"
  top: "Convolution2DFunction-45-1"
  convolution_param {
    num_output: 320
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-46-1_bn"
  bottom: "Convolution2DFunction-45-1"
  top: "FixedBatchNormalization-46-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-46-1"
  bottom: "FixedBatchNormalization-46-1_bn"
  top: "FixedBatchNormalization-46-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-47-1"
  bottom: "FixedBatchNormalization-46-1"
  top: "ReLU-47-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-48-1"
  bottom: "ReLU-47-1"
  top: "Convolution2DFunction-48-1"
  convolution_param {
    num_output: 320
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Eltwise"
  name: "_ + _-49-1"
  bottom: "Convolution2DFunction-48-1"
  bottom: "ReLU-44-1"
  top: "_ + _-49-1"
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-50-1_bn"
  bottom: "_ + _-49-1"
  top: "FixedBatchNormalization-50-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-50-1"
  bottom: "FixedBatchNormalization-50-1_bn"
  top: "FixedBatchNormalization-50-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-51-1"
  bottom: "FixedBatchNormalization-50-1"
  top: "ReLU-51-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-52-1"
  bottom: "ReLU-51-1"
  top: "Convolution2DFunction-52-1"
  convolution_param {
    num_output: 320
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-53-1_bn"
  bottom: "Convolution2DFunction-52-1"
  top: "FixedBatchNormalization-53-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-53-1"
  bottom: "FixedBatchNormalization-53-1_bn"
  top: "FixedBatchNormalization-53-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-54-1"
  bottom: "FixedBatchNormalization-53-1"
  top: "ReLU-54-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-55-1"
  bottom: "ReLU-54-1"
  top: "Convolution2DFunction-55-1"
  convolution_param {
    num_output: 320
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Eltwise"
  name: "_ + _-56-1"
  bottom: "Convolution2DFunction-55-1"
  bottom: "ReLU-51-1"
  top: "_ + _-56-1"
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-57-1_bn"
  bottom: "_ + _-56-1"
  top: "FixedBatchNormalization-57-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-57-1"
  bottom: "FixedBatchNormalization-57-1_bn"
  top: "FixedBatchNormalization-57-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-58-1"
  bottom: "FixedBatchNormalization-57-1"
  top: "ReLU-58-1"
}
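# Group 3: four residual blocks at 640 channels. The transition block
# downsamples 16x16 -> 8x8, again with a stride-2 1x1 projection shortcut
# (Convolution2DFunction-59-1).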
layer {
  type: "Convolution"
  name: "Convolution2DFunction-59-1"
  bottom: "ReLU-58-1"
  top: "Convolution2DFunction-59-1"
  convolution_param {
    num_output: 640
    bias_term: false
    pad_w: 0
    pad_h: 0
    stride_w: 2
    stride_h: 2
    kernel_w: 1
    kernel_h: 1
  }
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-59-2"
  bottom: "ReLU-58-1"
  top: "Convolution2DFunction-59-2"
  convolution_param {
    num_output: 640
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 2
    stride_h: 2
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-60-1_bn"
  bottom: "Convolution2DFunction-59-2"
  top: "FixedBatchNormalization-60-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-60-1"
  bottom: "FixedBatchNormalization-60-1_bn"
  top: "FixedBatchNormalization-60-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-61-1"
  bottom: "FixedBatchNormalization-60-1"
  top: "ReLU-61-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-62-1"
  bottom: "ReLU-61-1"
  top: "Convolution2DFunction-62-1"
  convolution_param {
    num_output: 640
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Eltwise"
  name: "_ + _-63-1"
  bottom: "Convolution2DFunction-62-1"
  bottom: "Convolution2DFunction-59-1"
  top: "_ + _-63-1"
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-64-1_bn"
  bottom: "_ + _-63-1"
  top: "FixedBatchNormalization-64-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-64-1"
  bottom: "FixedBatchNormalization-64-1_bn"
  top: "FixedBatchNormalization-64-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-65-1"
  bottom: "FixedBatchNormalization-64-1"
  top: "ReLU-65-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-66-1"
  bottom: "ReLU-65-1"
  top: "Convolution2DFunction-66-1"
  convolution_param {
    num_output: 640
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-67-1_bn"
  bottom: "Convolution2DFunction-66-1"
  top: "FixedBatchNormalization-67-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-67-1"
  bottom: "FixedBatchNormalization-67-1_bn"
  top: "FixedBatchNormalization-67-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-68-1"
  bottom: "FixedBatchNormalization-67-1"
  top: "ReLU-68-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-69-1"
  bottom: "ReLU-68-1"
  top: "Convolution2DFunction-69-1"
  convolution_param {
    num_output: 640
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Eltwise"
  name: "_ + _-70-1"
  bottom: "Convolution2DFunction-69-1"
  bottom: "ReLU-65-1"
  top: "_ + _-70-1"
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-71-1_bn"
  bottom: "_ + _-70-1"
  top: "FixedBatchNormalization-71-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-71-1"
  bottom: "FixedBatchNormalization-71-1_bn"
  top: "FixedBatchNormalization-71-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-72-1"
  bottom: "FixedBatchNormalization-71-1"
  top: "ReLU-72-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-73-1"
  bottom: "ReLU-72-1"
  top: "Convolution2DFunction-73-1"
  convolution_param {
    num_output: 640
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-74-1_bn"
  bottom: "Convolution2DFunction-73-1"
  top: "FixedBatchNormalization-74-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-74-1"
  bottom: "FixedBatchNormalization-74-1_bn"
  top: "FixedBatchNormalization-74-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-75-1"
  bottom: "FixedBatchNormalization-74-1"
  top: "ReLU-75-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-76-1"
  bottom: "ReLU-75-1"
  top: "Convolution2DFunction-76-1"
  convolution_param {
    num_output: 640
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Eltwise"
  name: "_ + _-77-1"
  bottom: "Convolution2DFunction-76-1"
  bottom: "ReLU-72-1"
  top: "_ + _-77-1"
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-78-1_bn"
  bottom: "_ + _-77-1"
  top: "FixedBatchNormalization-78-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-78-1"
  bottom: "FixedBatchNormalization-78-1_bn"
  top: "FixedBatchNormalization-78-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-79-1"
  bottom: "FixedBatchNormalization-78-1"
  top: "ReLU-79-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-80-1"
  bottom: "ReLU-79-1"
  top: "Convolution2DFunction-80-1"
  convolution_param {
    num_output: 640
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-81-1_bn"
  bottom: "Convolution2DFunction-80-1"
  top: "FixedBatchNormalization-81-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-81-1"
  bottom: "FixedBatchNormalization-81-1_bn"
  top: "FixedBatchNormalization-81-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-82-1"
  bottom: "FixedBatchNormalization-81-1"
  top: "ReLU-82-1"
}
layer {
  type: "Convolution"
  name: "Convolution2DFunction-83-1"
  bottom: "ReLU-82-1"
  top: "Convolution2DFunction-83-1"
  convolution_param {
    num_output: 640
    bias_term: false
    pad_w: 1
    pad_h: 1
    stride_w: 1
    stride_h: 1
    kernel_w: 3
    kernel_h: 3
  }
}
layer {
  type: "Eltwise"
  name: "_ + _-84-1"
  bottom: "Convolution2DFunction-83-1"
  bottom: "ReLU-79-1"
  top: "_ + _-84-1"
}
layer {
  type: "BatchNorm"
  name: "FixedBatchNormalization-85-1_bn"
  bottom: "_ + _-84-1"
  top: "FixedBatchNormalization-85-1_bn"
  batch_norm_param {
    use_global_stats: true
    eps: 2e-05
  }
}
layer {
  type: "Scale"
  name: "FixedBatchNormalization-85-1"
  bottom: "FixedBatchNormalization-85-1_bn"
  top: "FixedBatchNormalization-85-1"
  scale_param {
    axis: 1
    bias_term: true
  }
}
layer {
  type: "ReLU"
  name: "ReLU-86-1"
  bottom: "FixedBatchNormalization-85-1"
  top: "ReLU-86-1"
}
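# Head: 8x8 average pooling collapses the 640 feature maps (8x8 after two
# stride-2 stages) to 1x1, the Reshape flattens them to a 640-vector (its
# fixed dim: 1 matches the batch-1 input shape), and a 10-way inner product
# produces the class scores, sized for CIFAR-10.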
layer {
  type: "Pooling"
  name: "AveragePooling2D-87-1"
  bottom: "ReLU-86-1"
  top: "AveragePooling2D-87-1"
  pooling_param {
    pool: AVE
    pad_w: 0
    pad_h: 0
    stride_w: 8
    stride_h: 8
    kernel_w: 8
    kernel_h: 8
  }
}
layer {
  type: "Reshape"
  name: "Reshape-88-1"
  bottom: "AveragePooling2D-87-1"
  top: "Reshape-88-1"
  reshape_param {
    shape {
      dim: 1
      dim: -1
    }
  }
}
layer {
  type: "InnerProduct"
  name: "LinearFunction-89-1"
  bottom: "Reshape-88-1"
  top: "LinearFunction-89-1"
  inner_product_param {
    num_output: 10
    bias_term: true
  }
}
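# Minimal pycaffe usage sketch, kept in comments so this file stays a valid
# prototxt. Assumptions: the definition is saved as "wrn.prototxt" and a
# matching trained weights file "wrn.caffemodel" exists; both filenames are
# hypothetical.
#
#   import caffe
#   import numpy as np
#
#   net = caffe.Net("wrn.prototxt", "wrn.caffemodel", caffe.TEST)
#   # One 3x32x32 image, preprocessed however the weights expect.
#   net.blobs["data"].data[...] = np.random.rand(1, 3, 32, 32)
#   out = net.forward()
#   scores = out["LinearFunction-89-1"]  # shape (1, 10), raw class scores
#   print(scores.argmax())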