Created
March 20, 2018 02:32
-
-
Save walsvid/996548cff6bb82c5c66fe16983a6e6cc to your computer and use it in GitHub Desktop.
[SSH prototxt] #caffe #SSH
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| # ***************************************************************** # | |
| # SSH: Single Stage Headless Face Detector | |
| # Train Prototxt | |
| # Written by Mahyar Najibi | |
| # ***************************************************************** # | |
| name: "SSH" | |
| # layer { | |
| # name: 'input-data' | |
| # type: 'Python' | |
| # top: 'data' | |
| # top: 'im_info' | |
| # top: 'gt_boxes' | |
| # python_param { | |
| # module: 'roi_data_layer.layer' | |
| # layer: 'RoIDataLayer' | |
| # param_str: "'num_classes': 2" | |
| # } | |
| # } | |
| # NOTE(review): layers below consume 'gt_boxes' and 'im_info' (e.g. the anchor | |
| # target Python layers), which are only produced by the commented-out | |
| # 'input-data' RoIDataLayer above — this Data layer only tops 'data'/'label'. | |
| # Confirm which input layer is intended before training. | |
| layer { | |
| name: "data" | |
| type: "Data" | |
| include { | |
| phase: TRAIN | |
| } | |
| transform_param { | |
| crop_size: 224 | |
| mean_value: 104 | |
| mean_value: 117 | |
| mean_value: 123 | |
| mirror: true | |
| } | |
| data_param { | |
| source: "data_set/train_lmdb" | |
| batch_size: 128 | |
| backend: LMDB | |
| } | |
| top: "data" | |
| top: "label" | |
| } | |
| layer { | |
| name: "conv1_1" | |
| type: "Convolution" | |
| bottom: "data" | |
| top: "conv1_1" | |
| param { | |
| lr_mult: 0 | |
| decay_mult: 0 | |
| } | |
| param { | |
| lr_mult: 0 | |
| decay_mult: 0 | |
| } | |
| convolution_param { | |
| num_output: 64 | |
| pad: 1 | |
| kernel_size: 3 | |
| } | |
| } | |
| layer { | |
| name: "relu1_1" | |
| type: "ReLU" | |
| bottom: "conv1_1" | |
| top: "conv1_1" | |
| } | |
| layer { | |
| name: "conv1_2" | |
| type: "Convolution" | |
| bottom: "conv1_1" | |
| top: "conv1_2" | |
| param { | |
| lr_mult: 0 | |
| decay_mult: 0 | |
| } | |
| param { | |
| lr_mult: 0 | |
| decay_mult: 0 | |
| } | |
| convolution_param { | |
| num_output: 64 | |
| pad: 1 | |
| kernel_size: 3 | |
| } | |
| } | |
| layer { | |
| name: "relu1_2" | |
| type: "ReLU" | |
| bottom: "conv1_2" | |
| top: "conv1_2" | |
| } | |
| layer { | |
| name: "pool1" | |
| type: "Pooling" | |
| bottom: "conv1_2" | |
| top: "pool1" | |
| pooling_param { | |
| pool: MAX | |
| kernel_size: 2 | |
| stride: 2 | |
| } | |
| } | |
| layer { | |
| name: "conv2_1" | |
| type: "Convolution" | |
| bottom: "pool1" | |
| top: "conv2_1" | |
| param { | |
| lr_mult: 0 | |
| decay_mult: 0 | |
| } | |
| param { | |
| lr_mult: 0 | |
| decay_mult: 0 | |
| } | |
| convolution_param { | |
| num_output: 128 | |
| pad: 1 | |
| kernel_size: 3 | |
| } | |
| } | |
| layer { | |
| name: "relu2_1" | |
| type: "ReLU" | |
| bottom: "conv2_1" | |
| top: "conv2_1" | |
| } | |
| layer { | |
| name: "conv2_2" | |
| type: "Convolution" | |
| bottom: "conv2_1" | |
| top: "conv2_2" | |
| param { | |
| lr_mult: 0 | |
| decay_mult: 0 | |
| } | |
| param { | |
| lr_mult: 0 | |
| decay_mult: 0 | |
| } | |
| convolution_param { | |
| num_output: 128 | |
| pad: 1 | |
| kernel_size: 3 | |
| } | |
| } | |
| layer { | |
| name: "relu2_2" | |
| type: "ReLU" | |
| bottom: "conv2_2" | |
| top: "conv2_2" | |
| } | |
| layer { | |
| name: "pool2" | |
| type: "Pooling" | |
| bottom: "conv2_2" | |
| top: "pool2" | |
| pooling_param { | |
| pool: MAX | |
| kernel_size: 2 | |
| stride: 2 | |
| } | |
| } | |
| layer { | |
| name: "conv3_1" | |
| type: "Convolution" | |
| bottom: "pool2" | |
| top: "conv3_1" | |
| param { | |
| lr_mult: 1 | |
| } | |
| param { | |
| lr_mult: 2 | |
| } | |
| convolution_param { | |
| num_output: 256 | |
| pad: 1 | |
| kernel_size: 3 | |
| } | |
| } | |
| layer { | |
| name: "relu3_1" | |
| type: "ReLU" | |
| bottom: "conv3_1" | |
| top: "conv3_1" | |
| } | |
| layer { | |
| name: "conv3_2" | |
| type: "Convolution" | |
| bottom: "conv3_1" | |
| top: "conv3_2" | |
| param { | |
| lr_mult: 1 | |
| } | |
| param { | |
| lr_mult: 2 | |
| } | |
| convolution_param { | |
| num_output: 256 | |
| pad: 1 | |
| kernel_size: 3 | |
| } | |
| } | |
| layer { | |
| name: "relu3_2" | |
| type: "ReLU" | |
| bottom: "conv3_2" | |
| top: "conv3_2" | |
| } | |
| layer { | |
| name: "conv3_3" | |
| type: "Convolution" | |
| bottom: "conv3_2" | |
| top: "conv3_3" | |
| param { | |
| lr_mult: 1 | |
| } | |
| param { | |
| lr_mult: 2 | |
| } | |
| convolution_param { | |
| num_output: 256 | |
| pad: 1 | |
| kernel_size: 3 | |
| } | |
| } | |
| layer { | |
| name: "relu3_3" | |
| type: "ReLU" | |
| bottom: "conv3_3" | |
| top: "conv3_3" | |
| } | |
| layer { | |
| name: "pool3" | |
| type: "Pooling" | |
| bottom: "conv3_3" | |
| top: "pool3" | |
| pooling_param { | |
| pool: MAX | |
| kernel_size: 2 | |
| stride: 2 | |
| } | |
| } | |
| layer { | |
| name: "conv4_1" | |
| type: "Convolution" | |
| bottom: "pool3" | |
| top: "conv4_1" | |
| param { | |
| lr_mult: 1 | |
| } | |
| param { | |
| lr_mult: 2 | |
| } | |
| convolution_param { | |
| num_output: 512 | |
| pad: 1 | |
| kernel_size: 3 | |
| } | |
| } | |
| layer { | |
| name: "relu4_1" | |
| type: "ReLU" | |
| bottom: "conv4_1" | |
| top: "conv4_1" | |
| } | |
| layer { | |
| name: "conv4_2" | |
| type: "Convolution" | |
| bottom: "conv4_1" | |
| top: "conv4_2" | |
| param { | |
| lr_mult: 1 | |
| } | |
| param { | |
| lr_mult: 2 | |
| } | |
| convolution_param { | |
| num_output: 512 | |
| pad: 1 | |
| kernel_size: 3 | |
| } | |
| } | |
| layer { | |
| name: "relu4_2" | |
| type: "ReLU" | |
| bottom: "conv4_2" | |
| top: "conv4_2" | |
| } | |
| layer { | |
| name: "conv4_3" | |
| type: "Convolution" | |
| bottom: "conv4_2" | |
| top: "conv4_3" | |
| param { | |
| lr_mult: 1 | |
| } | |
| param { | |
| lr_mult: 2 | |
| } | |
| convolution_param { | |
| num_output: 512 | |
| pad: 1 | |
| kernel_size: 3 | |
| } | |
| } | |
| layer { | |
| name: "relu4_3" | |
| type: "ReLU" | |
| bottom: "conv4_3" | |
| top: "conv4_3" | |
| } | |
| layer { | |
| name: "pool4" | |
| type: "Pooling" | |
| bottom: "conv4_3" | |
| top: "pool4" | |
| pooling_param { | |
| pool: MAX | |
| kernel_size: 2 | |
| stride: 2 | |
| } | |
| } | |
| layer { | |
| name: "conv5_1" | |
| type: "Convolution" | |
| bottom: "pool4" | |
| top: "conv5_1" | |
| param { | |
| lr_mult: 1 | |
| } | |
| param { | |
| lr_mult: 2 | |
| } | |
| convolution_param { | |
| num_output: 512 | |
| pad: 1 | |
| kernel_size: 3 | |
| } | |
| } | |
| layer { | |
| name: "relu5_1" | |
| type: "ReLU" | |
| bottom: "conv5_1" | |
| top: "conv5_1" | |
| } | |
| layer { | |
| name: "conv5_2" | |
| type: "Convolution" | |
| bottom: "conv5_1" | |
| top: "conv5_2" | |
| param { | |
| lr_mult: 1 | |
| } | |
| param { | |
| lr_mult: 2 | |
| } | |
| convolution_param { | |
| num_output: 512 | |
| pad: 1 | |
| kernel_size: 3 | |
| } | |
| } | |
| layer { | |
| name: "relu5_2" | |
| type: "ReLU" | |
| bottom: "conv5_2" | |
| top: "conv5_2" | |
| } | |
| layer { | |
| name: "conv5_3" | |
| type: "Convolution" | |
| bottom: "conv5_2" | |
| top: "conv5_3" | |
| param { | |
| lr_mult: 1 | |
| } | |
| param { | |
| lr_mult: 2 | |
| } | |
| convolution_param { | |
| num_output: 512 | |
| pad: 1 | |
| kernel_size: 3 | |
| } | |
| } | |
| layer { | |
| name: "relu5_3" | |
| type: "ReLU" | |
| bottom: "conv5_3" | |
| top: "conv5_3" | |
| } | |
| #==========CONV4 Backwards for M1====== | |
| # reduce conv5_3 channels | |
| layer { | |
| name: "conv5_128" | |
| type: "Convolution" | |
| bottom: "conv5_3" | |
| top: "conv5_128" | |
| param { | |
| lr_mult: 1 | |
| } | |
| param { | |
| lr_mult: 2 | |
| } | |
| convolution_param { | |
| num_output: 128 | |
| pad: 0 | |
| kernel_size: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| layer { | |
| name: "conv5_128_relu" | |
| type: "ReLU" | |
| bottom: "conv5_128" | |
| top: "conv5_128" | |
| } | |
| # Upsample conv5_3 | |
| layer { | |
| name: "conv5_128_up" | |
| type: "Deconvolution" | |
| bottom: "conv5_128" | |
| top: "conv5_128_up" | |
| convolution_param { | |
| kernel_size: 4 | |
| stride: 2 | |
| num_output: 128 | |
| group: 128 | |
| pad: 1 | |
| weight_filler: { type: "bilinear" } | |
| bias_term: false | |
| } | |
| param { lr_mult: 0 decay_mult: 0 } | |
| } | |
| layer { | |
| name: "conv4_128" | |
| type: "Convolution" | |
| bottom: "conv4_3" | |
| top: "conv4_128" | |
| param { | |
| lr_mult: 1 | |
| } | |
| param { | |
| lr_mult: 2 | |
| } | |
| convolution_param { | |
| num_output: 128 | |
| pad: 0 | |
| kernel_size: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| layer { | |
| name: "conv4_128_relu" | |
| type: "ReLU" | |
| bottom: "conv4_128" | |
| top: "conv4_128" | |
| } | |
| # Crop conv5_3 | |
| layer { | |
| name: "conv5_128_crop" | |
| type: "Crop" | |
| bottom: "conv5_128_up" | |
| bottom: "conv4_128" | |
| top: "conv5_128_crop" | |
| crop_param { | |
| axis: 2 | |
| offset: 0 | |
| } | |
| } | |
| # Eltwise summation | |
| layer { | |
| name: "conv4_fuse" | |
| type: "Eltwise" | |
| bottom: "conv5_128_crop" | |
| bottom: "conv4_128" | |
| top: "conv4_fuse" | |
| eltwise_param { | |
| operation: SUM | |
| } | |
| } | |
| # Perform final 3x3 convolution | |
| layer { | |
| name: "conv4_fuse_final" | |
| type: "Convolution" | |
| bottom: "conv4_fuse" | |
| top: "conv4_fuse_final" | |
| param { | |
| lr_mult: 1 | |
| } | |
| param { | |
| lr_mult: 2 | |
| } | |
| convolution_param { | |
| num_output: 128 | |
| pad: 1 | |
| kernel_size: 3 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| layer { | |
| name: "conv4_fuse_final_relu" | |
| type: "ReLU" | |
| bottom: "conv4_fuse_final" | |
| top: "conv4_fuse_final" | |
| } | |
| #========== M3@SSH OHEM ========= | |
| layer { | |
| name: "pool6" | |
| type: "Pooling" | |
| bottom: "conv5_3" | |
| top: "pool6" | |
| pooling_param { | |
| pool: MAX | |
| kernel_size: 2 | |
| stride: 2 | |
| } | |
| } | |
| layer { | |
| name: "m3@ssh_3x3_ohem" | |
| type: "Convolution" | |
| bottom: "pool6" | |
| top: "m3@ssh_3x3_output_ohem" | |
| param {name:'m3@ssh_3x3_param1'} | |
| param {name:'m3@ssh_3x3_param2'} | |
| convolution_param { | |
| num_output: 256 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| propagate_down: false | |
| } | |
| # Dim red | |
| layer { | |
| name: "m3@ssh_dimred_ohem" | |
| type: "Convolution" | |
| bottom: "pool6" | |
| top: "m3@ssh_dimred_output_ohem" | |
| param {name: 'm3@ssh_dimred_param1' } | |
| param {name: 'm3@ssh_dimred_param2'} | |
| convolution_param { | |
| num_output: 128 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| propagate_down: false | |
| } | |
| layer { | |
| name: "m3@ssh_dimred_relu_ohem" | |
| type: "ReLU" | |
| bottom: "m3@ssh_dimred_output_ohem" | |
| top: "m3@ssh_dimred_output_ohem" | |
| propagate_down: false | |
| } | |
| # 5x5 | |
| layer { | |
| name: "m3@ssh_5x5_ohem" | |
| type: "Convolution" | |
| bottom: "m3@ssh_dimred_output_ohem" | |
| top: "m3@ssh_5x5_output_ohem" | |
| param {name: 'm3@ssh_5x5_param1'} | |
| param {name: 'm3@ssh_5x5_param2'} | |
| convolution_param { | |
| num_output: 128 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| propagate_down: false | |
| } | |
| #7x7 | |
| layer { | |
| name: "m3@ssh_7x7-1_ohem" | |
| type: "Convolution" | |
| bottom: "m3@ssh_dimred_output_ohem" | |
| top: "m3@ssh_7x7-1_output_ohem" | |
| param {name: 'm3@ssh_7x7-1_param1'} | |
| param {name: 'm3@ssh_7x7-1_param2'} | |
| convolution_param { | |
| num_output: 128 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| propagate_down: false | |
| } | |
| layer { | |
| name: "m3@ssh_7x7-1_relu_ohem" | |
| type: "ReLU" | |
| bottom: "m3@ssh_7x7-1_output_ohem" | |
| top: "m3@ssh_7x7-1_output_ohem" | |
| propagate_down: false | |
| } | |
| layer { | |
| name: "m3@ssh_7x7" | |
| type: "Convolution" | |
| bottom: "m3@ssh_7x7-1_output_ohem" | |
| top: "m3@ssh_7x7_output_ohem" | |
| param {name: 'm3@ssh_7x7_param1'} | |
| param {name: 'm3@ssh_7x7_param2'} | |
| convolution_param { | |
| num_output: 128 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| propagate_down: false | |
| } | |
| layer{ | |
| name: "m3@ssh_output_ohem" | |
| type: "Concat" | |
| bottom: "m3@ssh_3x3_output_ohem" | |
| bottom: "m3@ssh_5x5_output_ohem" | |
| bottom: "m3@ssh_7x7_output_ohem" | |
| top: "m3@ssh_output_ohem" | |
| concat_param{ | |
| axis: 1 | |
| } | |
| propagate_down: false | |
| propagate_down: false | |
| propagate_down: false | |
| } | |
| layer { | |
| name: "m3@ssh_output_relu_ohem" | |
| type: "ReLU" | |
| bottom: "m3@ssh_output_ohem" | |
| top: "m3@ssh_output_ohem" | |
| propagate_down: false | |
| } | |
| layer { | |
| name: "m3@ssh_cls_score_ohem" | |
| type: "Convolution" | |
| bottom: "m3@ssh_output_ohem" | |
| top: "m3@ssh_cls_score_output_ohem" | |
| param {name: 'm3@ssh_cls_score_param1'} | |
| param {name: 'm3@ssh_cls_score_param2'} | |
| convolution_param { | |
| num_output: 4 # 2(bg/fg) * 2(anchors) | |
| kernel_size: 1 pad: 0 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| propagate_down: false | |
| } | |
| layer { | |
| bottom: "m3@ssh_cls_score_output_ohem" | |
| top: "m3@ssh_cls_score_reshape_output_ohem" | |
| name: "m3@ssh_cls_reshape_ohem" | |
| type: "Reshape" | |
| reshape_param { shape { dim: 0 dim: 2 dim: -1 dim: 0 } } | |
| propagate_down: false | |
| } | |
| layer { | |
| name: "m3@ssh_cls_prob_ohem" | |
| type: "Softmax" | |
| bottom: "m3@ssh_cls_score_reshape_output_ohem" | |
| top: "m3@ssh_cls_prob_output_ohem" | |
| propagate_down: false | |
| } | |
| layer { | |
| name: 'm3@ssh_cls_prob_reshape_ohem' | |
| type: 'Reshape' | |
| bottom: 'm3@ssh_cls_prob_output_ohem' | |
| top: 'm3@ssh_cls_prob_reshape_output_ohem' | |
| reshape_param { shape { dim: 0 dim:4 dim: -1 dim: 0 } } | |
| propagate_down: false | |
| } | |
| #========== M3@SSH ========= | |
| layer { | |
| name: "m3@ssh_3x3" | |
| type: "Convolution" | |
| bottom: "pool6" | |
| top: "m3@ssh_3x3_output" | |
| param { lr_mult: 1.0 decay_mult: 1.0 name:'m3@ssh_3x3_param1'} | |
| param { lr_mult: 2.0 decay_mult: 0 name:'m3@ssh_3x3_param2'} | |
| convolution_param { | |
| num_output: 256 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| # Dim red | |
| layer { | |
| name: "m3@ssh_dimred" | |
| type: "Convolution" | |
| bottom: "pool6" | |
| top: "m3@ssh_dimred_output" | |
| param { lr_mult: 1.0 decay_mult: 1.0 name: 'm3@ssh_dimred_param1' } | |
| param { lr_mult: 2.0 decay_mult: 0 name: 'm3@ssh_dimred_param2'} | |
| convolution_param { | |
| num_output: 128 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| layer { | |
| name: "m3@ssh_dimred_relu" | |
| type: "ReLU" | |
| bottom: "m3@ssh_dimred_output" | |
| top: "m3@ssh_dimred_output" | |
| } | |
| # 5x5 | |
| layer { | |
| name: "m3@ssh_5x5" | |
| type: "Convolution" | |
| bottom: "m3@ssh_dimred_output" | |
| top: "m3@ssh_5x5_output" | |
| param { lr_mult: 1.0 decay_mult: 1.0 name: 'm3@ssh_5x5_param1'} | |
| param { lr_mult: 2.0 decay_mult: 0 name: 'm3@ssh_5x5_param2'} | |
| convolution_param { | |
| num_output: 128 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| #7x7 | |
| layer { | |
| name: "m3@ssh_7x7-1" | |
| type: "Convolution" | |
| bottom: "m3@ssh_dimred_output" | |
| top: "m3@ssh_7x7-1_output" | |
| param { lr_mult: 1.0 decay_mult: 1.0 name: 'm3@ssh_7x7-1_param1'} | |
| param { lr_mult: 2.0 decay_mult: 0 name: 'm3@ssh_7x7-1_param2'} | |
| convolution_param { | |
| num_output: 128 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| layer { | |
| name: "m3@ssh_7x7-1_relu" | |
| type: "ReLU" | |
| bottom: "m3@ssh_7x7-1_output" | |
| top: "m3@ssh_7x7-1_output" | |
| } | |
| layer { | |
| name: "m3@ssh_7x7" | |
| type: "Convolution" | |
| bottom: "m3@ssh_7x7-1_output" | |
| top: "m3@ssh_7x7_output" | |
| param { lr_mult: 1.0 decay_mult: 1.0 name: 'm3@ssh_7x7_param1'} | |
| param { lr_mult: 2.0 decay_mult: 0 name: 'm3@ssh_7x7_param2'} | |
| convolution_param { | |
| num_output: 128 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| layer{ | |
| name: "m3@ssh_output" | |
| type: "Concat" | |
| bottom: "m3@ssh_3x3_output" | |
| bottom: "m3@ssh_5x5_output" | |
| bottom: "m3@ssh_7x7_output" | |
| top: "m3@ssh_output" | |
| concat_param{ | |
| axis: 1 | |
| } | |
| } | |
| layer { | |
| name: "m3@ssh_output_relu" | |
| type: "ReLU" | |
| bottom: "m3@ssh_output" | |
| top: "m3@ssh_output" | |
| } | |
| layer { | |
| name: "m3@ssh_cls_score" | |
| type: "Convolution" | |
| bottom: "m3@ssh_output" | |
| top: "m3@ssh_cls_score_output" | |
| param { lr_mult: 1.0 decay_mult: 1.0 name: 'm3@ssh_cls_score_param1'} | |
| param { lr_mult: 2.0 decay_mult: 0 name: 'm3@ssh_cls_score_param2'} | |
| convolution_param { | |
| num_output: 4 # 2(bg/fg) * 2(anchors) | |
| kernel_size: 1 pad: 0 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| layer { | |
| name: "m3@ssh_bbox_pred" | |
| type: "Convolution" | |
| bottom: "m3@ssh_output" | |
| top: "m3@ssh_bbox_pred_output" | |
| param { lr_mult: 1.0 decay_mult: 1.0} | |
| param { lr_mult: 2.0 decay_mult: 0} | |
| convolution_param { | |
| num_output: 8 # 4(coords) * 2(anchors) | |
| kernel_size: 1 pad: 0 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| layer { | |
| bottom: "m3@ssh_cls_score_output" | |
| top: "m3@ssh_cls_score_reshape_output" | |
| name: "m3@ssh_cls_reshape" | |
| type: "Reshape" | |
| reshape_param { shape { dim: 0 dim: 2 dim: -1 dim: 0 } } | |
| } | |
| layer { | |
| name: 'm3@ssh_target_layer' | |
| type: 'Python' | |
| bottom: 'm3@ssh_cls_score_output' | |
| bottom: 'gt_boxes' | |
| bottom: 'im_info' | |
| bottom: 'data' | |
| bottom: 'm3@ssh_cls_prob_reshape_output_ohem' | |
| top: 'm3@ssh_anchor_labels' | |
| top: 'm3@ssh_reg_tragets' | |
| top: 'm3@ssh_reg_inside_weights' | |
| top: 'm3@ssh_reg_outside_weights' | |
| python_param { | |
| module: 'SSH.layers.anchor_target_layer' | |
| layer: 'AnchorTargetLayer' | |
| param_str: "{'feat_stride': 32,'scales': [16,32], 'ratios':[1,], 'allowed_border': 512}" | |
| } | |
| } | |
| layer { | |
| name: "m3@ssh_cls_loss" | |
| type: "SoftmaxWithLoss" | |
| bottom: "m3@ssh_cls_score_reshape_output" | |
| bottom: "m3@ssh_anchor_labels" | |
| propagate_down: 1 | |
| propagate_down: 0 | |
| top: "m3@ssh_cls_loss" | |
| loss_weight: 1 | |
| loss_param { | |
| ignore_label: -1 | |
| normalize: true | |
| } | |
| } | |
| layer { | |
| name: "m3@ssh_reg_loss" | |
| type: "SmoothL1Loss" | |
| bottom: "m3@ssh_bbox_pred_output" | |
| bottom: "m3@ssh_reg_tragets" | |
| bottom: 'm3@ssh_reg_inside_weights' | |
| bottom: 'm3@ssh_reg_outside_weights' | |
| top: "m3@ssh_reg_loss" | |
| loss_weight: 1 | |
| smooth_l1_loss_param { sigma: 3.0 } | |
| } | |
| #========= M2@SSH OHEM ========== | |
| layer { | |
| name: "m2@ssh_3x3_ohem" | |
| type: "Convolution" | |
| bottom: "conv5_3" | |
| top: "m2@ssh_3x3_output_ohem" | |
| propagate_down: false | |
| param {name: "m2@ssh_3x3_param1"} | |
| param {name: "m2@ssh_3x3_param2"} | |
| convolution_param { | |
| num_output: 256 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| # Dim red | |
| layer { | |
| name: "m2@ssh_dimred_ohem" | |
| type: "Convolution" | |
| bottom: "conv5_3" | |
| top: "m2@ssh_dimred_output_ohem" | |
| propagate_down: false | |
| param {name: "m2@ssh_dimred_param1"} | |
| param {name: "m2@ssh_dimred_param2"} | |
| convolution_param { | |
| num_output: 128 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| layer { | |
| name: "m2@ssh_dimred_relu_ohem" | |
| type: "ReLU" | |
| bottom: "m2@ssh_dimred_output_ohem" | |
| top: "m2@ssh_dimred_output_ohem" | |
| propagate_down: false | |
| } | |
| # 5x5 | |
| layer { | |
| name: "m2@ssh_5x5_ohem" | |
| type: "Convolution" | |
| bottom: "m2@ssh_dimred_output_ohem" | |
| top: "m2@ssh_5x5_output_ohem" | |
| propagate_down: false | |
| param {name: "m2@ssh_5x5_param1"} | |
| param {name: "m2@ssh_5x5_param2"} | |
| convolution_param { | |
| num_output: 128 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| #7x7 | |
| layer { | |
| name: "m2@ssh_7x7-1_ohem" | |
| type: "Convolution" | |
| bottom: "m2@ssh_dimred_output_ohem" | |
| top: "m2@ssh_7x7-1_output_ohem" | |
| propagate_down: false | |
| param {name: "m2@ssh_7x7-1_param1"} | |
| param {name: "m2@ssh_7x7-1_param2"} | |
| convolution_param { | |
| num_output: 128 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| layer { | |
| name: "m2@ssh_7x7-1_relu_ohem" | |
| type: "ReLU" | |
| bottom: "m2@ssh_7x7-1_output_ohem" | |
| top: "m2@ssh_7x7-1_output_ohem" | |
| propagate_down: false | |
| } | |
| layer { | |
| name: "m2@ssh_7x7" | |
| type: "Convolution" | |
| bottom: "m2@ssh_7x7-1_output_ohem" | |
| top: "m2@ssh_7x7_output_ohem" | |
| propagate_down: false | |
| param {name: "m2@ssh_7x7_param1"} | |
| param {name: "m2@ssh_7x7_param2"} | |
| convolution_param { | |
| num_output: 128 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| layer{ | |
| name: "m2@ssh_output_ohem" | |
| type: "Concat" | |
| bottom: "m2@ssh_3x3_output_ohem" | |
| bottom: "m2@ssh_5x5_output_ohem" | |
| bottom: "m2@ssh_7x7_output_ohem" | |
| top: "m2@ssh_output_ohem" | |
| concat_param{ | |
| axis: 1 | |
| } | |
| propagate_down: false | |
| propagate_down: false | |
| propagate_down: false | |
| } | |
| layer { | |
| name: "m2@ssh_output_relu_ohem" | |
| type: "ReLU" | |
| bottom: "m2@ssh_output_ohem" | |
| top: "m2@ssh_output_ohem" | |
| propagate_down: false | |
| } | |
| layer { | |
| name: "m2@ssh_cls_score_ohem" | |
| type: "Convolution" | |
| bottom: "m2@ssh_output_ohem" | |
| top: "m2@ssh_cls_score_output_ohem" | |
| param {name: "m2@ssh_cls_score_param1"} | |
| param {name: "m2@ssh_cls_score_param2"} | |
| convolution_param { | |
| num_output: 4 # 2(bg/fg) * 2(anchors) | |
| kernel_size: 1 pad: 0 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| propagate_down: false | |
| } | |
| layer { | |
| bottom: "m2@ssh_cls_score_output_ohem" | |
| top: "m2@ssh_cls_score_reshape_output_ohem" | |
| name: "m2@ssh_cls_reshape_ohem" | |
| type: "Reshape" | |
| reshape_param { shape { dim: 0 dim: 2 dim: -1 dim: 0 } } | |
| propagate_down: false | |
| } | |
| layer { | |
| name: "m2@ssh_cls_prob_ohem" | |
| type: "Softmax" | |
| bottom: "m2@ssh_cls_score_reshape_output_ohem" | |
| top: "m2@ssh_cls_prob_output_ohem" | |
| propagate_down: false | |
| } | |
| layer { | |
| name: 'm2@ssh_cls_prob_reshape_ohem' | |
| type: 'Reshape' | |
| bottom: 'm2@ssh_cls_prob_output_ohem' | |
| top: 'm2@ssh_cls_prob_reshape_output_ohem' | |
| reshape_param { shape { dim: 0 dim:4 dim: -1 dim: 0 } } | |
| propagate_down: false | |
| } | |
| #========== M2@SSH ========= | |
| layer { | |
| name: "m2@ssh_3x3" | |
| type: "Convolution" | |
| bottom: "conv5_3" | |
| top: "m2@ssh_3x3_output" | |
| param { lr_mult: 1.0 decay_mult: 1.0 name: "m2@ssh_3x3_param1"} | |
| param { lr_mult: 2.0 decay_mult: 0 name: "m2@ssh_3x3_param2"} | |
| convolution_param { | |
| num_output: 256 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| # Dim red | |
| layer { | |
| name: "m2@ssh_dimred" | |
| type: "Convolution" | |
| bottom: "conv5_3" | |
| top: "m2@ssh_dimred_output" | |
| param { lr_mult: 1.0 decay_mult: 1.0 name: "m2@ssh_dimred_param1"} | |
| param { lr_mult: 2.0 decay_mult: 0 name: "m2@ssh_dimred_param2"} | |
| convolution_param { | |
| num_output: 128 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| layer { | |
| name: "m2@ssh_dimred_relu" | |
| type: "ReLU" | |
| bottom: "m2@ssh_dimred_output" | |
| top: "m2@ssh_dimred_output" | |
| } | |
| # 5x5 | |
| layer { | |
| name: "m2@ssh_5x5" | |
| type: "Convolution" | |
| bottom: "m2@ssh_dimred_output" | |
| top: "m2@ssh_5x5_output" | |
| param { lr_mult: 1.0 decay_mult: 1.0 name: "m2@ssh_5x5_param1"} | |
| param { lr_mult: 2.0 decay_mult: 0 name: "m2@ssh_5x5_param2"} | |
| convolution_param { | |
| num_output: 128 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| #7x7 | |
| layer { | |
| name: "m2@ssh_7x7-1" | |
| type: "Convolution" | |
| bottom: "m2@ssh_dimred_output" | |
| top: "m2@ssh_7x7-1_output" | |
| param { lr_mult: 1.0 decay_mult: 1.0 name: "m2@ssh_7x7-1_param1"} | |
| param { lr_mult: 2.0 decay_mult: 0 name: "m2@ssh_7x7-1_param2"} | |
| convolution_param { | |
| num_output: 128 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| layer { | |
| name: "m2@ssh_7x7-1_relu" | |
| type: "ReLU" | |
| bottom: "m2@ssh_7x7-1_output" | |
| top: "m2@ssh_7x7-1_output" | |
| } | |
| layer { | |
| name: "m2@ssh_7x7" | |
| type: "Convolution" | |
| bottom: "m2@ssh_7x7-1_output" | |
| top: "m2@ssh_7x7_output" | |
| param { lr_mult: 1.0 decay_mult: 1.0 name: "m2@ssh_7x7_param1"} | |
| param { lr_mult: 2.0 decay_mult: 0 name: "m2@ssh_7x7_param2"} | |
| convolution_param { | |
| num_output: 128 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| layer{ | |
| name: "m2@ssh_output" | |
| type: "Concat" | |
| bottom: "m2@ssh_3x3_output" | |
| bottom: "m2@ssh_5x5_output" | |
| bottom: "m2@ssh_7x7_output" | |
| top: "m2@ssh_output" | |
| concat_param{ | |
| axis: 1 | |
| } | |
| } | |
| layer { | |
| name: "m2@ssh_output_relu" | |
| type: "ReLU" | |
| bottom: "m2@ssh_output" | |
| top: "m2@ssh_output" | |
| } | |
| layer { | |
| name: "m2@ssh_cls_score" | |
| type: "Convolution" | |
| bottom: "m2@ssh_output" | |
| top: "m2@ssh_cls_score_output" | |
| param { lr_mult: 1.0 decay_mult: 1.0 name: "m2@ssh_cls_score_param1"} | |
| param { lr_mult: 2.0 decay_mult: 0 name: "m2@ssh_cls_score_param2"} | |
| convolution_param { | |
| num_output: 4 # 2(bg/fg) * 2(anchors) | |
| kernel_size: 1 pad: 0 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| layer { | |
| name: "m2@ssh_bbox_pred" | |
| type: "Convolution" | |
| bottom: "m2@ssh_output" | |
| top: "m2@ssh_bbox_pred_output" | |
| param { lr_mult: 1.0 decay_mult: 1.0 } | |
| param { lr_mult: 2.0 decay_mult: 0 } | |
| convolution_param { | |
| num_output: 8 # 4(coords) * 2(anchors) | |
| kernel_size: 1 pad: 0 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| layer { | |
| bottom: "m2@ssh_cls_score_output" | |
| top: "m2@ssh_cls_score_reshape_output" | |
| name: "m2@ssh_cls_reshape" | |
| type: "Reshape" | |
| reshape_param { shape { dim: 0 dim: 2 dim: -1 dim: 0 } } | |
| } | |
| layer { | |
| name: 'm2@ssh_target_layer' | |
| type: 'Python' | |
| bottom: 'm2@ssh_cls_score_output' | |
| bottom: 'gt_boxes' | |
| bottom: 'im_info' | |
| bottom: 'data' | |
| bottom: 'm2@ssh_cls_prob_reshape_output_ohem' | |
| top: 'm2@ssh_anchor_labels' | |
| top: 'm2@ssh_reg_tragets' | |
| top: 'm2@ssh_reg_inside_weights' | |
| top: 'm2@ssh_reg_outside_weights' | |
| python_param { | |
| module: 'SSH.layers.anchor_target_layer' | |
| layer: 'AnchorTargetLayer' | |
| param_str: "{'feat_stride': 16,'scales': [4,8], 'ratios':[1,]}" | |
| } | |
| } | |
| layer { | |
| name: "m2@ssh_cls_loss" | |
| type: "SoftmaxWithLoss" | |
| bottom: "m2@ssh_cls_score_reshape_output" | |
| bottom: "m2@ssh_anchor_labels" | |
| propagate_down: 1 | |
| propagate_down: 0 | |
| top: "m2@ssh_cls_loss" | |
| loss_weight: 1 | |
| loss_param { | |
| ignore_label: -1 | |
| normalize: true | |
| } | |
| } | |
| layer { | |
| name: "m2@ssh_reg_loss" | |
| type: "SmoothL1Loss" | |
| bottom: "m2@ssh_bbox_pred_output" | |
| bottom: "m2@ssh_reg_tragets" | |
| bottom: 'm2@ssh_reg_inside_weights' | |
| bottom: 'm2@ssh_reg_outside_weights' | |
| top: "m2@ssh_reg_loss" | |
| loss_weight: 1 | |
| smooth_l1_loss_param { sigma: 3.0 } | |
| } | |
| #========== M1@SSH OHEM ========= | |
| layer { | |
| name: "m1@ssh_3x3_ohem" | |
| type: "Convolution" | |
| bottom: "conv4_fuse_final" | |
| top: "m1@ssh_3x3_output_ohem" | |
| param {name: "m1@ssh_3x3_param1"} | |
| param {name: "m1@ssh_3x3_param2"} | |
| convolution_param { | |
| num_output: 128 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| propagate_down: false | |
| } | |
| # Dim red | |
| layer { | |
| name: "m1@ssh_dimred_ohem" | |
| type: "Convolution" | |
| bottom: "conv4_fuse_final" | |
| top: "m1@ssh_dimred_output_ohem" | |
| param {name: "m1@ssh_dimred_param1"} | |
| param {name: "m1@ssh_dimred_param2"} | |
| convolution_param { | |
| num_output: 64 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| propagate_down: false | |
| } | |
| layer { | |
| name: "m1@ssh_dimred_relu_ohem" | |
| type: "ReLU" | |
| bottom: "m1@ssh_dimred_output_ohem" | |
| top: "m1@ssh_dimred_output_ohem" | |
| propagate_down: false | |
| } | |
| # 5x5 | |
| layer { | |
| name: "m1@ssh_5x5_ohem" | |
| type: "Convolution" | |
| bottom: "m1@ssh_dimred_output_ohem" | |
| top: "m1@ssh_5x5_output_ohem" | |
| param {name: "m1@ssh_5x5_param1"} | |
| param {name: "m1@ssh_5x5_param2"} | |
| convolution_param { | |
| num_output: 64 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| propagate_down: false | |
| } | |
| #7x7 | |
| # First 3x3 conv of the "7x7" context branch (two stacked 3x3 convs after | |
| # dimred give a 7x7 effective receptive field). | |
| layer { | |
| name: "m1@ssh_7x7-1_ohem" | |
| type: "Convolution" | |
| bottom: "m1@ssh_dimred_output_ohem" | |
| top: "m1@ssh_7x7-1_output_ohem" | |
| param {name: "m1@ssh_7x7-1_param1"} | |
| param {name: "m1@ssh_7x7-1_param2"} | |
| convolution_param { | |
| num_output: 64 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| propagate_down: false | |
| } | |
| layer { | |
| name: "m1@ssh_7x7-1_relu_ohem" | |
| type: "ReLU" | |
| bottom: "m1@ssh_7x7-1_output_ohem" | |
| top: "m1@ssh_7x7-1_output_ohem" | |
| propagate_down: false | |
| } | |
| # Second 3x3 conv of the OHEM "7x7" context branch; weights shared with the | |
| # main branch via the "m1@ssh_7x7_param*" specs. | |
| # FIX: layer renamed from "m1@ssh_7x7" to "m1@ssh_7x7_ohem" -- the original | |
| # name collided with the main-branch layer of the same name, breaking | |
| # name-based weight loading and violating the *_ohem naming used by every | |
| # other layer in this section. Tops/bottoms/param names are unchanged, so | |
| # topology and parameter sharing are unaffected. | |
| layer { | |
| name: "m1@ssh_7x7_ohem" | |
| type: "Convolution" | |
| bottom: "m1@ssh_7x7-1_output_ohem" | |
| top: "m1@ssh_7x7_output_ohem" | |
| param {name: "m1@ssh_7x7_param1"} | |
| param {name: "m1@ssh_7x7_param2"} | |
| convolution_param { | |
| num_output: 64 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| propagate_down: false | |
| } | |
| # Fuse the three context branches along the channel axis (128+64+64 = 256 | |
| # channels); one propagate_down entry per bottom blocks all gradients. | |
| layer{ | |
| name: "m1@ssh_output_ohem" | |
| type: "Concat" | |
| bottom: "m1@ssh_3x3_output_ohem" | |
| bottom: "m1@ssh_5x5_output_ohem" | |
| bottom: "m1@ssh_7x7_output_ohem" | |
| top: "m1@ssh_output_ohem" | |
| concat_param{ | |
| axis: 1 | |
| } | |
| propagate_down: false | |
| propagate_down: false | |
| propagate_down: false | |
| } | |
| layer { | |
| name: "m1@ssh_output_relu_ohem" | |
| type: "ReLU" | |
| bottom: "m1@ssh_output_ohem" | |
| top: "m1@ssh_output_ohem" | |
| propagate_down: false | |
| } | |
| # Per-anchor classification scores used for OHEM example selection. | |
| layer { | |
| name: "m1@ssh_cls_score_ohem" | |
| type: "Convolution" | |
| bottom: "m1@ssh_output_ohem" | |
| top: "m1@ssh_cls_score_output_ohem" | |
| param {name: "m1@ssh_cls_score_param1"} | |
| param {name: "m1@ssh_cls_score_param2"} | |
| convolution_param { | |
| num_output: 4 # 2(bg/fg) * 2(anchors: scales [1,2], ratio 1) | |
| kernel_size: 1 pad: 0 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| propagate_down: false | |
| } | |
| # Reshape (N,4,H,W) -> (N,2,2H,W) so Softmax normalizes over the 2 classes, | |
| # then reshape probabilities back to (N,4,H,W) for the target layer. | |
| # (dim: 0 copies the input dim, dim: -1 is inferred.) | |
| layer { | |
| bottom: "m1@ssh_cls_score_output_ohem" | |
| top: "m1@ssh_cls_score_reshape_output_ohem" | |
| name: "m1@ssh_cls_reshape_ohem" | |
| type: "Reshape" | |
| reshape_param { shape { dim: 0 dim: 2 dim: -1 dim: 0 } } | |
| propagate_down: false | |
| } | |
| layer { | |
| name: "m1@ssh_cls_prob_ohem" | |
| type: "Softmax" | |
| bottom: "m1@ssh_cls_score_reshape_output_ohem" | |
| top: "m1@ssh_cls_prob_output_ohem" | |
| propagate_down: false | |
| } | |
| layer { | |
| name: 'm1@ssh_cls_prob_reshape_ohem' | |
| type: 'Reshape' | |
| bottom: 'm1@ssh_cls_prob_output_ohem' | |
| top: 'm1@ssh_cls_prob_reshape_output_ohem' | |
| reshape_param { shape { dim: 0 dim:4 dim: -1 dim: 0 } } | |
| propagate_down: false | |
| } | |
| #========== M1@SSH ========= | |
| # Trainable M1 3x3 detection branch; params are named so the OHEM copy | |
| # above shares them. lr_mult 1/2 (weights/bias), no weight decay on bias. | |
| layer { | |
| name: "m1@ssh_3x3" | |
| type: "Convolution" | |
| bottom: "conv4_fuse_final" | |
| top: "m1@ssh_3x3_output" | |
| param { lr_mult: 1.0 decay_mult: 1.0 name: "m1@ssh_3x3_param1"} | |
| param { lr_mult: 2.0 decay_mult: 0 name: "m1@ssh_3x3_param2"} | |
| convolution_param { | |
| num_output: 128 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| # Dim red | |
| # Dimensionality reduction (128 -> 64 channels) for the context branches. | |
| layer { | |
| name: "m1@ssh_dimred" | |
| type: "Convolution" | |
| bottom: "conv4_fuse_final" | |
| top: "m1@ssh_dimred_output" | |
| param { lr_mult: 1.0 decay_mult: 1.0 name: "m1@ssh_dimred_param1"} | |
| param { lr_mult: 2.0 decay_mult: 0 name: "m1@ssh_dimred_param2"} | |
| convolution_param { | |
| num_output: 64 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| layer { | |
| name: "m1@ssh_dimred_relu" | |
| type: "ReLU" | |
| bottom: "m1@ssh_dimred_output" | |
| top: "m1@ssh_dimred_output" | |
| } | |
| # 5x5 | |
| # "5x5" context branch: 3x3 conv on the dimred output (5x5 effective | |
| # receptive field; the kernel itself is 3x3). | |
| layer { | |
| name: "m1@ssh_5x5" | |
| type: "Convolution" | |
| bottom: "m1@ssh_dimred_output" | |
| top: "m1@ssh_5x5_output" | |
| param { lr_mult: 1.0 decay_mult: 1.0 name: "m1@ssh_5x5_param1"} | |
| param { lr_mult: 2.0 decay_mult: 0 name: "m1@ssh_5x5_param2"} | |
| convolution_param { | |
| num_output: 64 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| #7x7 | |
| # First 3x3 conv of the "7x7" context branch (two stacked 3x3 convs give a | |
| # 7x7 effective receptive field). | |
| layer { | |
| name: "m1@ssh_7x7-1" | |
| type: "Convolution" | |
| bottom: "m1@ssh_dimred_output" | |
| top: "m1@ssh_7x7-1_output" | |
| param { lr_mult: 1.0 decay_mult: 1.0 name: "m1@ssh_7x7-1_param1"} | |
| param { lr_mult: 2.0 decay_mult: 0 name: "m1@ssh_7x7-1_param2"} | |
| convolution_param { | |
| num_output: 64 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| layer { | |
| name: "m1@ssh_7x7-1_relu" | |
| type: "ReLU" | |
| bottom: "m1@ssh_7x7-1_output" | |
| top: "m1@ssh_7x7-1_output" | |
| } | |
| # Second 3x3 conv of the "7x7" context branch. | |
| layer { | |
| name: "m1@ssh_7x7" | |
| type: "Convolution" | |
| bottom: "m1@ssh_7x7-1_output" | |
| top: "m1@ssh_7x7_output" | |
| param { lr_mult: 1.0 decay_mult: 1.0 name: "m1@ssh_7x7_param1"} | |
| param { lr_mult: 2.0 decay_mult: 0 name: "m1@ssh_7x7_param2"} | |
| convolution_param { | |
| num_output: 64 | |
| kernel_size: 3 pad: 1 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| # Fuse the three context branches along the channel axis | |
| # (128+64+64 = 256 channels). | |
| layer{ | |
| name: "m1@ssh_output" | |
| type: "Concat" | |
| bottom: "m1@ssh_3x3_output" | |
| bottom: "m1@ssh_5x5_output" | |
| bottom: "m1@ssh_7x7_output" | |
| top: "m1@ssh_output" | |
| concat_param{ | |
| axis: 1 | |
| } | |
| } | |
| layer { | |
| name: "m1@ssh_output_relu" | |
| type: "ReLU" | |
| bottom: "m1@ssh_output" | |
| top: "m1@ssh_output" | |
| } | |
| # Per-anchor classification head (trainable; shares params with the OHEM | |
| # scoring copy above). | |
| layer { | |
| name: "m1@ssh_cls_score" | |
| type: "Convolution" | |
| bottom: "m1@ssh_output" | |
| top: "m1@ssh_cls_score_output" | |
| param { lr_mult: 1.0 decay_mult: 1.0 name: "m1@ssh_cls_score_param1"} | |
| param { lr_mult: 2.0 decay_mult: 0 name: "m1@ssh_cls_score_param2"} | |
| convolution_param { | |
| num_output: 4 # 2(bg/fg) * 2(anchors: scales [1,2], ratio 1) | |
| kernel_size: 1 pad: 0 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| # Per-anchor box regression head (4 offsets per anchor). | |
| layer { | |
| name: "m1@ssh_bbox_pred" | |
| type: "Convolution" | |
| bottom: "m1@ssh_output" | |
| top: "m1@ssh_bbox_pred_output" | |
| param { lr_mult: 1.0 decay_mult: 1.0 } | |
| param { lr_mult: 2.0 decay_mult: 0 } | |
| convolution_param { | |
| num_output: 8 # 4(box deltas) * 2(anchors) | |
| kernel_size: 1 pad: 0 stride: 1 | |
| weight_filler { type: "gaussian" std: 0.01 } | |
| bias_filler { type: "constant" value: 0 } | |
| } | |
| } | |
| # Reshape (N,4,H,W) -> (N,2,2H,W) so SoftmaxWithLoss sees 2 classes. | |
| layer { | |
| bottom: "m1@ssh_cls_score_output" | |
| top: "m1@ssh_cls_score_reshape_output" | |
| name: "m1@ssh_cls_reshape" | |
| type: "Reshape" | |
| reshape_param { shape { dim: 0 dim: 2 dim: -1 dim: 0 } } | |
| } | |
| # Python layer assigning anchor labels and regression targets for M1 | |
| # (stride 8, 2 anchors: scales [1,2], ratio 1); the OHEM probabilities | |
| # select which examples contribute. | |
| # NOTE(review): 'gt_boxes' and 'im_info' are only produced by the | |
| # 'input-data' Python layer commented out at the top of this file -- the | |
| # active Data layer outputs just data/label, so this net as-is appears to | |
| # have no producer for these blobs; confirm which input layer is intended. | |
| # NOTE(review): top 'm1@ssh_reg_tragets' is a typo for "targets", kept | |
| # because the reg-loss layer below consumes it under this exact name. | |
| layer { | |
| name: 'm1@ssh_target_layer' | |
| type: 'Python' | |
| bottom: 'm1@ssh_cls_score_output' | |
| bottom: 'gt_boxes' | |
| bottom: 'im_info' | |
| bottom: 'data' | |
| bottom: 'm1@ssh_cls_prob_reshape_output_ohem' | |
| top: 'm1@ssh_anchor_labels' | |
| top: 'm1@ssh_reg_tragets' | |
| top: 'm1@ssh_reg_inside_weights' | |
| top: 'm1@ssh_reg_outside_weights' | |
| python_param { | |
| module: 'SSH.layers.anchor_target_layer' | |
| layer: 'AnchorTargetLayer' | |
| param_str: "{'feat_stride': 8,'scales': [1,2], 'ratios':[1,]}" | |
| } | |
| } | |
| # Classification loss: backprop into the scores only (propagate_down 1/0), | |
| # anchors labeled -1 ("don't care") are ignored. | |
| layer { | |
| name: "m1@ssh_cls_loss" | |
| type: "SoftmaxWithLoss" | |
| bottom: "m1@ssh_cls_score_reshape_output" | |
| bottom: "m1@ssh_anchor_labels" | |
| propagate_down: 1 | |
| propagate_down: 0 | |
| top: "m1@ssh_cls_loss" | |
| loss_weight: 1 | |
| loss_param { | |
| ignore_label: -1 | |
| normalize: true | |
| } | |
| } | |
| layer { | |
| name: "m1@ssh_reg_loss" | |
| type: "SmoothL1Loss" | |
| bottom: "m1@ssh_bbox_pred_output" | |
| bottom: "m1@ssh_reg_tragets" | |
| bottom: 'm1@ssh_reg_inside_weights' | |
| bottom: 'm1@ssh_reg_outside_weights' | |
| top: "m1@ssh_reg_loss" | |
| loss_weight: 1 | |
| smooth_l1_loss_param { sigma: 3.0 } | |
| } |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment