name: "resnet_cifar10"
layer {
name: "data"
type: "Input"
top: "Data1"
top: "Data2"
input_param { shape: { dim: 1 dim: 3 dim: 28 dim: 28 } }
}
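# Stem: 3x3 convolution with 16 filters, followed by BatchNorm, Scale and ReLU.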
layer {
name: "Convolution1"
type: "Convolution"
bottom: "Data1"
top: "Convolution1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 16
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.118
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "BatchNorm1"
type: "BatchNorm"
bottom: "Convolution1"
top: "Convolution1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale1"
type: "Scale"
bottom: "Convolution1"
top: "Convolution1"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU1"
type: "ReLU"
bottom: "Convolution1"
top: "Convolution1"
}
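# Stage 1: three residual blocks with 16 filters (Convolution2-7); identity
# shortcuts summed via Eltwise1-3.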
layer {
name: "Convolution2"
type: "Convolution"
bottom: "Convolution1"
top: "Convolution2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 16
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.118
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "BatchNorm2"
type: "BatchNorm"
bottom: "Convolution2"
top: "Convolution2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale2"
type: "Scale"
bottom: "Convolution2"
top: "Convolution2"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU2"
type: "ReLU"
bottom: "Convolution2"
top: "Convolution2"
}
layer {
name: "Convolution3"
type: "Convolution"
bottom: "Convolution2"
top: "Convolution3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 16
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.118
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "BatchNorm3"
type: "BatchNorm"
bottom: "Convolution3"
top: "Convolution3"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale3"
type: "Scale"
bottom: "Convolution3"
top: "Convolution3"
scale_param {
bias_term: true
}
}
layer {
name: "Eltwise1"
type: "Eltwise"
bottom: "Convolution1"
bottom: "Convolution3"
top: "Eltwise1"
eltwise_param {
operation: SUM
}
}
layer {
name: "ReLU3"
type: "ReLU"
bottom: "Eltwise1"
top: "Eltwise1"
}
layer {
name: "Convolution4"
type: "Convolution"
bottom: "Eltwise1"
top: "Convolution4"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 16
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.118
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "BatchNorm4"
type: "BatchNorm"
bottom: "Convolution4"
top: "Convolution4"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale4"
type: "Scale"
bottom: "Convolution4"
top: "Convolution4"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU4"
type: "ReLU"
bottom: "Convolution4"
top: "Convolution4"
}
layer {
name: "Convolution5"
type: "Convolution"
bottom: "Convolution4"
top: "Convolution5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 16
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.118
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "BatchNorm5"
type: "BatchNorm"
bottom: "Convolution5"
top: "Convolution5"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale5"
type: "Scale"
bottom: "Convolution5"
top: "Convolution5"
scale_param {
bias_term: true
}
}
layer {
name: "Eltwise2"
type: "Eltwise"
bottom: "Eltwise1"
bottom: "Convolution5"
top: "Eltwise2"
eltwise_param {
operation: SUM
}
}
layer {
name: "ReLU5"
type: "ReLU"
bottom: "Eltwise2"
top: "Eltwise2"
}
layer {
name: "Convolution6"
type: "Convolution"
bottom: "Eltwise2"
top: "Convolution6"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 16
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.118
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "BatchNorm6"
type: "BatchNorm"
bottom: "Convolution6"
top: "Convolution6"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale6"
type: "Scale"
bottom: "Convolution6"
top: "Convolution6"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU6"
type: "ReLU"
bottom: "Convolution6"
top: "Convolution6"
}
layer {
name: "Convolution7"
type: "Convolution"
bottom: "Convolution6"
top: "Convolution7"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 16
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.118
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "BatchNorm7"
type: "BatchNorm"
bottom: "Convolution7"
top: "Convolution7"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale7"
type: "Scale"
bottom: "Convolution7"
top: "Convolution7"
scale_param {
bias_term: true
}
}
layer {
name: "Eltwise3"
type: "Eltwise"
bottom: "Eltwise2"
bottom: "Convolution7"
top: "Eltwise3"
eltwise_param {
operation: SUM
}
}
layer {
name: "ReLU7"
type: "ReLU"
bottom: "Eltwise3"
top: "Eltwise3"
}
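# Stage 2: three residual blocks with 32 filters, downsampling by stride 2 in
# the first block; Convolution8 is the 1x1 stride-2 projection shortcut.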
layer {
name: "Convolution8"
type: "Convolution"
bottom: "Eltwise3"
top: "Convolution8"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 0
kernel_size: 1
stride: 2
weight_filler {
type: "gaussian"
std: 0.25
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "BatchNorm8"
type: "BatchNorm"
bottom: "Convolution8"
top: "Convolution8"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale8"
type: "Scale"
bottom: "Convolution8"
top: "Convolution8"
scale_param {
bias_term: true
}
}
layer {
name: "Convolution9"
type: "Convolution"
bottom: "Eltwise3"
top: "Convolution9"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 1
kernel_size: 3
stride: 2
weight_filler {
type: "gaussian"
std: 0.083
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "BatchNorm9"
type: "BatchNorm"
bottom: "Convolution9"
top: "Convolution9"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale9"
type: "Scale"
bottom: "Convolution9"
top: "Convolution9"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU8"
type: "ReLU"
bottom: "Convolution9"
top: "Convolution9"
}
layer {
name: "Convolution10"
type: "Convolution"
bottom: "Convolution9"
top: "Convolution10"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.083
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "BatchNorm10"
type: "BatchNorm"
bottom: "Convolution10"
top: "Convolution10"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale10"
type: "Scale"
bottom: "Convolution10"
top: "Convolution10"
scale_param {
bias_term: true
}
}
layer {
name: "Eltwise4"
type: "Eltwise"
bottom: "Convolution8"
bottom: "Convolution10"
top: "Eltwise4"
eltwise_param {
operation: SUM
}
}
layer {
name: "ReLU9"
type: "ReLU"
bottom: "Eltwise4"
top: "Eltwise4"
}
layer {
name: "Convolution11"
type: "Convolution"
bottom: "Eltwise4"
top: "Convolution11"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.083
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "BatchNorm11"
type: "BatchNorm"
bottom: "Convolution11"
top: "Convolution11"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale11"
type: "Scale"
bottom: "Convolution11"
top: "Convolution11"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU10"
type: "ReLU"
bottom: "Convolution11"
top: "Convolution11"
}
layer {
name: "Convolution12"
type: "Convolution"
bottom: "Convolution11"
top: "Convolution12"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.083
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "BatchNorm12"
type: "BatchNorm"
bottom: "Convolution12"
top: "Convolution12"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale12"
type: "Scale"
bottom: "Convolution12"
top: "Convolution12"
scale_param {
bias_term: true
}
}
layer {
name: "Eltwise5"
type: "Eltwise"
bottom: "Eltwise4"
bottom: "Convolution12"
top: "Eltwise5"
eltwise_param {
operation: SUM
}
}
layer {
name: "ReLU11"
type: "ReLU"
bottom: "Eltwise5"
top: "Eltwise5"
}
layer {
name: "Convolution13"
type: "Convolution"
bottom: "Eltwise5"
top: "Convolution13"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.083
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "BatchNorm13"
type: "BatchNorm"
bottom: "Convolution13"
top: "Convolution13"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale13"
type: "Scale"
bottom: "Convolution13"
top: "Convolution13"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU12"
type: "ReLU"
bottom: "Convolution13"
top: "Convolution13"
}
layer {
name: "Convolution14"
type: "Convolution"
bottom: "Convolution13"
top: "Convolution14"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 32
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.083
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "BatchNorm14"
type: "BatchNorm"
bottom: "Convolution14"
top: "Convolution14"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale14"
type: "Scale"
bottom: "Convolution14"
top: "Convolution14"
scale_param {
bias_term: true
}
}
layer {
name: "Eltwise6"
type: "Eltwise"
bottom: "Eltwise5"
bottom: "Convolution14"
top: "Eltwise6"
eltwise_param {
operation: SUM
}
}
layer {
name: "ReLU13"
type: "ReLU"
bottom: "Eltwise6"
top: "Eltwise6"
}
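# Stage 3: three residual blocks with 64 filters; Convolution15 is the 1x1
# stride-2 projection shortcut for the first block.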
layer {
name: "Convolution15"
type: "Convolution"
bottom: "Eltwise6"
top: "Convolution15"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 0
kernel_size: 1
stride: 2
weight_filler {
type: "gaussian"
std: 0.176776695297
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "BatchNorm15"
type: "BatchNorm"
bottom: "Convolution15"
top: "Convolution15"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale15"
type: "Scale"
bottom: "Convolution15"
top: "Convolution15"
scale_param {
bias_term: true
}
}
layer {
name: "Convolution16"
type: "Convolution"
bottom: "Eltwise6"
top: "Convolution16"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
stride: 2
weight_filler {
type: "gaussian"
std: 0.059
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "BatchNorm16"
type: "BatchNorm"
bottom: "Convolution16"
top: "Convolution16"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale16"
type: "Scale"
bottom: "Convolution16"
top: "Convolution16"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU14"
type: "ReLU"
bottom: "Convolution16"
top: "Convolution16"
}
layer {
name: "Convolution17"
type: "Convolution"
bottom: "Convolution16"
top: "Convolution17"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.059
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "BatchNorm17"
type: "BatchNorm"
bottom: "Convolution17"
top: "Convolution17"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale17"
type: "Scale"
bottom: "Convolution17"
top: "Convolution17"
scale_param {
bias_term: true
}
}
layer {
name: "Eltwise7"
type: "Eltwise"
bottom: "Convolution15"
bottom: "Convolution17"
top: "Eltwise7"
eltwise_param {
operation: SUM
}
}
layer {
name: "ReLU15"
type: "ReLU"
bottom: "Eltwise7"
top: "Eltwise7"
}
layer {
name: "Convolution18"
type: "Convolution"
bottom: "Eltwise7"
top: "Convolution18"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.059
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "BatchNorm18"
type: "BatchNorm"
bottom: "Convolution18"
top: "Convolution18"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale18"
type: "Scale"
bottom: "Convolution18"
top: "Convolution18"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU16"
type: "ReLU"
bottom: "Convolution18"
top: "Convolution18"
}
layer {
name: "Convolution19"
type: "Convolution"
bottom: "Convolution18"
top: "Convolution19"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.059
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "BatchNorm19"
type: "BatchNorm"
bottom: "Convolution19"
top: "Convolution19"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale19"
type: "Scale"
bottom: "Convolution19"
top: "Convolution19"
scale_param {
bias_term: true
}
}
layer {
name: "Eltwise8"
type: "Eltwise"
bottom: "Eltwise7"
bottom: "Convolution19"
top: "Eltwise8"
eltwise_param {
operation: SUM
}
}
layer {
name: "ReLU17"
type: "ReLU"
bottom: "Eltwise8"
top: "Eltwise8"
}
layer {
name: "Convolution20"
type: "Convolution"
bottom: "Eltwise8"
top: "Convolution20"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.059
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "BatchNorm20"
type: "BatchNorm"
bottom: "Convolution20"
top: "Convolution20"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale20"
type: "Scale"
bottom: "Convolution20"
top: "Convolution20"
scale_param {
bias_term: true
}
}
layer {
name: "ReLU18"
type: "ReLU"
bottom: "Convolution20"
top: "Convolution20"
}
layer {
name: "Convolution21"
type: "Convolution"
bottom: "Convolution20"
top: "Convolution21"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.059
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "BatchNorm21"
type: "BatchNorm"
bottom: "Convolution21"
top: "Convolution21"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "Scale21"
type: "Scale"
bottom: "Convolution21"
top: "Convolution21"
scale_param {
bias_term: true
}
}
layer {
name: "Eltwise9"
type: "Eltwise"
bottom: "Eltwise8"
bottom: "Convolution21"
top: "Eltwise9"
eltwise_param {
operation: SUM
}
}
layer {
name: "ReLU19"
type: "ReLU"
bottom: "Eltwise9"
top: "Eltwise9"
}
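# Head: global average pooling, 10-way InnerProduct classifier, Softmax
# probabilities (the training loss layer below is commented out).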
layer {
name: "Pooling1"
type: "Pooling"
bottom: "Eltwise9"
top: "Pooling1"
pooling_param {
pool: AVE
global_pooling: true
}
}
layer {
name: "InnerProduct1"
type: "InnerProduct"
bottom: "Pooling1"
top: "InnerProduct1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 1
}
inner_product_param {
num_output: 10
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
#layer {
# name: "SoftmaxWithLoss1"
# type: "SoftmaxWithLoss"
# bottom: "InnerProduct1"
# bottom: "Data2"
# top: "SoftmaxWithLoss1"
#}
layer {
bottom: "InnerProduct1"
top: "prob"
name: "prob"
type: "Softmax"
}
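For reference, a minimal sketch of running this deploy definition with pycaffe. The file names resnet_cifar10_deploy.prototxt and resnet_cifar10.caffemodel are placeholders, and the random array stands in for a preprocessed 28x28 image; adapt both to your setup.

import numpy as np
import caffe

caffe.set_mode_cpu()  # or caffe.set_mode_gpu() / caffe.set_device(0)

# Load the network in TEST phase (both file names are placeholders).
net = caffe.Net('resnet_cifar10_deploy.prototxt',
                'resnet_cifar10.caffemodel',
                caffe.TEST)

# The Input layer declares a 1x3x28x28 blob named "Data1"; feed data of that shape.
image = np.random.rand(1, 3, 28, 28).astype(np.float32)  # stand-in for a real image
net.blobs['Data1'].data[...] = image

# Forward pass; 'prob' comes from the final Softmax layer.
# (The unconsumed "Data2" blob also appears among the outputs.)
out = net.forward()
probs = out['prob'][0]
print('predicted class:', probs.argmax())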