From b9332698fe32968a6425a18998f3574c5166f930 Mon Sep 17 00:00:00 2001
From: Davis King
Date: Mon, 23 May 2016 22:01:47 -0400
Subject: [PATCH] updated example

---
 examples/dnn_mnist_advanced_ex.cpp | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/examples/dnn_mnist_advanced_ex.cpp b/examples/dnn_mnist_advanced_ex.cpp
index 800d9aadb..ef4e0acae 100644
--- a/examples/dnn_mnist_advanced_ex.cpp
+++ b/examples/dnn_mnist_advanced_ex.cpp
@@ -198,32 +198,32 @@ int main(int argc, char** argv) try
     layer<2>   avg_pool (nr=0, nc=0, stride_y=1, stride_x=1, padding_y=0, padding_x=0)
     layer<3>   prelu    (initial_param_value=0.2)
     layer<4>   add_prev
-    layer<5>   bn_con   eps=1e-05 learning_rate_mult=1 weight_decay_mult=0
+    layer<5>   bn_con   eps=1e-05 learning_rate_mult=1 weight_decay_mult=0 bias_learning_rate_mult=1 bias_weight_decay_mult=1
     layer<6>   con      (num_filters=8, nr=3, nc=3, stride_y=1, stride_x=1, padding_y=1, padding_x=1) learning_rate_mult=1 weight_decay_mult=1 bias_learning_rate_mult=1 bias_weight_decay_mult=0
     layer<7>   prelu    (initial_param_value=0.25)
-    layer<8>   bn_con   eps=1e-05 learning_rate_mult=1 weight_decay_mult=0
+    layer<8>   bn_con   eps=1e-05 learning_rate_mult=1 weight_decay_mult=0 bias_learning_rate_mult=1 bias_weight_decay_mult=1
     layer<9>   con      (num_filters=8, nr=3, nc=3, stride_y=1, stride_x=1, padding_y=1, padding_x=1) learning_rate_mult=1 weight_decay_mult=1 bias_learning_rate_mult=1 bias_weight_decay_mult=0
     layer<10>  tag1
     ...
     layer<34>  relu
-    layer<35>  bn_con   eps=1e-05 learning_rate_mult=1 weight_decay_mult=0
+    layer<35>  bn_con   eps=1e-05 learning_rate_mult=1 weight_decay_mult=0 bias_learning_rate_mult=1 bias_weight_decay_mult=1
     layer<36>  con      (num_filters=8, nr=3, nc=3, stride_y=2, stride_x=2, padding_y=0, padding_x=0) learning_rate_mult=1 weight_decay_mult=1 bias_learning_rate_mult=1 bias_weight_decay_mult=0
     layer<37>  tag1
     layer<38>  tag4
     layer<39>  prelu    (initial_param_value=0.3)
     layer<40>  add_prev
-    layer<41>  bn_con   eps=1e-05 learning_rate_mult=1 weight_decay_mult=0
+    layer<41>  bn_con   eps=1e-05 learning_rate_mult=1 weight_decay_mult=0 bias_learning_rate_mult=1 bias_weight_decay_mult=1
     ...
     layer<118> relu
-    layer<119> bn_con   eps=1e-05 learning_rate_mult=1 weight_decay_mult=0
+    layer<119> bn_con   eps=1e-05 learning_rate_mult=1 weight_decay_mult=0 bias_learning_rate_mult=1 bias_weight_decay_mult=1
     layer<120> con      (num_filters=8, nr=3, nc=3, stride_y=2, stride_x=2, padding_y=0, padding_x=0) learning_rate_mult=1 weight_decay_mult=1 bias_learning_rate_mult=1 bias_weight_decay_mult=0
     layer<121> tag1
     layer<122> relu
     layer<123> add_prev
-    layer<124> bn_con   eps=1e-05 learning_rate_mult=1 weight_decay_mult=0
+    layer<124> bn_con   eps=1e-05 learning_rate_mult=1 weight_decay_mult=0 bias_learning_rate_mult=1 bias_weight_decay_mult=1
     layer<125> con      (num_filters=8, nr=3, nc=3, stride_y=1, stride_x=1, padding_y=1, padding_x=1) learning_rate_mult=1 weight_decay_mult=1 bias_learning_rate_mult=1 bias_weight_decay_mult=0
     layer<126> relu
-    layer<127> bn_con   eps=1e-05 learning_rate_mult=1 weight_decay_mult=0
+    layer<127> bn_con   eps=1e-05 learning_rate_mult=1 weight_decay_mult=0 bias_learning_rate_mult=1 bias_weight_decay_mult=1
     layer<128> con      (num_filters=8, nr=3, nc=3, stride_y=1, stride_x=1, padding_y=1, padding_x=1) learning_rate_mult=1 weight_decay_mult=1 bias_learning_rate_mult=1 bias_weight_decay_mult=0
     layer<129> tag1
     layer<130> input