From 80135c737cbcab4ae99f757ad66bc99c4b1bedb3 Mon Sep 17 00:00:00 2001
From: Brandon Amos
Date: Mon, 28 Dec 2015 21:28:06 -0500
Subject: [PATCH] nn4.def: Add reasoning behind LRN layers.

---
 models/openface/nn4.def.lua | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/models/openface/nn4.def.lua b/models/openface/nn4.def.lua
index 42d95aa..3fe0d7d 100644
--- a/models/openface/nn4.def.lua
+++ b/models/openface/nn4.def.lua
@@ -28,6 +28,17 @@ function createModel()
   net:add(nn.SpatialBatchNormalization(64))
   net:add(nn.ReLU())
 
+  -- The FaceNet paper just says `norm` and that the models are based
+  -- heavily on the inception paper (http://arxiv.org/pdf/1409.4842.pdf),
+  -- which uses pooling and normalization in the same way in the early layers.
+  --
+  -- The Caffe and official versions of this network both use LRN:
+  --
+  -- + https://github.com/BVLC/caffe/tree/master/models/bvlc_googlenet
+  -- + https://github.com/google/inception/blob/master/inception.ipynb
+  --
+  -- The Caffe docs at http://caffe.berkeleyvision.org/tutorial/layers.html
+  -- define LRN to be across channels.
   net:add(nn.SpatialMaxPooling(3, 3, 2, 2, 1, 1))
   net:add(nn.CrossMapNormalization(5, 0.0001, 0.75))
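
For reference, cross-channel LRN as described in the Caffe docs normalizes each
activation by a sum of squares over neighboring channels at the same spatial
position. A sketch of the usual formula, where n = 5, alpha = 0.0001, and
beta = 0.75 correspond to the CrossMapNormalization arguments in the hunk above;
the bias term k (commonly 1 or 2) and whether alpha is divided by n differ
between implementations and are assumptions here, not part of this patch:

  b^{c}_{x,y} = a^{c}_{x,y} \Big/
    \Bigl(k + \tfrac{\alpha}{n}
      \sum_{c' = \max(0,\, c - \lfloor n/2 \rfloor)}^{\min(C-1,\, c + \lfloor n/2 \rfloor)}
      \bigl(a^{c'}_{x,y}\bigr)^{2}\Bigr)^{\beta}

Here a^{c}_{x,y} is the input activation in channel c at position (x, y), C is
the number of channels, and the sum runs over a window of n channels centered
on c, matching the "across channels" behavior noted in the comment.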