diff --git a/tests/openface_neural_net_training_tests.py b/tests/openface_neural_net_training_tests.py
index 8381ae9..a8d2888 100644
--- a/tests/openface_neural_net_training_tests.py
+++ b/tests/openface_neural_net_training_tests.py
@@ -38,7 +38,7 @@ def test_dnn_training():
     imgWorkDir = tempfile.mkdtemp(prefix='OpenFaceTrainingTest-Img-')
     cmd = ['python2', os.path.join(openfaceDir, 'util', 'align-dlib.py'),
            os.path.join(lfwSubset, 'raw'), 'align', 'outerEyesAndNose',
-           os.path.join(imgWorkDir, 'aligned', 'train')]
+           os.path.join(imgWorkDir, 'aligned')]
     p = Popen(cmd, stdout=PIPE, stderr=PIPE)
     (out, err) = p.communicate()
     print(out)
@@ -54,7 +54,7 @@ def test_dnn_training():
            '-nEpochs', '10',
            '-epochSize', '1',
            '-cache', netWorkDir,
-           '-cuda', '-cudnn',
+           '-cuda', '-cudnn', '-testing',
            '-nDonkeys', '-1']
     p = Popen(cmd, stdout=PIPE, stderr=PIPE, cwd=os.path.join(openfaceDir, 'training'))
     (out, err) = p.communicate()
@@ -65,7 +65,7 @@ def test_dnn_training():
     # Training won't make much progress on lfw-subset, but as a sanity check,
     # make sure the training code runs and doesn't get worse than the initialize
     # loss value of 0.2.
-    trainLoss = pd.read_csv(os.path.join(netWorkDir, '1', 'train.log'),
+    trainLoss = pd.read_csv(os.path.join(netWorkDir, '001', 'train.log'),
                             sep='\t').as_matrix()[:, 0]
     assert np.mean(trainLoss) < 0.3
 
diff --git a/training/donkey.lua b/training/donkey.lua
index 37dad42..2f40072 100644
--- a/training/donkey.lua
+++ b/training/donkey.lua
@@ -52,7 +52,7 @@ if paths.filep(trainCache) then
 else
    print('Creating train metadata')
    trainLoader = dataLoader{
-      paths = {paths.concat(opt.data, 'train')},
+      paths = {paths.concat(opt.data)},
       loadSize = loadSize,
       sampleSize = sampleSize,
       split = 100,
diff --git a/training/main.lua b/training/main.lua
index 48129c7..776b674 100755
--- a/training/main.lua
+++ b/training/main.lua
@@ -42,6 +42,8 @@
 epoch = opt.epochNumber
 for _=1,opt.nEpochs do
    train()
-   test()
+   if opt.testing then
+      test()
+   end
    epoch = epoch + 1
 end
diff --git a/training/opts.lua b/training/opts.lua
index b06a418..d830535 100644
--- a/training/opts.lua
+++ b/training/opts.lua
@@ -40,6 +40,7 @@ function M.parse(arg)
    -- GPU memory usage depends on peoplePerBatch and imagesPerPerson.
    cmd:option('-peoplePerBatch', 15, 'Number of people to sample in each mini-batch.')
    cmd:option('-imagesPerPerson', 20, 'Number of images to sample per person in each mini-batch.')
+   cmd:option('-testing', true, 'Test with the LFW.')
    cmd:option('-testBatchSize', 800, 'Batch size for testing.')
 
    ---------- Model options ----------------------------------