Fix tests for training the DNN.

This commit is contained in:
Brandon Amos 2016-03-06 19:45:37 -05:00
parent 1786f20fb2
commit bbff0a01dd
4 changed files with 8 additions and 5 deletions

View File

@@ -38,7 +38,7 @@ def test_dnn_training():
     imgWorkDir = tempfile.mkdtemp(prefix='OpenFaceTrainingTest-Img-')
     cmd = ['python2', os.path.join(openfaceDir, 'util', 'align-dlib.py'),
            os.path.join(lfwSubset, 'raw'), 'align', 'outerEyesAndNose',
-           os.path.join(imgWorkDir, 'aligned', 'train')]
+           os.path.join(imgWorkDir, 'aligned')]
     p = Popen(cmd, stdout=PIPE, stderr=PIPE)
     (out, err) = p.communicate()
     print(out)
@@ -54,7 +54,7 @@ def test_dnn_training():
            '-nEpochs', '10',
            '-epochSize', '1',
            '-cache', netWorkDir,
-           '-cuda', '-cudnn',
+           '-cuda', '-cudnn', '-testing',
            '-nDonkeys', '-1']
     p = Popen(cmd, stdout=PIPE, stderr=PIPE, cwd=os.path.join(openfaceDir, 'training'))
     (out, err) = p.communicate()
@@ -65,7 +65,7 @@ def test_dnn_training():
     # Training won't make much progress on lfw-subset, but as a sanity check,
     # make sure the training code runs and doesn't get worse than the initialize
     # loss value of 0.2.
-    trainLoss = pd.read_csv(os.path.join(netWorkDir, '1', 'train.log'),
+    trainLoss = pd.read_csv(os.path.join(netWorkDir, '001', 'train.log'),
                             sep='\t').as_matrix()[:, 0]
     assert np.mean(trainLoss) < 0.3

View File

@@ -52,7 +52,7 @@ if paths.filep(trainCache) then
 else
    print('Creating train metadata')
    trainLoader = dataLoader{
-      paths = {paths.concat(opt.data, 'train')},
+      paths = {paths.concat(opt.data)},
       loadSize = loadSize,
       sampleSize = sampleSize,
       split = 100,

View File

@@ -42,6 +42,8 @@ epoch = opt.epochNumber
 for _=1,opt.nEpochs do
    train()
-   test()
+   if opt.test then
+      test()
+   end
    epoch = epoch + 1
 end

View File

@@ -40,6 +40,7 @@ function M.parse(arg)
    -- GPU memory usage depends on peoplePerBatch and imagesPerPerson.
    cmd:option('-peoplePerBatch', 15, 'Number of people to sample in each mini-batch.')
    cmd:option('-imagesPerPerson', 20, 'Number of images to sample per person in each mini-batch.')
+   cmd:option('-testing', true, 'Test with the LFW.')
    cmd:option('-testBatchSize', 800, 'Batch size for testing.')
 ---------- Model options ----------------------------------