Fixed a bug in the cuDNN binding that on rare occasions caused NaN outputs from
batch normalization.

The running mean and variance need to be initialized when calling cuDNN, even
if the averaging factor is 1.  I don't think this was the case prior to cuDNN
v5, but it certainly is the case now.  This patch fixes that.
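A note on why the initialization matters even when the averaging factor is 1
(this explanation and the snippet below are illustrative and not part of the
commit): the cuDNN documentation describes the running-statistic update as
roughly runningMean = runningMean*(1 - factor) + newMean*factor, so a factor of
1 multiplies the old value by zero.  But if the buffer holds uninitialized
memory that happens to be NaN, 0*NaN is still NaN, and the corrupted running
statistics can later surface as NaN outputs.  A minimal standalone C++ sketch
of that arithmetic:

    #include <iostream>
    #include <limits>

    int main()
    {
        const float factor = 1.0f;                                    // averaging factor of 1
        float running_mean = std::numeric_limits<float>::quiet_NaN(); // stand-in for uninitialized memory
        const float batch_mean = 0.25f;

        // The moving-average update described in the cuDNN documentation.
        running_mean = (1 - factor)*running_mean + factor*batch_mean;

        std::cout << running_mean << "\n";  // prints "nan", not 0.25
        return 0;
    }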
Davis King 2016-08-09 16:33:44 -04:00
parent 98da589f9d
commit ceb6a119d5
1 changed file with 16 additions and 0 deletions

@@ -445,6 +445,14 @@ namespace dlib
     invstds.copy_size(means);
     running_means.copy_size(means);
     running_variances.copy_size(means);
+    // cuDNN requires that running_means and running_variances be initialized to
+    // some valid float values even if the averaging factor would have ignored
+    // them.
+    if (averaging_factor == 1)
+    {
+        running_means = 0;
+        running_variances = 1;
+    }
     CHECK_CUDNN(cudnnBatchNormalizationForwardTraining(
         context(),
@@ -627,6 +635,14 @@ namespace dlib
     invstds.copy_size(means);
     running_means.copy_size(means);
     running_variances.copy_size(means);
+    // cuDNN requires that running_means and running_variances be initialized to
+    // some valid float values even if the averaging factor would have ignored
+    // them.
+    if (averaging_factor == 1)
+    {
+        running_means = 0;
+        running_variances = 1;
+    }
     CHECK_CUDNN(cudnnBatchNormalizationForwardTraining(
         context(),