From d4da6c53b6a469027612e2006bf3c598e151a627 Mon Sep 17 00:00:00 2001
From: Dennis Francis
Date: Sat, 26 Nov 2016 09:07:36 +0530
Subject: [PATCH] adapt to dlib indentation style

---
 dlib/dnn/loss.h          | 108 +++++++++++++++++++--------------------
 dlib/dnn/loss_abstract.h |   1 -
 dlib/test/dnn.cpp        |   6 +--
 3 files changed, 55 insertions(+), 60 deletions(-)

diff --git a/dlib/dnn/loss.h b/dlib/dnn/loss.h
index d34e78620..6a52f1f85 100644
--- a/dlib/dnn/loss.h
+++ b/dlib/dnn/loss.h
@@ -1305,70 +1305,70 @@ namespace dlib
             typename SUB_TYPE,
             typename label_iterator
             >
-            void to_label (
-                const tensor& input_tensor,
-                const SUB_TYPE& sub,
-                label_iterator iter
-            ) const
+        void to_label (
+            const tensor& input_tensor,
+            const SUB_TYPE& sub,
+            label_iterator iter
+        ) const
+        {
+            DLIB_CASSERT(sub.sample_expansion_factor() == 1);
+
+            const tensor& output_tensor = sub.get_output();
+
+            DLIB_CASSERT(output_tensor.nr() == 1 &&
+                         output_tensor.nc() == 1 &&
+                         output_tensor.k() == 1);
+            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples());
+
+            const float* out_data = output_tensor.host();
+            for (long i = 0; i < output_tensor.num_samples(); ++i)
             {
-                DLIB_CASSERT(sub.sample_expansion_factor() == 1);
-
-                const tensor& output_tensor = sub.get_output();
-
-                DLIB_CASSERT(output_tensor.nr() == 1 &&
-                             output_tensor.nc() == 1 &&
-                             output_tensor.k() == 1);
-                DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples());
-
-                const float* out_data = output_tensor.host();
-                for (long i = 0; i < output_tensor.num_samples(); ++i)
-                {
-                    *iter++ = out_data[i];
-                }
+                *iter++ = out_data[i];
             }
+        }
 
         template <
            typename const_label_iterator,
            typename SUBNET
            >
-            double compute_loss_value_and_gradient (
-                const tensor& input_tensor,
-                const_label_iterator truth,
-                SUBNET& sub
-            ) const
+        double compute_loss_value_and_gradient (
+            const tensor& input_tensor,
+            const_label_iterator truth,
+            SUBNET& sub
+        ) const
+        {
+            const tensor& output_tensor = sub.get_output();
+            tensor& grad = sub.get_gradient_input();
+
+            DLIB_CASSERT(sub.sample_expansion_factor() == 1);
+            DLIB_CASSERT(input_tensor.num_samples() != 0);
+            DLIB_CASSERT(input_tensor.num_samples()%sub.sample_expansion_factor() == 0);
+            DLIB_CASSERT(input_tensor.num_samples() == grad.num_samples());
+            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples());
+            DLIB_CASSERT(output_tensor.nr() == 1 &&
+                         output_tensor.nc() == 1 &&
+                         output_tensor.k() == 1);
+            DLIB_CASSERT(grad.nr() == 1 &&
+                         grad.nc() == 1 &&
+                         grad.k() == 1);
+
+            // The loss we output is the average loss over the mini-batch.
+            const double scale = 1.0/output_tensor.num_samples();
+            double loss = 0;
+            float* g = grad.host_write_only();
+            const float* out_data = output_tensor.host();
+            for (long i = 0; i < output_tensor.num_samples(); ++i)
             {
-                const tensor& output_tensor = sub.get_output();
-                tensor& grad = sub.get_gradient_input();
+                const float y = *truth++;
+                const float temp1 = y - out_data[i];
+                const float temp2 = scale*temp1;
+                loss += 0.5*temp2*temp1;
+                g[i] = -temp2;
 
-                DLIB_CASSERT(sub.sample_expansion_factor() == 1);
-                DLIB_CASSERT(input_tensor.num_samples() != 0);
-                DLIB_CASSERT(input_tensor.num_samples()%sub.sample_expansion_factor() == 0);
-                DLIB_CASSERT(input_tensor.num_samples() == grad.num_samples());
-                DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples());
-                DLIB_CASSERT(output_tensor.nr() == 1 &&
-                             output_tensor.nc() == 1 &&
-                             output_tensor.k() == 1);
-                DLIB_CASSERT(grad.nr() == 1 &&
-                             grad.nc() == 1 &&
-                             grad.k() == 1);
-
-                // The loss we output is the average loss over the mini-batch.
-                const double scale = 1.0/output_tensor.num_samples();
-                double loss = 0;
-                float* g = grad.host_write_only();
-                const float* out_data = output_tensor.host();
-                for (long i = 0; i < output_tensor.num_samples(); ++i)
-                {
-                    const float y = *truth++;
-                    const float temp1 = y - out_data[i];
-                    const float temp2 = scale*temp1;
-                    loss += 0.5*temp2*temp1;
-                    g[i] = -temp2;
-
-                }
-                return loss;
             }
+            return loss;
+        }
 
         friend void serialize(const loss_mean_squared_& , std::ostream& out)
         {
@@ -1397,7 +1397,7 @@ namespace dlib
     };
 
     template <typename SUBNET>
-        using loss_mean_squared = add_loss_layer<loss_mean_squared_, SUBNET>;
+    using loss_mean_squared = add_loss_layer<loss_mean_squared_, SUBNET>;
 
 // ----------------------------------------------------------------------------------------
 
diff --git a/dlib/dnn/loss_abstract.h b/dlib/dnn/loss_abstract.h
index 3587607a7..a51d5f88c 100644
--- a/dlib/dnn/loss_abstract.h
+++ b/dlib/dnn/loss_abstract.h
@@ -584,7 +584,6 @@ namespace dlib
     template <typename SUBNET>
     using loss_mean_squared = add_loss_layer<loss_mean_squared_, SUBNET>;
 
-
 }
 
 #endif // DLIB_DNn_LOSS_ABSTRACT_H_
diff --git a/dlib/test/dnn.cpp b/dlib/test/dnn.cpp
index e5ffc4c6c..98cc32202 100644
--- a/dlib/test/dnn.cpp
+++ b/dlib/test/dnn.cpp
@@ -1758,11 +1758,7 @@ namespace
                 y[ii] = (true_intercept + true_slope*static_cast<float>(val) + distribution(generator));
             }
 
-            using net_type = loss_mean_squared<
-                fc<
-                1, input<matrix<double>>
-                >
-                >;
+            using net_type = loss_mean_squared<fc<1, input<matrix<double>>>>;
             net_type net;
             layer<1>(net).layer_details().set_bias_learning_rate_multiplier(300);
             sgd defsolver;
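A note on the math (not part of the patch): the hunks above only re-indent loss_mean_squared_, so the loss and gradient it computes are unchanged. The standalone sketch below mirrors the arithmetic of compute_loss_value_and_gradient() on plain arrays; the output and truth values are made up for illustration, and nothing here depends on dlib itself.

// Mirrors the patched loop: loss = (1/N) * sum_i 0.5*(y_i - out_i)^2,
// with gradient d(loss)/d(out_i) = -(y_i - out_i)/N.
#include <cstdio>
#include <vector>

int main()
{
    const std::vector<float> out   = {1.0f, 2.0f, 3.0f};  // hypothetical network outputs
    const std::vector<float> truth = {1.5f, 1.5f, 3.5f};  // hypothetical target labels

    const double scale = 1.0/out.size();   // 1/N, as in the patch
    double loss = 0;
    std::vector<float> g(out.size());
    for (std::size_t i = 0; i < out.size(); ++i)
    {
        const float temp1 = truth[i] - out[i];  // residual y - out
        const float temp2 = scale*temp1;        // residual scaled by 1/N
        loss += 0.5*temp2*temp1;                // accumulates 0.5*(y-out)^2/N
        g[i] = -temp2;                          // gradient w.r.t. the network output
    }

    std::printf("loss = %f\n", loss);    // 0.125 for the values above
    for (const float gi : g)
        std::printf("g[i] = %f\n", gi);  // -0.166667, 0.166667, -0.166667
    return 0;
}

Scaling the residual once (temp2 = scale*temp1) lets the same value feed both the loss sum and the gradient, so the 1/N mini-batch averaging stays consistent between the reported loss and what gets backpropagated.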