diff --git a/dlib/dnn/core.h b/dlib/dnn/core.h
index 8046d8c30..7421ac0e4 100644
--- a/dlib/dnn/core.h
+++ b/dlib/dnn/core.h
@@ -113,6 +113,15 @@ namespace dlib
             return true;
         }
 
+        template <typename layer_type, typename SUBNET>
+        constexpr auto backward_requires_forward_output(
+            layer_type& layer,
+            SUBNET& sub
+        ) -> typename always_bool<decltype(layer.backward_inplace(sub.get_gradient_input(),sub.get_gradient_input(),std::declval<tensor&>()))>::type
+        {
+            return false;
+        }
+
         template <typename layer_type, typename SUBNET>
         constexpr auto has_inplace_backward(
             layer_type& layer,
@@ -140,6 +149,15 @@ namespace dlib
             return true;
         }
 
+        template <typename layer_type, typename SUBNET>
+        constexpr auto has_inplace_backward(
+            layer_type& layer,
+            SUBNET& sub
+        ) -> typename always_bool<decltype(layer.backward_inplace(sub.get_gradient_input(),sub.get_gradient_input(),std::declval<tensor&>()))>::type
+        {
+            return true;
+        }
+
         template <typename layer_type, typename SUBNET>
         constexpr auto is_inplace_layer(
             layer_type& layer,
@@ -194,6 +212,18 @@ namespace dlib
             layer.backward_inplace(computed_output,gradient_input,sub.get_gradient_input(),params_grad);
         }
 
+        template <typename layer_type, typename SUBNET>
+        auto call_layer_backward(
+            layer_type& layer,
+            const tensor& ,
+            const tensor& gradient_input,
+            SUBNET& sub,
+            tensor& params_grad
+        ) -> decltype(layer.backward_inplace(gradient_input,sub.get_gradient_input(),params_grad))
+        {
+            layer.backward_inplace(gradient_input,sub.get_gradient_input(),params_grad);
+        }
+
         template <typename layer_type, typename SUBNET>
         auto call_layer_forward(
diff --git a/dlib/dnn/layers.h b/dlib/dnn/layers.h
index 2d3e86d06..e51e8f295 100644
--- a/dlib/dnn/layers.h
+++ b/dlib/dnn/layers.h
@@ -559,7 +559,6 @@ namespace dlib
         }
 
         void backward_inplace(
-            const tensor& /*computed_output*/,
             const tensor& gradient_input,
             tensor& data_grad,
             tensor& /*params_grad*/
diff --git a/dlib/dnn/layers_abstract.h b/dlib/dnn/layers_abstract.h
index c3a77ee10..c0cd6d7bb 100644
--- a/dlib/dnn/layers_abstract.h
+++ b/dlib/dnn/layers_abstract.h
@@ -99,7 +99,7 @@ namespace dlib
                 to document the interface that a layer object must implement.
 
                 The central work of defining a layer is implementing the forward and backward
-                methods.  When you do this you have three options:
+                methods.  When you do this you have four options:
                     - Implement the forward() and backward() methods according to the
                       specification shown below.  Do not implement forward_inplace() and
                       backward_inplace().
@@ -113,6 +113,12 @@ namespace dlib
                       according to the specification shown below.  Do not implement forward()
                       and backward().  These in-place methods allow some types of layers to be
                       implemented more efficiently.
+                    - Implement the forward_inplace() and backward_inplace() methods
+                      according to the specification shown below, except exclude the
+                      computed_output parameter from backward_inplace().  Doing this will
+                      allow dlib to make some layers execute in-place and therefore run a
+                      little faster and use less memory.  Do not implement forward() and
+                      backward().
        !*/
 
    public:
@@ -239,7 +245,7 @@ namespace dlib
        !*/
 
        void backward_inplace(
-            const tensor& computed_output,
+            const tensor& computed_output, // this parameter is optional
            const tensor& gradient_input,
            tensor& data_grad,
            tensor& params_grad
@@ -503,7 +509,7 @@ namespace dlib
        template <typename SUBNET> void setup (const SUBNET& sub);
        void forward_inplace(const tensor& input, tensor& output);
-        void backward_inplace(const tensor& computed_output, const tensor& gradient_input, tensor& data_grad, tensor& params_grad);
+        void backward_inplace(const tensor& gradient_input, tensor& data_grad, tensor& params_grad);
        const tensor& get_layer_params() const;
        tensor& get_layer_params();
        /*!
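
For context, here is a minimal sketch of a layer written against the new interface. It is not part of this patch: the layer name multiply_by_two_ and its template alias are hypothetical, and the serialize()/operator<< routines a real dlib layer needs are omitted for brevity. The point is the three-argument backward_inplace(), which the SFINAE overloads added in core.h above detect.

    #include <dlib/dnn.h>

    namespace example
    {
        using namespace dlib;

        // Hypothetical layer: the gradient of 2*x does not depend on the
        // forward output, so backward_inplace() omits computed_output.
        class multiply_by_two_
        {
        public:
            template <typename SUBNET>
            void setup (const SUBNET& /*sub*/) {}

            void forward_inplace(const tensor& input, tensor& output)
            {
                tt::affine_transform(output, input, 2); // output = 2*input
            }

            void backward_inplace(
                const tensor& gradient_input, // note: no computed_output parameter
                tensor& data_grad,
                tensor& /*params_grad*/
            )
            {
                // Per the EXAMPLE_COMPUTATIONAL_LAYER_ spec: assign when running
                // fully in-place, otherwise accumulate into data_grad.
                if (is_same_object(gradient_input, data_grad))
                    tt::affine_transform(data_grad, gradient_input, 2);
                else
                    tt::affine_transform(data_grad, data_grad, gradient_input, 1, 2);
            }

            const tensor& get_layer_params() const { return params; }
            tensor& get_layer_params() { return params; }

        private:
            resizable_tensor params; // empty; this layer has no learnable parameters
        };

        template <typename SUBNET>
        using multiply_by_two = add_layer<multiply_by_two_, SUBNET>;
    }

Because this backward_inplace() never reads the forward output, the new backward_requires_forward_output() overload reports false for such a layer, so the network does not have to keep the output tensor around. That is where the speed and memory savings described in layers_abstract.h come from.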