diff --git a/dlib/dnn/layers.h b/dlib/dnn/layers.h
index 250df286d..460524ba7 100644
--- a/dlib/dnn/layers.h
+++ b/dlib/dnn/layers.h
@@ -1791,7 +1791,7 @@ namespace dlib
     }
 
     template <typename net_type>
-    void disable_duplicative_bias (
+    void disable_duplicative_biases (
         net_type& net
     )
     {
diff --git a/dlib/dnn/layers_abstract.h b/dlib/dnn/layers_abstract.h
index 2bfe06a86..b7963ab0b 100644
--- a/dlib/dnn/layers_abstract.h
+++ b/dlib/dnn/layers_abstract.h
@@ -1810,7 +1810,7 @@ namespace dlib
 // ----------------------------------------------------------------------------------------
 
     template <typename net_type>
-    void disable_duplicative_bias (
+    void disable_duplicative_biases (
         const net_type& net
     );
     /*!
diff --git a/dlib/test/dnn.cpp b/dlib/test/dnn.cpp
index a3ed2d14a..55e844a41 100644
--- a/dlib/test/dnn.cpp
+++ b/dlib/test/dnn.cpp
@@ -3918,7 +3918,7 @@ namespace
                     relu>>>>>>>>;
     template <typename SUBNET> using dense_layer_32 = dense_layer<32, 8, SUBNET>;
 
-    void test_disable_duplicative_bias()
+    void test_disable_duplicative_biases()
     {
         using net_type = fc<10, relu
        (net).layer_details().bias_is_disabled() == false);
         DLIB_TEST(layer<24>(net).layer_details().bias_is_disabled() == false);
         DLIB_TEST(layer<31>(net).layer_details().bias_is_disabled() == false);
-        disable_duplicative_bias(net);
+        disable_duplicative_biases(net);
         DLIB_TEST(layer<0>(net).layer_details().bias_is_disabled() == false);
         DLIB_TEST(layer<3>(net).layer_details().bias_is_disabled() == true);
         DLIB_TEST(layer<6>(net).layer_details().bias_is_disabled() == true);
@@ -4130,7 +4130,7 @@ namespace
         test_loss_multimulticlass_log();
         test_loss_mmod();
         test_layers_scale_and_scale_prev();
-        test_disable_duplicative_bias();
+        test_disable_duplicative_biases();
     }
 
     void perform_test()
diff --git a/examples/dnn_dcgan_train_ex.cpp b/examples/dnn_dcgan_train_ex.cpp
index bb155cb6f..9dab87633 100644
--- a/examples/dnn_dcgan_train_ex.cpp
+++ b/examples/dnn_dcgan_train_ex.cpp
@@ -134,8 +134,8 @@ int main(int argc, char** argv) try
     // setup all leaky_relu_ layers in the discriminator to have alpha = 0.2
     visit_computational_layers(discriminator, [](leaky_relu_& l){ l = leaky_relu_(0.2); });
     // Remove the bias learning from all bn_ inputs in both networks
-    disable_duplicative_bias(generator);
-    disable_duplicative_bias(discriminator);
+    disable_duplicative_biases(generator);
+    disable_duplicative_biases(discriminator);
     // Forward random noise so that we see the tensor size at each layer
     discriminator(generate_image(generator, make_noise(rnd)));
     cout << "generator (" << count_parameters(generator) << " parameters)" << endl;
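
For context, here is a minimal usage sketch of the renamed helper. It is not part of this patch: the toy fc/bn_fc network, the layer indices, and the expected output are illustrative assumptions, but the calls shown (disable_duplicative_biases and layer<N>(net).layer_details().bias_is_disabled()) are the same ones exercised by the updated test above.

```cpp
// Minimal sketch (assumed toy network, not from this patch): turn off the bias
// of every layer that feeds a batch-norm layer, since bn_ already learns an
// additive offset, so the extra bias term would be redundant.
#include <dlib/dnn.h>
#include <iostream>

using namespace dlib;

int main()
{
    // fc<32> feeds bn_fc, so its bias is duplicative; the top fc<10> is not
    // followed by a batch-norm layer and keeps its bias.
    using net_type = fc<10, relu<bn_fc<fc<32, input<matrix<float>>>>>>;

    net_type net;
    disable_duplicative_biases(net);

    // layer<3> is fc<32> (bias now disabled), layer<0> is fc<10> (bias kept).
    std::cout << std::boolalpha
              << layer<3>(net).layer_details().bias_is_disabled() << '\n'   // expected: true
              << layer<0>(net).layer_details().bias_is_disabled() << '\n';  // expected: false
}
```

This mirrors how the DCGAN example below uses the function: the bn_ layers there already provide a learned offset, so the biases of their input layers are disabled to avoid training redundant parameters.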