Just renamed two functions to way better names.

Davis King 2015-12-23 22:29:31 -05:00
parent 1f5aa6c1fa
commit d2516bc2f7
9 changed files with 16 additions and 16 deletions
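For context, the diff below renames add_bias_gradient to assign_bias_gradient and add_conv_bias_gradient to assign_conv_bias_gradient across the CPU code, the cuDNN code, the tt:: dispatch layer, and the layer implementations that call them. A minimal sketch of the assign-style semantics the new names suggest (the per-channel sum of gradient_input is written into grad rather than accumulated into it) is shown here; SimpleTensor and assign_bias_gradient_sketch are hypothetical stand-ins for illustration, not dlib's actual tensor type or implementation:

    #include <vector>
    #include <cstddef>

    // Hypothetical simplified stand-in for a tensor: num_samples x num_channels, row-major.
    struct SimpleTensor {
        std::size_t num_samples, num_channels;
        std::vector<float> data;
        float& at(std::size_t s, std::size_t c)       { return data[s*num_channels + c]; }
        float  at(std::size_t s, std::size_t c) const { return data[s*num_channels + c]; }
    };

    // Sketch of "assign" semantics: grad is overwritten with the per-channel sum of
    // gradient_input over all samples (the old name "add_bias_gradient" suggested
    // accumulation, which is what the rename clears up).
    void assign_bias_gradient_sketch(SimpleTensor& grad, const SimpleTensor& gradient_input)
    {
        for (std::size_t c = 0; c < gradient_input.num_channels; ++c)
        {
            float sum = 0;
            for (std::size_t s = 0; s < gradient_input.num_samples; ++s)
                sum += gradient_input.at(s, c);
            grad.at(0, c) = sum;   // assign, not +=
        }
    }

As the tensor_tools hunks below show, the renamed tt:: wrappers keep the same DLIB_USE_CUDA dispatch between the cuda:: and cpu:: implementations; only the names change.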

View File

@@ -110,7 +110,7 @@ namespace dlib
 // ----------------------------------------------------------------------------------------
-void add_bias_gradient (
+void assign_bias_gradient (
 tensor& grad,
 const tensor& gradient_input
 )

View File

@@ -32,7 +32,7 @@ namespace dlib
 const tensor& src
 );
-void add_bias_gradient (
+void assign_bias_gradient (
 tensor& grad,
 const tensor& gradient_input
 );

View File

@@ -223,7 +223,7 @@ namespace dlib
 }
 }
-void add_bias_gradient (
+void assign_bias_gradient (
 tensor& grad,
 const tensor& gradient_input
 )

View File

@@ -71,7 +71,7 @@ namespace dlib
 // -----------------------------------------------------------------------------------
-void add_bias_gradient (
+void assign_bias_gradient (
 tensor& grad,
 const tensor& gradient_input
 );

View File

@@ -248,7 +248,7 @@ namespace dlib
 &value));
 }
-void add_conv_bias_gradient (
+void assign_conv_bias_gradient (
 tensor& grad,
 const tensor& gradient_input
 )

View File

@@ -111,7 +111,7 @@ namespace dlib
 // ------------------------------------------------------------------------------------
-void add_conv_bias_gradient (
+void assign_conv_bias_gradient (
 tensor& grad,
 const tensor& gradient_input
 );

View File

@@ -124,7 +124,7 @@ namespace dlib
 auto filt = filters(params_grad,0);
 conv.get_gradient_for_filters (gradient_input, sub.get_output(), filt);
 auto b = biases(params_grad, filters.size());
-tt::add_conv_bias_gradient(b, gradient_input);
+tt::assign_conv_bias_gradient(b, gradient_input);
 }
 const tensor& get_layer_params() const { return params; }
@@ -625,7 +625,7 @@ namespace dlib
 {
 // compute the gradient of the bias parameters.
 auto pb = biases(params_grad, weights.size());
-tt::add_bias_gradient(pb, gradient_input);
+tt::assign_bias_gradient(pb, gradient_input);
 }
 // compute the gradient for the data
@@ -812,7 +812,7 @@ namespace dlib
 tt::multiply(data_grad, gradient_input, g);
 tt::multiply(g_grad, gradient_input, computed_output);
-tt::add_bias_gradient(b_grad, gradient_input);
+tt::assign_bias_gradient(b_grad, gradient_input);
 }
 const tensor& get_layer_params() const { return params; }

View File

@@ -325,13 +325,13 @@ namespace dlib { namespace tt
 // ----------------------------------------------------------------------------------------
-void add_conv_bias_gradient (
+void assign_conv_bias_gradient (
 tensor& grad,
 const tensor& gradient_input
 )
 {
 #ifdef DLIB_USE_CUDA
-cuda::add_conv_bias_gradient(grad,gradient_input);
+cuda::assign_conv_bias_gradient(grad,gradient_input);
 #else
 // TODO
 DLIB_CASSERT(false,"");
@@ -340,15 +340,15 @@ namespace dlib { namespace tt
 // ----------------------------------------------------------------------------------------
-void add_bias_gradient (
+void assign_bias_gradient (
 tensor& grad,
 const tensor& gradient_input
 )
 {
 #ifdef DLIB_USE_CUDA
-cuda::add_bias_gradient(grad,gradient_input);
+cuda::assign_bias_gradient(grad,gradient_input);
 #else
-cpu::add_bias_gradient(grad,gradient_input);
+cpu::assign_bias_gradient(grad,gradient_input);
 #endif
 }

View File

@@ -426,7 +426,7 @@ namespace dlib { namespace tt
 // ----------------------------------------------------------------------------------------
-void add_conv_bias_gradient (
+void assign_conv_bias_gradient (
 tensor& grad,
 const tensor& gradient_input
 );
@@ -449,7 +449,7 @@ namespace dlib { namespace tt
 // ----------------------------------------------------------------------------------------
-void add_bias_gradient (
+void assign_bias_gradient (
 tensor& grad,
 const tensor& gradient_input
 );