mirror of https://github.com/davisking/dlib.git
adapt to dlib indentation style
This commit is contained in:
parent af76e82633
commit d4da6c53b6

dlib/dnn/loss.h: 108 lines changed
@@ -1305,70 +1305,70 @@ namespace dlib

            typename SUB_TYPE,
            typename label_iterator
            >
        void to_label (
            const tensor& input_tensor,
            const SUB_TYPE& sub,
            label_iterator iter
        ) const
        {
            DLIB_CASSERT(sub.sample_expansion_factor() == 1);

            const tensor& output_tensor = sub.get_output();

            DLIB_CASSERT(output_tensor.nr() == 1 &&
                         output_tensor.nc() == 1 &&
                         output_tensor.k() == 1);
            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples());

            const float* out_data = output_tensor.host();
            for (long i = 0; i < output_tensor.num_samples(); ++i)
            {
                *iter++ = out_data[i];
            }
        }
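Note that to_label() is the read-back path: when a trained network using this loss is called on a batch, each sample's single float output is copied to the caller through iter. A minimal usage sketch, assuming a std::vector of input matrices named samples and a trained network object net (both names illustrative):

    // Illustrative only: invoking the trained network runs to_label() under the
    // hood and yields one float prediction per input sample.
    std::vector<float> predictions = net(samples);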
        template <
            typename const_label_iterator,
            typename SUBNET
            >
        double compute_loss_value_and_gradient (
            const tensor& input_tensor,
            const_label_iterator truth,
            SUBNET& sub
        ) const
        {
            const tensor& output_tensor = sub.get_output();
            tensor& grad = sub.get_gradient_input();

            DLIB_CASSERT(sub.sample_expansion_factor() == 1);
            DLIB_CASSERT(input_tensor.num_samples() != 0);
            DLIB_CASSERT(input_tensor.num_samples()%sub.sample_expansion_factor() == 0);
            DLIB_CASSERT(input_tensor.num_samples() == grad.num_samples());
            DLIB_CASSERT(input_tensor.num_samples() == output_tensor.num_samples());
            DLIB_CASSERT(output_tensor.nr() == 1 &&
                         output_tensor.nc() == 1 &&
                         output_tensor.k() == 1);
            DLIB_CASSERT(grad.nr() == 1 &&
                         grad.nc() == 1 &&
                         grad.k() == 1);

            // The loss we output is the average loss over the mini-batch.
            const double scale = 1.0/output_tensor.num_samples();
            double loss = 0;
            float* g = grad.host_write_only();
            const float* out_data = output_tensor.host();
            for (long i = 0; i < output_tensor.num_samples(); ++i)
            {
                const float y = *truth++;
                const float temp1 = y - out_data[i];
                const float temp2 = scale*temp1;
                loss += 0.5*temp2*temp1;
                g[i] = -temp2;
            }
            return loss;
        }
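For reference, with N samples in the mini-batch the loop above accumulates loss = (1/N) * sum_i 0.5*(y_i - out_i)^2 and writes g_i = -(y_i - out_i)/N, the derivative of that average loss with respect to each network output. A small self-contained sketch of the same arithmetic, using plain C++ containers instead of dlib tensors (the function name is illustrative):

    #include <cstddef>
    #include <vector>

    // Mean squared loss over a mini-batch and its gradient w.r.t. the outputs,
    // mirroring the update performed in compute_loss_value_and_gradient() above.
    double mean_squared_loss(const std::vector<float>& truth,
                             const std::vector<float>& out,
                             std::vector<float>& grad)
    {
        const double scale = 1.0/out.size();
        double loss = 0;
        grad.resize(out.size());
        for (std::size_t i = 0; i < out.size(); ++i)
        {
            const float temp1 = truth[i] - out[i];   // y - prediction
            const float temp2 = scale*temp1;
            loss += 0.5*temp2*temp1;                 // 0.5*(y - out)^2 / N
            grad[i] = -temp2;                        // d(loss)/d(out_i)
        }
        return loss;
    }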
        friend void serialize(const loss_mean_squared_& , std::ostream& out)
        {

@@ -1397,7 +1397,7 @@ namespace dlib

    };

    template <typename SUBNET>
    using loss_mean_squared = add_loss_layer<loss_mean_squared_, SUBNET>;

// ----------------------------------------------------------------------------------------
@@ -584,7 +584,6 @@ namespace dlib

    template <typename SUBNET>
    using loss_mean_squared = add_loss_layer<loss_mean_squared_, SUBNET>;

}

#endif // DLIB_DNn_LOSS_ABSTRACT_H_
@@ -1758,11 +1758,7 @@ namespace

            y[ii] = (true_intercept + true_slope*static_cast<float>(val) + distribution(generator));
        }

-       using net_type = loss_mean_squared<
-                            fc<
-                               1, input<matrix<double>>
-                               >
-                            >;
+       using net_type = loss_mean_squared<fc<1, input<matrix<double>>>>;
        net_type net;
        layer<1>(net).layer_details().set_bias_learning_rate_multiplier(300);
        sgd defsolver;
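For context, this test fits a single fc<1> neuron to noisy y = intercept + slope*x data using the loss above. A rough sketch of how such a network is typically driven with dlib's dnn_trainer, assuming <dlib/dnn.h> is included, the dlib namespace is in scope, and x (std::vector<matrix<double>>) and y (std::vector<float>) hold the generated data; the hyperparameter values here are illustrative, not the test's actual settings:

    using net_type = loss_mean_squared<fc<1, input<matrix<double>>>>;
    net_type net;
    dnn_trainer<net_type> trainer(net, sgd());
    trainer.set_learning_rate(1e-5);
    trainer.set_mini_batch_size(50);
    trainer.set_max_num_epochs(200);
    trainer.train(x, y);                      // minimizes the mean squared loss shown above

    std::vector<float> predictions = net(x);  // routes through to_label(), one float per sample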