mirror of https://github.com/davisking/dlib.git
Differentiate std::clamp from C++17 by adding the dlib:: prefix to clamp() calls (#734)
parent 1c96db9ef6
commit b4ec8d3d14
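For context, the clash this commit guards against can be reproduced with a sketch like the one below (the example() function and its values are hypothetical, not taken from dlib's sources): once <algorithm> provides std::clamp under C++17 and a using-directive makes it visible, an unqualified clamp() on dlib::matrix arguments no longer reliably resolves to dlib::clamp, which is why the calls in the hunks below are explicitly qualified.

// Illustrative sketch of the name clash (hypothetical example, not part of this commit).
#include <algorithm>     // std::clamp is available under C++17
#include <dlib/matrix.h>

using namespace std;
using namespace dlib;

void example()
{
    matrix<double,2,1> x, l, u;
    x = 1.5, 3.0;
    l = 1.0, 1.0;
    u = 2.0, 2.2;

    // clamp(x, l, u);  // under C++17 this unqualified call can pick up std::clamp
                        // instead of dlib::clamp and fail to compile
    matrix<double,2,1> out = dlib::clamp(x, l, u);   // explicit qualification resolves it
    (void)out;           // silence unused-variable warnings
}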
@@ -339,8 +339,8 @@ namespace dlib
             for (unsigned long i = 0; i < horizon; ++i)
             {
                 v_old[i] = v[i];
-                v[i] = clamp(controls[i] - 1.0/lambda * df[i], lower, upper);
-                controls[i] = clamp(v[i] + (std::sqrt(lambda)-1)/(std::sqrt(lambda)+1)*(v[i]-v_old[i]), lower, upper);
+                v[i] = dlib::clamp(controls[i] - 1.0/lambda * df[i], lower, upper);
+                controls[i] = dlib::clamp(v[i] + (std::sqrt(lambda)-1)/(std::sqrt(lambda)+1)*(v[i]-v_old[i]), lower, upper);
             }
         }
     }

@@ -1646,7 +1646,7 @@ convergence:
        if (dot(delta, g) < 0)
            return p;
        else
-            return vector<double,2>(p)+clamp(delta, -1, 1);
+            return vector<double,2>(p)+dlib::clamp(delta, -1, 1);
    }

// ----------------------------------------------------------------------------------------

@@ -527,7 +527,7 @@ namespace dlib
            last_alpha = alpha;

            // Take the search step indicated by the above line search
-            x = clamp(x + alpha*s, x_lower, x_upper);
+            x = dlib::clamp(x + alpha*s, x_lower, x_upper);
            g = der(x);

            if (!is_finite(f_value))

@@ -658,7 +658,7 @@ namespace dlib
            last_alpha = alpha;

            // Take the search step indicated by the above line search
-            x = clamp(x + alpha*s, x_lower, x_upper);
+            x = dlib::clamp(x + alpha*s, x_lower, x_upper);
            g = -der(x);

            // Don't forget to negate the output from the line search since it is from the

@@ -286,7 +286,7 @@ namespace dlib
                lambda = A*alpha;
            else
                lambda = A*alpha + d;
-            lambda = clamp(lambda, 0, max_lambda);
+            lambda = dlib::clamp(lambda, 0, max_lambda);

            // Compute f'(alpha) (i.e. the gradient of f(alpha) with respect to alpha) for the current alpha.
            matrix<T,NR,NC,MM,L> df = Q*alpha - b - trans(A)*lambda;

@@ -333,7 +333,7 @@ namespace dlib
                lambda = A*alpha;
            else
                lambda = A*alpha + d;
-            lambda = clamp(lambda, 0, max_lambda);
+            lambda = dlib::clamp(lambda, 0, max_lambda);
            df = Q*alpha - b - trans(A)*lambda;

            if (trans(alpha)*df - C*min(df) < eps)

@@ -375,7 +375,7 @@ namespace dlib
                lambda = A*alpha;
            else
                lambda = A*alpha + d;
-            lambda = clamp(lambda, 0, max_lambda);
+            lambda = dlib::clamp(lambda, 0, max_lambda);

            // Perform this form of the update every so often because doing so can help
            // avoid the buildup of numerical errors you get with the alternate update

@@ -524,7 +524,7 @@ namespace dlib
            df = Q*alpha + b;
            // now take a projected gradient step using Nesterov's method.
            v = clamp(alpha - 1.0/lipschitz_bound * df, lower, upper);
-            alpha = clamp((1-gamma)*v + gamma*v_old, lower, upper);
+            alpha = dlib::clamp((1-gamma)*v + gamma*v_old, lower, upper);


            // check for convergence every 10 iterations

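The hunk above and the first hunk of this commit follow the same pattern: clamp acts as the projection onto the box constraints inside a Nesterov-accelerated projected gradient update. A rough sketch of that pattern is shown below; the function name, the fixed iteration count, and the gamma schedule are placeholders for illustration, not dlib's exact code.

// Illustrative sketch of the accelerated projected-gradient pattern used above.
// Q, b, lower, upper, lipschitz_bound and the loop bounds are placeholders.
#include <dlib/matrix.h>

void projected_gradient_sketch(const dlib::matrix<double>& Q, const dlib::matrix<double>& b,
                               const dlib::matrix<double>& lower, const dlib::matrix<double>& upper,
                               double lipschitz_bound)
{
    dlib::matrix<double> alpha = lower;          // feasible starting point
    dlib::matrix<double> v = alpha, v_old = alpha;

    for (unsigned long iter = 0; iter < 100; ++iter)
    {
        v_old = v;
        const dlib::matrix<double> df = Q*alpha + b;   // gradient of the quadratic objective
        // Gradient step followed by projection onto [lower, upper]; dlib::clamp is the projection.
        v = dlib::clamp(alpha - 1.0/lipschitz_bound*df, lower, upper);
        // Momentum mixing of the current and previous iterates, projected back onto the box,
        // mirroring the alpha update in the hunk above (gamma schedule is a placeholder here).
        const double gamma = (iter + 1.0)/(iter + 4.0);
        alpha = dlib::clamp((1-gamma)*v + gamma*v_old, lower, upper);
    }
}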
@@ -379,23 +379,23 @@ namespace
        u = 2,2.2;

        out = 2, 2.2;
-        DLIB_TEST(equal(clamp(x, l, u) , out));
+        DLIB_TEST(equal(dlib::clamp(x, l, u) , out));
        out = 3, 2.2;
-        DLIB_TEST(!equal(clamp(x, l, u) , out));
+        DLIB_TEST(!equal(dlib::clamp(x, l, u) , out));
        out = 2, 4.2;
-        DLIB_TEST(!equal(clamp(x, l, u) , out));
+        DLIB_TEST(!equal(dlib::clamp(x, l, u) , out));

        x = 1.5, 1.5;
        out = x;
-        DLIB_TEST(equal(clamp(x, l, u) , out));
+        DLIB_TEST(equal(dlib::clamp(x, l, u) , out));

        x = 0.5, 1.5;
        out = 1, 1.5;
-        DLIB_TEST(equal(clamp(x, l, u) , out));
+        DLIB_TEST(equal(dlib::clamp(x, l, u) , out));

        x = 1.5, 0.5;
        out = 1.5, 1.0;
-        DLIB_TEST(equal(clamp(x, l, u) , out));
+        DLIB_TEST(equal(dlib::clamp(x, l, u) , out));

    }

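A note on the semantics the updated tests exercise: dlib::clamp works element-wise on matrix expressions, with either matrix bounds (as in the tests above) or scalar bounds (as in the clamp(lambda, 0, max_lambda) calls earlier in the diff), whereas std::clamp constrains a single scalar value. The sketch below uses hypothetical values, not taken from the test file, to show that element-wise behaviour.

// Hypothetical usage sketch of element-wise clamping with dlib::clamp.
#include <dlib/matrix.h>
#include <iostream>

int main()
{
    dlib::matrix<double,3,1> x, lo, hi;
    x  = -2.0, 0.5, 9.0;
    lo =  0.0, 0.0, 0.0;
    hi =  1.0, 1.0, 1.0;

    // Each element of x is clamped into [lo(i), hi(i)]; the result is 0, 0.5, 1.
    std::cout << dlib::clamp(x, lo, hi);

    // Scalar bounds apply the same [0, 1] range to every element.
    std::cout << dlib::clamp(x, 0.0, 1.0);
    return 0;
}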