get rid of learning_rate_max

This commit is contained in:
Josh Veitch-Michaelis 2019-03-19 00:02:01 +00:00
parent 53081900e5
commit 6cbf08321a
2 changed files with 1 addition and 2 deletions

View File

@ -119,7 +119,7 @@ float get_current_rate(network net)
return net.learning_rate * (1./(1.+exp(net.gamma*(batch_num - net.step))));
case SGDR:
rate = net.learning_rate_min +
0.5*(net.learning_rate_max-net.learning_rate_min)
0.5*(net.learning_rate-net.learning_rate_min)
* (1. + cos( (float) (batch_num % net.batches_per_cycle)*3.14159265 / net.batches_per_cycle));
return rate;

View File

@ -639,7 +639,6 @@ void parse_net_options(list *options, network *net)
net->batch = option_find_int(options, "batch",1);
net->learning_rate = option_find_float(options, "learning_rate", .001);
net->learning_rate_min = option_find_float_quiet(options, "learning_rate_min", .00001);
net->learning_rate_max = option_find_float_quiet(options, "learning_rate_max", .001);
net->batches_per_cycle = option_find_int_quiet(options, "sgdr_cycle", 500);
net->momentum = option_find_float(options, "momentum", .9);
net->decay = option_find_float(options, "decay", .0001);