From 84b72278b56c9adca02d0db6fb299a8e2e18df7b Mon Sep 17 00:00:00 2001
From: davemers0160
Date: Sat, 19 Jan 2019 20:45:46 -0500
Subject: [PATCH] fixed check for excessive detections in loss_mmod_ (#1625)

fixed check for excessive detections in loss_mmod_

Ran into the problem where dets.size() was equal to max_num_initial_dets,
which then throws a subscript out of range error when accessing
dets[max_num_initial_dets].detection_confidence. This fixes that issue.
---
 dlib/dnn/loss.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dlib/dnn/loss.h b/dlib/dnn/loss.h
index e4776aa43..5b5c53ce8 100644
--- a/dlib/dnn/loss.h
+++ b/dlib/dnn/loss.h
@@ -1135,7 +1135,7 @@ namespace dlib
             // Prevent calls to tensor_to_dets() from running for a really long time
             // due to the production of an obscene number of detections.
             const unsigned long max_num_initial_dets = max_num_dets*100;
-            if (dets.size() >= max_num_initial_dets)
+            if (dets.size() > max_num_initial_dets)
             {
                 det_thresh_speed_adjust = std::max(det_thresh_speed_adjust,dets[max_num_initial_dets].detection_confidence + options.loss_per_false_alarm);
             }
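
Below is a minimal standalone sketch (not dlib code) illustrating the boundary condition the patch fixes. The vector, its element type, and the values are hypothetical stand-ins; only the names dets and max_num_initial_dets mirror the patched code. With ">=", the branch is entered when the size exactly equals the cap, so indexing dets[max_num_initial_dets] reads one past the last element; with ">", the indexed element is guaranteed to exist.

    #include <iostream>
    #include <vector>

    struct det { double detection_confidence; };

    int main()
    {
        const unsigned long max_num_initial_dets = 4;           // hypothetical cap
        std::vector<det> dets(max_num_initial_dets, det{0.5});   // size == cap

        // Old check: taken when size == cap, so dets[max_num_initial_dets]
        // would index one past the last valid element.
        if (dets.size() >= max_num_initial_dets)
            std::cout << "'>=' branch taken: dets[" << max_num_initial_dets
                      << "] is out of range here\n";

        // Patched check: only taken when the indexed element actually exists.
        if (dets.size() > max_num_initial_dets)
            std::cout << dets[max_num_initial_dets].detection_confidence << "\n";
        else
            std::cout << "'>' branch skipped: no out-of-range access\n";
    }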