x86: mtrr_cleanup safe to get more spare regs now
Delay the exit from the scan loop to make sure we actually get the optimal result in as many cases as possible.

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
commit 73436a1d25 (parent 8f0afaa58e)
@@ -1353,10 +1353,8 @@ static int __init mtrr_cleanup(unsigned address_bits)
 		nr_mtrr_spare_reg = num_var_ranges - 1;
 	num_reg_good = -1;
 	for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) {
-		if (!min_loss_pfn[i]) {
+		if (!min_loss_pfn[i])
 			num_reg_good = i;
-			break;
-		}
 	}
 
 	index_good = -1;
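To illustrate the effect of dropping the early break, here is a minimal standalone sketch (not the kernel code itself). It assumes a hypothetical min_loss_pfn[] table where min_loss_pfn[i] is the smallest number of pages lost when the cleanup uses i variable MTRRs, and 0 means a lossless layout exists with i registers; NUM_VAR_RANGES and the sample values are made up for the example. Because i is scanned downward and num_reg_good keeps being overwritten, the loop now ends with the smallest lossless i, i.e. the layout that leaves the most spare registers, whereas the old break stopped at the largest one.

	/* Standalone sketch of the selection loop after this commit. */
	#include <stdio.h>

	#define NUM_VAR_RANGES 8

	int main(void)
	{
		/* hypothetical losses: lossless layouts exist for i = 3..6 */
		unsigned long min_loss_pfn[NUM_VAR_RANGES + 1] =
			{ ~0UL, ~0UL, ~0UL, 0, 0, 0, 0, ~0UL, ~0UL };
		int nr_mtrr_spare_reg = 1;
		int num_reg_good = -1;
		int i;

		/*
		 * Old behaviour: break on the first zero-loss hit -> i = 6.
		 * New behaviour: keep scanning -> smallest lossless i = 3,
		 * which frees up more spare registers.
		 */
		for (i = NUM_VAR_RANGES - nr_mtrr_spare_reg; i > 0; i--) {
			if (!min_loss_pfn[i])
				num_reg_good = i;
		}

		printf("use %d variable MTRRs, %d spare\n",
		       num_reg_good, NUM_VAR_RANGES - num_reg_good);
		return 0;
	}

With the sample table above this prints "use 3 variable MTRRs, 5 spare"; with the old break it would have settled on 6 registers and only 2 spares.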