KVM: MMU: Fix off-by-one calculating large page count
The large page initialization code concludes there are two large pages spanned
by a slot covering 1 (small) page starting at gfn 1.  This is incorrect, and
also results in incorrect write_count initialization in some cases (base = 1,
npages = 513 for example).

Cc: stable@kernel.org
Signed-off-by: Avi Kivity <avi@redhat.com>
commit 99894a799f
parent 0910697403
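For illustration only, and not part of the commit: a minimal standalone sketch
of the removed and added counting logic. It assumes KVM_PAGES_PER_HPAGE is 512
(the x86 value for 2 MiB large pages over 4 KiB base pages), and the helper
names are invented.

#include <stdio.h>

#define KVM_PAGES_PER_HPAGE 512UL	/* assumed x86 value: 2 MiB / 4 KiB */

/* The computation removed by this patch. */
static unsigned long old_count(unsigned long base_gfn, unsigned long npages)
{
	unsigned long largepages = npages / KVM_PAGES_PER_HPAGE;

	if (npages % KVM_PAGES_PER_HPAGE)
		largepages++;
	if (base_gfn % KVM_PAGES_PER_HPAGE)
		largepages++;
	return largepages;
}

/* The computation added by this patch. */
static unsigned long new_count(unsigned long base_gfn, unsigned long npages)
{
	unsigned long largepages = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE;

	largepages -= base_gfn / KVM_PAGES_PER_HPAGE;
	return largepages;
}

int main(void)
{
	/* One small page at gfn 1: old code claims 2 large pages, new code 1. */
	printf("base=1 npages=1:   old=%lu new=%lu\n", old_count(1, 1), new_count(1, 1));
	/* gfns 1..513 touch large-page frames 0 and 1: old says 3, new says 2. */
	printf("base=1 npages=513: old=%lu new=%lu\n", old_count(1, 513), new_count(1, 513));
	return 0;
}

In the base = 1, npages = 513 case the old code sizes lpage_info for three
entries even though the slot only touches two large-page frames, which is
presumably how the write_count mis-initialization mentioned above comes about:
the end-of-slot marking lands on the wrong entry.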
@@ -920,6 +920,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	int r;
 	gfn_t base_gfn;
 	unsigned long npages;
+	int largepages;
 	unsigned long i;
 	struct kvm_memory_slot *memslot;
 	struct kvm_memory_slot old, new;
@@ -995,11 +996,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		new.userspace_addr = 0;
 	}
 	if (npages && !new.lpage_info) {
-		int largepages = npages / KVM_PAGES_PER_HPAGE;
-		if (npages % KVM_PAGES_PER_HPAGE)
-			largepages++;
-		if (base_gfn % KVM_PAGES_PER_HPAGE)
-			largepages++;
+		largepages = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE;
+		largepages -= base_gfn / KVM_PAGES_PER_HPAGE;
 
 		new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));
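As a reading aid, again not taken from the patch and with invented names, the
new arithmetic is equivalent to counting large-page frames inclusively from the
first to the last gfn of the slot:

#define KVM_PAGES_PER_HPAGE 512UL	/* assumed x86 value, as in the sketch above */

/* Distinct large-page frames covered by [base_gfn, base_gfn + npages). */
static unsigned long lpage_frames_spanned(unsigned long base_gfn,
					  unsigned long npages)
{
	unsigned long first_frame = base_gfn / KVM_PAGES_PER_HPAGE;
	unsigned long last_frame = (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE;

	/*
	 * Same value as the patched expression:
	 * 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE
	 *   - base_gfn / KVM_PAGES_PER_HPAGE
	 */
	return last_frame - first_frame + 1;
}

Written this way it is clearer that a one-page slot at gfn 1 lies entirely
inside frame 0 and therefore spans exactly one large page.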