KVM: arm64: Check for kvm_vma_mte_allowed in the critical section
On page fault, we find out about the VMA that backs the page fault early on, and quickly release the mmap_read_lock. However, using the VMA pointer after the critical section is pretty dangerous, as a teardown may happen in the meantime and the VMA be long gone.

Move the sampling of the MTE permission earlier, into the critical section, and NULL-ify the VMA pointer after that, just to be on the safe side.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20230316174546.3777507-3-maz@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
commit 8c2e8ac8ad
parent e86fc1a3a3
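The pattern being fixed is worth spelling out: anything derived from a lock-protected object must be sampled before the lock is dropped. Below is a minimal userspace sketch of the same discipline, using a pthread rwlock in place of mmap_read_lock and entirely hypothetical names (struct region, map_lock, handle_fault); it illustrates the idea, not the kernel code.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for a VMA: only safe to touch under map_lock. */
    struct region {
            bool mte_allowed;
    };

    static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
    static struct region the_region = { .mte_allowed = true };

    static void handle_fault(void)
    {
            struct region *r;
            bool mte_allowed;

            pthread_rwlock_rdlock(&map_lock);
            r = &the_region;

            /* Sample everything needed while the object is guaranteed live. */
            mte_allowed = r->mte_allowed;

            /* Don't use the pointer after the unlock -- it may have vanished. */
            r = NULL;
            pthread_rwlock_unlock(&map_lock);

            /* Decisions past this point use the sampled copy, never r. */
            if (mte_allowed)
                    printf("MTE tags allowed for this fault\n");
    }

    int main(void)
    {
            handle_fault();
            return 0;
    }

The diff below applies the same discipline to user_mem_abort(): kvm_vma_mte_allowed(vma) is sampled while mmap_read_lock is still held, and vma is then poisoned to NULL so no later path can dereference it.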
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
@@ -1218,7 +1218,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 {
 	int ret = 0;
 	bool write_fault, writable, force_pte = false;
-	bool exec_fault;
+	bool exec_fault, mte_allowed;
 	bool device = false;
 	unsigned long mmu_seq;
 	struct kvm *kvm = vcpu->kvm;
@@ -1309,6 +1309,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	fault_ipa &= ~(vma_pagesize - 1);
 
 	gfn = fault_ipa >> PAGE_SHIFT;
+	mte_allowed = kvm_vma_mte_allowed(vma);
+
+	/* Don't use the VMA after the unlock -- it may have vanished */
+	vma = NULL;
 
 	/*
 	 * Read mmu_invalidate_seq so that KVM can detect if the results of
@@ -1379,7 +1383,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
 	if (fault_status != ESR_ELx_FSC_PERM && !device && kvm_has_mte(kvm)) {
 		/* Check the VMM hasn't introduced a new disallowed VMA */
-		if (kvm_vma_mte_allowed(vma)) {
+		if (mte_allowed) {
 			sanitise_mte_tags(kvm, pfn, vma_pagesize);
 		} else {
 			ret = -EFAULT;
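For context (quoted from the surrounding file, not part of this diff), kvm_vma_mte_allowed() at this point in history is a simple flag test on the VMA, roughly:

    static bool kvm_vma_mte_allowed(struct vm_area_struct *vma)
    {
            return vma->vm_flags & VM_MTE_ALLOWED;
    }

Because the helper dereferences vma, calling it after the mmap_read_lock critical section would be a potential use-after-free if the VMA had been torn down; snapshotting the boolean before the unlock removes that window entirely.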