arm/mm: Convert to using lock_mm_and_find_vma()
arm has an additional check for address < FIRST_USER_ADDRESS before
expanding the stack.  Since FIRST_USER_ADDRESS is defined everywhere
(generally as 0), move that check to the generic expand_downwards().

Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 8b35ca3e45 (parent 7267ef7b0b)
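For context, lock_mm_and_find_vma() is the generic helper (guarded by the new LOCK_MM_AND_FIND_VMA Kconfig symbol) that this series has architecture fault handlers call instead of open-coding the mmap-lock / find_vma / expand_stack dance. The sketch below is only an illustration of the overall flow under that assumption, not the exact upstream implementation, and the _sketch name is invented for clarity:

#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/ptrace.h>

/*
 * Illustrative sketch only: the real helper is more careful about how
 * it acquires the mmap lock, but for handlers that select
 * LOCK_MM_AND_FIND_VMA the overall flow is roughly this.
 */
static struct vm_area_struct *lock_mm_and_find_vma_sketch(struct mm_struct *mm,
		unsigned long addr, struct pt_regs *regs)
{
	struct vm_area_struct *vma;

	if (!mmap_read_trylock(mm)) {
		/* Kernel-mode faults with no exception fixup must not sleep here. */
		if (!user_mode(regs) &&
		    !search_exception_tables(instruction_pointer(regs)))
			return NULL;
		mmap_read_lock(mm);
	}

	vma = find_vma(mm, addr);
	if (likely(vma && vma->vm_start <= addr))
		return vma;

	/* Fault below the VMA: only a GROWSDOWN stack may be expanded to cover it. */
	if (!vma || !(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, addr)) {
		mmap_read_unlock(mm);
		return NULL;
	}
	return vma;
}

With a helper shaped like this, the per-architecture fault handler shrinks to "look up the VMA, check permissions, call handle_mm_fault()", which is exactly what the fault.c hunks below do.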
arch/arm/Kconfig
@@ -125,6 +125,7 @@ config ARM
 	select HAVE_UID16
 	select HAVE_VIRT_CPU_ACCOUNTING_GEN
 	select IRQ_FORCED_THREADING
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_REL
 	select NEED_DMA_MAP_STATE
 	select OF_EARLY_FLATTREE if OF
arch/arm/mm/fault.c
@@ -232,37 +232,11 @@ static inline bool is_permission_fault(unsigned int fsr)
 	return false;
 }
 
-static vm_fault_t __kprobes
-__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int flags,
-		unsigned long vma_flags, struct pt_regs *regs)
-{
-	struct vm_area_struct *vma = find_vma(mm, addr);
-	if (unlikely(!vma))
-		return VM_FAULT_BADMAP;
-
-	if (unlikely(vma->vm_start > addr)) {
-		if (!(vma->vm_flags & VM_GROWSDOWN))
-			return VM_FAULT_BADMAP;
-		if (addr < FIRST_USER_ADDRESS)
-			return VM_FAULT_BADMAP;
-		if (expand_stack(vma, addr))
-			return VM_FAULT_BADMAP;
-	}
-
-	/*
-	 * ok, we have a good vm_area for this memory access, check the
-	 * permissions on the VMA allow for the fault which occurred.
-	 */
-	if (!(vma->vm_flags & vma_flags))
-		return VM_FAULT_BADACCESS;
-
-	return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
-}
-
 static int __kprobes
 do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
 	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
 	int sig, code;
 	vm_fault_t fault;
 	unsigned int flags = FAULT_FLAG_DEFAULT;
@@ -301,31 +275,21 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
-	/*
-	 * As per x86, we may deadlock here.  However, since the kernel only
-	 * validly references user space from well defined areas of the code,
-	 * we can bug out early if this is from code which shouldn't.
-	 */
-	if (!mmap_read_trylock(mm)) {
-		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
-			goto no_context;
 retry:
-		mmap_read_lock(mm);
-	} else {
-		/*
-		 * The above down_read_trylock() might have succeeded in
-		 * which case, we'll have missed the might_sleep() from
-		 * down_read()
-		 */
-		might_sleep();
-#ifdef CONFIG_DEBUG_VM
-		if (!user_mode(regs) &&
-		    !search_exception_tables(regs->ARM_pc))
-			goto no_context;
-#endif
+	vma = lock_mm_and_find_vma(mm, addr, regs);
+	if (unlikely(!vma)) {
+		fault = VM_FAULT_BADMAP;
+		goto bad_area;
 	}
 
-	fault = __do_page_fault(mm, addr, flags, vm_flags, regs);
+	/*
+	 * ok, we have a good vm_area for this memory access, check the
+	 * permissions on the VMA allow for the fault which occurred.
+	 */
+	if (!(vma->vm_flags & vm_flags))
+		fault = VM_FAULT_BADACCESS;
+	else
+		fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
 
 	/* If we need to retry but a fatal signal is pending, handle the
 	 * signal first. We do not need to release the mmap_lock because
@@ -356,6 +320,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
 		return 0;
 
+bad_area:
 	/*
 	 * If we are in kernel mode at this point, we
 	 * have no context to handle this fault with.
mm/mmap.c
@@ -2036,7 +2036,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
 	int error = 0;
 
 	address &= PAGE_MASK;
-	if (address < mmap_min_addr)
+	if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
 		return -EPERM;
 
 	/* Enforce stack_guard_gap */
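The commit message's point that FIRST_USER_ADDRESS "is defined everywhere (generally as 0)" is what makes this generic check safe: on architectures that do not override the constant, the generic fallback (in include/linux/pgtable.h, shown here from memory as a reference sketch) is simply zero, so the added condition is a no-op there:

/* Generic fallback used when an architecture does not define its own value. */
#ifndef FIRST_USER_ADDRESS
#define FIRST_USER_ADDRESS	0UL
#endif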