mm/mmap: preallocate maple nodes for brk vma expansion

If the brk VMA is the last VMA in a maple node and meets the rare criteria
that allow it to be expanded, then preallocation is necessary to avoid a
potential fs_reclaim circular locking issue when resources are low.

At the same time, use the actual (unaligned) VMA start address when calling
vma_adjust_trans_huge().

Link: https://lkml.kernel.org/r/20221011160624.1253454-1-Liam.Howlett@oracle.com
Fixes: 2e7ce7d354 ("mm/mmap: change do_brk_flags() to expand existing VMA and add do_brk_munmap()")
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Reported-by: Yu Zhao <yuzhao@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 mm/mmap.c | 18 ++++++------------
 1 file changed, 6 insertions(+), 12 deletions(-)

diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2942,17 +2942,18 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma,
 	if (vma &&
 	    (!vma->anon_vma || list_is_singular(&vma->anon_vma_chain)) &&
 	    ((vma->vm_flags & ~VM_SOFTDIRTY) == flags)) {
-		mas->index = vma->vm_start;
-		mas->last = addr + len - 1;
-		vma_adjust_trans_huge(vma, addr, addr + len, 0);
+		mas_set_range(mas, vma->vm_start, addr + len - 1);
+		if (mas_preallocate(mas, vma, GFP_KERNEL))
+			return -ENOMEM;
+
+		vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0);
 		if (vma->anon_vma) {
 			anon_vma_lock_write(vma->anon_vma);
 			anon_vma_interval_tree_pre_update_vma(vma);
 		}
 		vma->vm_end = addr + len;
 		vma->vm_flags |= VM_SOFTDIRTY;
-		if (mas_store_gfp(mas, vma, GFP_KERNEL))
-			goto mas_expand_failed;
+		mas_store_prealloc(mas, vma);
 
 		if (vma->anon_vma) {
 			anon_vma_interval_tree_post_update_vma(vma);
@@ -2993,13 +2994,6 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma,
 vma_alloc_fail:
 	vm_unacct_memory(len >> PAGE_SHIFT);
 	return -ENOMEM;
-
-mas_expand_failed:
-	if (vma->anon_vma) {
-		anon_vma_interval_tree_post_update_vma(vma);
-		anon_vma_unlock_write(vma->anon_vma);
-	}
-	return -ENOMEM;
 }
 
 int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)