From 0818e739b5c061b0251c30152380600fb9b84c0c Mon Sep 17 00:00:00 2001
From: "Joel Fernandes (Google)"
Date: Mon, 4 Sep 2023 18:08:04 +0000
Subject: [PATCH] mm/vmalloc: add a safer version of find_vm_area() for debug

It is unsafe to dump vmalloc area information when trying to do so from
some contexts.  Add a safer trylock version of the same function to do a
best-effort VMA finding and use it from vmalloc_dump_obj().

[applied test robot feedback on unused function fix.]
[applied Uladzislau feedback on locking.]

Link: https://lkml.kernel.org/r/20230904180806.1002832-1-joel@joelfernandes.org
Fixes: 98f180837a89 ("mm: Make mem_dump_obj() handle vmalloc() memory")
Signed-off-by: Joel Fernandes (Google)
Reviewed-by: Uladzislau Rezki (Sony)
Reported-by: Zhen Lei
Cc: Paul E. McKenney
Cc: Zqiang
Cc:
Cc: Matthew Wilcox (Oracle)
Signed-off-by: Andrew Morton
---
 mm/vmalloc.c | 26 ++++++++++++++++++++++----
 1 file changed, 22 insertions(+), 4 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 228a4a5312f2..ef8599d394fd 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -4278,14 +4278,32 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
 #ifdef CONFIG_PRINTK
 bool vmalloc_dump_obj(void *object)
 {
-	struct vm_struct *vm;
 	void *objp = (void *)PAGE_ALIGN((unsigned long)object);
+	const void *caller;
+	struct vm_struct *vm;
+	struct vmap_area *va;
+	unsigned long addr;
+	unsigned int nr_pages;
 
-	vm = find_vm_area(objp);
-	if (!vm)
+	if (!spin_trylock(&vmap_area_lock))
 		return false;
+	va = __find_vmap_area((unsigned long)objp, &vmap_area_root);
+	if (!va) {
+		spin_unlock(&vmap_area_lock);
+		return false;
+	}
+
+	vm = va->vm;
+	if (!vm) {
+		spin_unlock(&vmap_area_lock);
+		return false;
+	}
+	addr = (unsigned long)vm->addr;
+	caller = vm->caller;
+	nr_pages = vm->nr_pages;
+	spin_unlock(&vmap_area_lock);
 	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
-		vm->nr_pages, (unsigned long)vm->addr, vm->caller);
+		nr_pages, addr, caller);
 	return true;
 }
 #endif
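
The core of the change is the best-effort locking pattern: take
vmap_area_lock only if it is immediately available, snapshot the fields
of interest, drop the lock, and only then print.  Below is a minimal
userspace sketch of that same pattern, using pthread_mutex_trylock()
in place of the kernel's spin_trylock(); names like state_lock and
dump_state() are illustrative only and are not kernel APIs.

/*
 * Userspace sketch of the trylock pattern used in vmalloc_dump_obj().
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

/* Fields protected by state_lock (illustrative values). */
static unsigned int nr_pages = 4;
static unsigned long start_addr = 0x7f0000000000UL;

static bool dump_state(void)
{
	unsigned int pages;
	unsigned long addr;

	/* Best effort: bail out instead of blocking, mirroring
	 * spin_trylock(&vmap_area_lock) in the patch. */
	if (pthread_mutex_trylock(&state_lock) != 0)
		return false;

	/* Snapshot the fields under the lock ... */
	pages = nr_pages;
	addr = start_addr;
	pthread_mutex_unlock(&state_lock);

	/* ... and print only after dropping it. */
	printf("%u-page region starting at %#lx\n", pages, addr);
	return true;
}

int main(void)
{
	if (!dump_state())
		fprintf(stderr, "lock contended, skipped dump\n");
	return 0;
}

As in the patch, the snapshot-then-unlock ordering keeps the lock hold
time short and ensures the printing path never runs with the lock held.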