mm: hugetlb_vmemmap: fix a race between vmemmap pmd split
The local variable @page in __split_vmemmap_huge_pmd() is read from the pmd
without holding page_table_lock, so it may possibly end up pointing at a page
table page instead of the huge pmd page.
The effect shows up in set_pte_at(), since we may pass an invalid page
struct: if set_pte_at() wants to access the page struct (e.g. when
CONFIG_PAGE_TABLE_CHECK is enabled), it may crash the kernel.
So fix it. Also inline __split_vmemmap_huge_pmd() since it has only one
user.
Link: https://lkml.kernel.org/r/20230707033859.16148-1-songmuchun@bytedance.com
Fixes: d8d55f5616 ("mm: sparsemem: use page table lock to protect kernel pmd operations")
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 3ce2c24cb6
parent c200a7119b
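For reference, the sketch below is an annotated excerpt of the pre-fix code
(the caller removed by this patch plus the first relevant line of the old
helper); the comments marking the race window are annotations, not part of
the original source:

/* Pre-fix call sequence (annotated excerpt of the removed code) */
static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
{
        int leaf;

        spin_lock(&init_mm.page_table_lock);
        leaf = pmd_leaf(*pmd);          /* pmd is a huge (leaf) entry here... */
        spin_unlock(&init_mm.page_table_lock);

        if (!leaf)
                return 0;

        /* ...but a concurrent split can replace *pmd before this call... */
        return __split_vmemmap_huge_pmd(pmd, start);
}

static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
{
        /*
         * ...so, without the lock, pmd_page() may now return the PTE page
         * table page installed by that split rather than the huge page's
         * head page, and the remap loop then builds PTEs from bogus
         * struct pages.
         */
        struct page *page = pmd_page(*pmd);
        ...
}

The fix closes this window by doing the pmd_leaf() check and the pmd_page()
read in a single critical section, as the diff below shows.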
@@ -36,14 +36,22 @@ struct vmemmap_remap_walk {
 	struct list_head *vmemmap_pages;
 };
 
-static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
+static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
 {
 	pmd_t __pmd;
 	int i;
 	unsigned long addr = start;
-	struct page *page = pmd_page(*pmd);
-	pte_t *pgtable = pte_alloc_one_kernel(&init_mm);
+	struct page *head;
+	pte_t *pgtable;
+
+	spin_lock(&init_mm.page_table_lock);
+	head = pmd_leaf(*pmd) ? pmd_page(*pmd) : NULL;
+	spin_unlock(&init_mm.page_table_lock);
 
+	if (!head)
+		return 0;
+
+	pgtable = pte_alloc_one_kernel(&init_mm);
 	if (!pgtable)
 		return -ENOMEM;
 
@@ -53,7 +61,7 @@ static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
 		pte_t entry, *pte;
 		pgprot_t pgprot = PAGE_KERNEL;
 
-		entry = mk_pte(page + i, pgprot);
+		entry = mk_pte(head + i, pgprot);
 		pte = pte_offset_kernel(&__pmd, addr);
 		set_pte_at(&init_mm, addr, pte, entry);
 	}
@@ -65,8 +73,8 @@ static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
 	 * be treated as indepdenent small pages (as they can be freed
 	 * individually).
 	 */
-	if (!PageReserved(page))
-		split_page(page, get_order(PMD_SIZE));
+	if (!PageReserved(head))
+		split_page(head, get_order(PMD_SIZE));
 
 	/* Make pte visible before pmd. See comment in pmd_install(). */
 	smp_wmb();
@@ -80,20 +88,6 @@ static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
 	return 0;
 }
 
-static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
-{
-	int leaf;
-
-	spin_lock(&init_mm.page_table_lock);
-	leaf = pmd_leaf(*pmd);
-	spin_unlock(&init_mm.page_table_lock);
-
-	if (!leaf)
-		return 0;
-
-	return __split_vmemmap_huge_pmd(pmd, start);
-}
-
 static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
 			      unsigned long end,
 			      struct vmemmap_remap_walk *walk)