2019-03-29 14:12:17 +08:00
|
|
|
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
|
2018-10-11 02:06:46 +08:00
|
|
|
From: Thomas Gleixner <tglx@linutronix.de>
|
|
|
|
Date: Mon, 17 Sep 2018 16:29:14 +0200
|
2019-03-29 14:12:17 +08:00
|
|
|
Subject: [PATCH] x86/mm/cpa: Add sanity check for existing mappings
|
2018-10-11 02:06:46 +08:00
|
|
|
|
|
|
|
With the range check it is possible to do a quick verification that the
|
|
|
|
current mapping is correct vs. the static protection areas.
|
|
|
|
|
|
|
|
In case an incorrect mapping is detected a warning is emitted and the large
|
|
|
|
page is split up. If the large page is a 2M page, then the split code is
|
|
|
|
forced to check the static protections for the PTE entries to fix up the
|
|
|
|
incorrectness. For 1G pages this can't be done easily because that would
|
|
|
|
require to either find the offending 2M areas before the split or
|
|
|
|
afterwards. For now just warn about that case and revisit it when reported.
|
|
|
|
|
|
|
|
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
|
|
|
|
Reviewed-by: Dave Hansen <dave.hansen@intel.com>
|
|
|
|
Cc: Peter Zijlstra <peterz@infradead.org>
|
|
|
|
Cc: Bin Yang <bin.yang@intel.com>
|
|
|
|
Cc: Mark Gross <mark.gross@intel.com>
|
|
|
|
Link: https://lkml.kernel.org/r/20180917143546.331408643@linutronix.de
|
|
|
|
|
|
|
|
Cc: Zhang Ning <ning.a.zhang@intel.com>
|
|
|
|
Signed-off-by: Lili Li <lili.li@intel.com>
|
|
|
|
---
|
|
|
|
arch/x86/mm/pageattr.c | 77 ++++++++++++++++++++++++++++++++++++------
|
|
|
|
1 file changed, 67 insertions(+), 10 deletions(-)
|
|
|
|
|
|
|
|
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
|
2020-10-27 02:14:06 +08:00
|
|
|
index 0c9fd2fe8d57..9cc360e539cf 100644
|
2018-10-11 02:06:46 +08:00
|
|
|
--- a/arch/x86/mm/pageattr.c
|
|
|
|
+++ b/arch/x86/mm/pageattr.c
|
|
|
|
@@ -37,12 +37,14 @@ struct cpa_data {
|
|
|
|
unsigned long numpages;
|
|
|
|
int flags;
|
|
|
|
unsigned long pfn;
|
|
|
|
- unsigned force_split : 1;
|
|
|
|
+ unsigned force_split : 1,
|
|
|
|
+ force_static_prot : 1;
|
|
|
|
int curpage;
|
|
|
|
struct page **pages;
|
|
|
|
};
|
|
|
|
|
|
|
|
enum cpa_warn {
|
|
|
|
+ CPA_CONFLICT,
|
|
|
|
CPA_PROTECT,
|
|
|
|
CPA_DETECT,
|
|
|
|
};
|
|
|
|
@@ -501,6 +503,7 @@ static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val,
|
|
|
|
unsigned long pfn, const char *txt)
|
|
|
|
{
|
|
|
|
static const char *lvltxt[] = {
|
|
|
|
+ [CPA_CONFLICT] = "conflict",
|
|
|
|
[CPA_PROTECT] = "protect",
|
|
|
|
[CPA_DETECT] = "detect",
|
|
|
|
};
|
|
|
|
@@ -743,7 +746,7 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
|
|
|
|
struct cpa_data *cpa)
|
|
|
|
{
|
|
|
|
unsigned long numpages, pmask, psize, lpaddr, addr, pfn, old_pfn;
|
|
|
|
- pgprot_t old_prot, new_prot, req_prot;
|
|
|
|
+ pgprot_t old_prot, new_prot, req_prot, chk_prot;
|
|
|
|
pte_t new_pte, old_pte, *tmp;
|
|
|
|
enum pg_level level;
|
|
|
|
int i;
|
|
|
|
@@ -819,6 +822,23 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
|
|
|
|
lpaddr = address & pmask;
|
|
|
|
numpages = psize >> PAGE_SHIFT;
|
|
|
|
|
|
|
|
+ /*
|
|
|
|
+ * Sanity check that the existing mapping is correct versus the static
|
|
|
|
+ * protections. static_protections() guards against !PRESENT, so no
|
|
|
|
+ * extra conditional required here.
|
|
|
|
+ */
|
|
|
|
+ chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
|
|
|
|
+ CPA_CONFLICT);
|
|
|
|
+
|
|
|
|
+ if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
|
|
|
|
+ /*
|
|
|
|
+ * Split the large page and tell the split code to
|
|
|
|
+ * enforce static protections.
|
|
|
|
+ */
|
|
|
|
+ cpa->force_static_prot = 1;
|
|
|
|
+ return 1;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
/*
|
|
|
|
* Make sure that the requested pgprot does not violate the static
|
|
|
|
* protections. Check the full large page whether one of the pages
|
|
|
|
@@ -828,8 +848,8 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
|
|
|
|
new_prot = static_protections(req_prot, address, pfn, 1, CPA_DETECT);
|
|
|
|
pfn = old_pfn;
|
|
|
|
for (i = 0, addr = lpaddr; i < numpages; i++, addr += PAGE_SIZE, pfn++) {
|
|
|
|
- pgprot_t chk_prot = static_protections(req_prot, addr, pfn, 1,
|
|
|
|
- CPA_DETECT);
|
|
|
|
+ chk_prot = static_protections(req_prot, addr, pfn, 1,
|
|
|
|
+ CPA_DETECT);
|
|
|
|
cpa_inc_4k_checked();
|
|
|
|
if (pgprot_val(chk_prot) != pgprot_val(new_prot))
|
|
|
|
return 1;
|
|
|
|
@@ -871,15 +891,50 @@ static int should_split_large_page(pte_t *kpte, unsigned long address,
|
|
|
|
return do_split;
|
|
|
|
}
|
|
|
|
|
|
|
|
+static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
|
|
|
|
+ pgprot_t ref_prot, unsigned long address,
|
|
|
|
+ unsigned long size)
|
|
|
|
+{
|
|
|
|
+ unsigned int npg = PFN_DOWN(size);
|
|
|
|
+ pgprot_t prot;
|
|
|
|
+
|
|
|
|
+ /*
|
|
|
|
+ * If should_split_large_page() discovered an inconsistent mapping,
|
|
|
|
+ * remove the invalid protection in the split mapping.
|
|
|
|
+ */
|
|
|
|
+ if (!cpa->force_static_prot)
|
|
|
|
+ goto set;
|
|
|
|
+
|
|
|
|
+ prot = static_protections(ref_prot, address, pfn, npg, CPA_PROTECT);
|
|
|
|
+
|
|
|
|
+ if (pgprot_val(prot) == pgprot_val(ref_prot))
|
|
|
|
+ goto set;
|
|
|
|
+
|
|
|
|
+ /*
|
|
|
|
+ * If this is splitting a PMD, fix it up. PUD splits cannot be
|
|
|
|
+ * fixed trivially as that would require to rescan the newly
|
|
|
|
+ * installed PMD mappings after returning from split_large_page()
|
|
|
|
+ * so an eventual further split can allocate the necessary PTE
|
|
|
|
+ * pages. Warn for now and revisit it in case this actually
|
|
|
|
+ * happens.
|
|
|
|
+ */
|
|
|
|
+ if (size == PAGE_SIZE)
|
|
|
|
+ ref_prot = prot;
|
|
|
|
+ else
|
|
|
|
+ pr_warn_once("CPA: Cannot fixup static protections for PUD split\n");
|
|
|
|
+set:
|
|
|
|
+ set_pte(pte, pfn_pte(pfn, ref_prot));
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
static int
|
|
|
|
__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
|
|
|
|
struct page *base)
|
|
|
|
{
|
|
|
|
+ unsigned long lpaddr, lpinc, ref_pfn, pfn, pfninc = 1;
|
|
|
|
pte_t *pbase = (pte_t *)page_address(base);
|
|
|
|
- unsigned long ref_pfn, pfn, pfninc = 1;
|
|
|
|
unsigned int i, level;
|
|
|
|
- pte_t *tmp;
|
|
|
|
pgprot_t ref_prot;
|
|
|
|
+ pte_t *tmp;
|
|
|
|
|
|
|
|
spin_lock(&pgd_lock);
|
|
|
|
/*
|
|
|
|
@@ -902,15 +957,17 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
|
|
|
|
* PAT bit to correct position.
|
|
|
|
*/
|
|
|
|
ref_prot = pgprot_large_2_4k(ref_prot);
|
|
|
|
-
|
|
|
|
ref_pfn = pmd_pfn(*(pmd_t *)kpte);
|
|
|
|
+ lpaddr = address & PMD_MASK;
|
|
|
|
+ lpinc = PAGE_SIZE;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case PG_LEVEL_1G:
|
|
|
|
ref_prot = pud_pgprot(*(pud_t *)kpte);
|
|
|
|
ref_pfn = pud_pfn(*(pud_t *)kpte);
|
|
|
|
pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
|
|
|
|
-
|
|
|
|
+ lpaddr = address & PUD_MASK;
|
|
|
|
+ lpinc = PMD_SIZE;
|
|
|
|
/*
|
|
|
|
* Clear the PSE flags if the PRESENT flag is not set
|
|
|
|
* otherwise pmd_present/pmd_huge will return true
|
|
|
|
@@ -931,8 +988,8 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
|
|
|
|
* Get the target pfn from the original entry:
|
|
|
|
*/
|
|
|
|
pfn = ref_pfn;
|
|
|
|
- for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
|
|
|
|
- set_pte(&pbase[i], pfn_pte(pfn, ref_prot));
|
|
|
|
+ for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc, lpaddr += lpinc)
|
|
|
|
+ split_set_pte(cpa, pbase + i, pfn, ref_prot, lpaddr, lpinc);
|
|
|
|
|
|
|
|
if (virt_addr_valid(address)) {
|
|
|
|
unsigned long pfn = PFN_DOWN(__pa(address));
|
|
|
|
--
|
2019-04-08 18:08:36 +08:00
|
|
|
https://clearlinux.org
|
2018-10-11 02:06:46 +08:00
|
|
|
|