From 3d412266bce92aba2053f2aa8cf998b6cc6d919b Mon Sep 17 00:00:00 2001 From: Binbin Wu Date: Mon, 18 Nov 2019 15:15:16 +0800 Subject: [PATCH] hv: ept: build 4KB page mapping in EPT for RTVM for MCE on PSC Determinism is important for RTVM. The mitigation for MCE on Page Size Change converts a large page to 4KB pages at runtime during the vmexit triggered by the instruction fetch in the large page. These vmexits increase nondeterminism, which should be avoided for RTVM. This patch builds 4KB page mapping in EPT for RTVM to avoid these vmexits. Tracked-On: #4101 Signed-off-by: Binbin Wu Acked-by: Eddie Dong --- hypervisor/arch/x86/page.c | 9 ++++++++- hypervisor/arch/x86/pagetable.c | 6 ++++-- hypervisor/include/arch/x86/page.h | 1 + 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/hypervisor/arch/x86/page.c b/hypervisor/arch/x86/page.c index 1328e5f2e..1b2ee5e87 100644 --- a/hypervisor/arch/x86/page.c +++ b/hypervisor/arch/x86/page.c @@ -12,6 +12,7 @@ #include #include #include +#include static struct page ppt_pml4_pages[PML4_PAGE_NUM(CONFIG_PLATFORM_RAM_SIZE + PLATFORM_LO_MMIO_SIZE)]; static struct page ppt_pdpt_pages[PDPT_PAGE_NUM(CONFIG_PLATFORM_RAM_SIZE + PLATFORM_LO_MMIO_SIZE)]; @@ -31,7 +32,7 @@ static inline uint64_t ppt_get_default_access_right(void) return (PAGE_PRESENT | PAGE_RW | PAGE_USER); } -static inline void ppt_clflush_pagewalk(const void* etry __attribute__((unused))) +static inline void ppt_clflush_pagewalk(const void* entry __attribute__((unused))) { } @@ -66,6 +67,7 @@ static inline void nop_recover_exe_right(uint64_t *entry __attribute__((unused)) const struct memory_ops ppt_mem_ops = { .info = &ppt_pages_info, + .large_page_enabled = true, .get_default_access_right = ppt_get_default_access_right, .pgentry_present = ppt_pgentry_present, .get_pml4_page = ppt_get_pml4_page, @@ -205,11 +207,16 @@ void init_ept_mem_ops(struct memory_ops *mem_ops, uint16_t vm_id) mem_ops->get_pd_page = ept_get_pd_page; mem_ops->get_pt_page = 
ept_get_pt_page; mem_ops->clflush_pagewalk = ept_clflush_pagewalk; + mem_ops->large_page_enabled = true; /* Mitigation for issue "Machine Check Error on Page Size Change" */ if (is_ept_force_4k_ipage()) { mem_ops->tweak_exe_right = ept_tweak_exe_right; mem_ops->recover_exe_right = ept_recover_exe_right; + /* For RTVM, build 4KB page mapping in EPT */ + if (is_rt_vm(get_vm_from_vmid(vm_id))) { + mem_ops->large_page_enabled = false; + } } else { mem_ops->tweak_exe_right = nop_tweak_exe_right; mem_ops->recover_exe_right = nop_recover_exe_right; diff --git a/hypervisor/arch/x86/pagetable.c b/hypervisor/arch/x86/pagetable.c index cbe0baf16..f2e919b2d 100644 --- a/hypervisor/arch/x86/pagetable.c +++ b/hypervisor/arch/x86/pagetable.c @@ -296,7 +296,8 @@ static void add_pde(const uint64_t *pdpte, uint64_t paddr_start, uint64_t vaddr_ pr_fatal("%s, pde 0x%lx is already present!\n", __func__, vaddr); } else { if (mem_ops->pgentry_present(*pde) == 0UL) { - if (mem_aligned_check(paddr, PDE_SIZE) && + if (mem_ops->large_page_enabled && + mem_aligned_check(paddr, PDE_SIZE) && mem_aligned_check(vaddr, PDE_SIZE) && (vaddr_next <= vaddr_end)) { mem_ops->tweak_exe_right(&prot); @@ -343,7 +344,8 @@ static void add_pdpte(const uint64_t *pml4e, uint64_t paddr_start, uint64_t vadd pr_fatal("%s, pdpte 0x%lx is already present!\n", __func__, vaddr); } else { if (mem_ops->pgentry_present(*pdpte) == 0UL) { - if (mem_aligned_check(paddr, PDPTE_SIZE) && + if (mem_ops->large_page_enabled && + mem_aligned_check(paddr, PDPTE_SIZE) && mem_aligned_check(vaddr, PDPTE_SIZE) && (vaddr_next <= vaddr_end)) { mem_ops->tweak_exe_right(&prot); diff --git a/hypervisor/include/arch/x86/page.h b/hypervisor/include/arch/x86/page.h index 27c3507b4..ea86ac33b 100644 --- a/hypervisor/include/arch/x86/page.h +++ b/hypervisor/include/arch/x86/page.h @@ -64,6 +64,7 @@ union pgtable_pages_info { struct memory_ops { union pgtable_pages_info *info; + bool large_page_enabled; uint64_t (*get_default_access_right)(void); 
uint64_t (*pgentry_present)(uint64_t pte); struct page *(*get_pml4_page)(const union pgtable_pages_info *info);