hv: ept: build 4KB page mapping in EPT for RTVM for MCE on PSC

Determinism is important for an RTVM. The mitigation for MCE on
Page Size Change converts a large page into 4KB pages at runtime,
during the vmexit triggered by an instruction fetch from that large
page. These vmexits add nondeterminism, which should be avoided for
an RTVM. This patch builds the EPT mapping for an RTVM with 4KB
pages from the start, so these vmexits never occur.

Tracked-On: #4101
Signed-off-by: Binbin Wu <binbin.wu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
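
For illustration only (not part of the commit): a minimal C sketch of the mapping policy the message describes. The vm_desc type, pick_page_size() helper, and *_SKETCH constants are invented stand-ins for this example, not ACRN APIs.

/*
 * Illustrative sketch: for an RTVM, always map with 4KB pages so the
 * MCE-on-PSC mitigation never has to split a large page during a
 * vmexit on instruction fetch. All names here are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_SIZE_SKETCH   0x1000ULL      /* 4KB page */
#define PDE_SIZE_SKETCH   0x200000ULL    /* 2MB large page */

struct vm_desc {
	bool is_rt;                      /* real-time VM? */
};

/* Decide the page size for one mapping step: large pages are used only
 * for non-RT VMs, and only when alignment and remaining size allow. */
static uint64_t pick_page_size(const struct vm_desc *vm, uint64_t gpa, uint64_t size)
{
	bool large_ok = !vm->is_rt &&
			((gpa & (PDE_SIZE_SKETCH - 1ULL)) == 0ULL) &&
			(size >= PDE_SIZE_SKETCH);

	return large_ok ? PDE_SIZE_SKETCH : PTE_SIZE_SKETCH;
}

int main(void)
{
	struct vm_desc rtvm = { .is_rt = true };
	struct vm_desc user_vm = { .is_rt = false };

	/* The RTVM gets 4KB even where a 2MB mapping would be legal. */
	printf("rtvm:    %#llx\n", (unsigned long long)pick_page_size(&rtvm, 0x200000ULL, 0x400000ULL));
	printf("user vm: %#llx\n", (unsigned long long)pick_page_size(&user_vm, 0x200000ULL, 0x400000ULL));
	return 0;
}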
Commit 3d412266bc (parent 0570993b40), authored by Binbin Wu on 2019-11-18 15:15:16 +08:00, committed by wenlingz.
3 changed files with 13 additions and 3 deletions.


@@ -12,6 +12,7 @@
 #include <vtd.h>
 #include <vm_configurations.h>
 #include <security.h>
+#include <vm.h>
 
 static struct page ppt_pml4_pages[PML4_PAGE_NUM(CONFIG_PLATFORM_RAM_SIZE + PLATFORM_LO_MMIO_SIZE)];
 static struct page ppt_pdpt_pages[PDPT_PAGE_NUM(CONFIG_PLATFORM_RAM_SIZE + PLATFORM_LO_MMIO_SIZE)];
@@ -31,7 +32,7 @@ static inline uint64_t ppt_get_default_access_right(void)
 	return (PAGE_PRESENT | PAGE_RW | PAGE_USER);
 }
 
-static inline void ppt_clflush_pagewalk(const void* etry __attribute__((unused)))
+static inline void ppt_clflush_pagewalk(const void* entry __attribute__((unused)))
 {
 }
@@ -66,6 +67,7 @@ static inline void nop_recover_exe_right(uint64_t *entry __attribute__((unused))
 
 const struct memory_ops ppt_mem_ops = {
 	.info = &ppt_pages_info,
+	.large_page_enabled = true,
 	.get_default_access_right = ppt_get_default_access_right,
 	.pgentry_present = ppt_pgentry_present,
 	.get_pml4_page = ppt_get_pml4_page,
@@ -205,11 +207,16 @@ void init_ept_mem_ops(struct memory_ops *mem_ops, uint16_t vm_id)
 	mem_ops->get_pd_page = ept_get_pd_page;
 	mem_ops->get_pt_page = ept_get_pt_page;
 	mem_ops->clflush_pagewalk = ept_clflush_pagewalk;
+	mem_ops->large_page_enabled = true;
 
 	/* Mitigation for issue "Machine Check Error on Page Size Change" */
 	if (is_ept_force_4k_ipage()) {
 		mem_ops->tweak_exe_right = ept_tweak_exe_right;
 		mem_ops->recover_exe_right = ept_recover_exe_right;
+		/* For RTVM, build 4KB page mapping in EPT */
+		if (is_rt_vm(get_vm_from_vmid(vm_id))) {
+			mem_ops->large_page_enabled = false;
+		}
 	} else {
 		mem_ops->tweak_exe_right = nop_tweak_exe_right;
 		mem_ops->recover_exe_right = nop_recover_exe_right;


@@ -296,7 +296,8 @@ static void add_pde(const uint64_t *pdpte, uint64_t paddr_start, uint64_t vaddr_
 		pr_fatal("%s, pde 0x%lx is already present!\n", __func__, vaddr);
 	} else {
 		if (mem_ops->pgentry_present(*pde) == 0UL) {
-			if (mem_aligned_check(paddr, PDE_SIZE) &&
+			if (mem_ops->large_page_enabled &&
+				mem_aligned_check(paddr, PDE_SIZE) &&
 				mem_aligned_check(vaddr, PDE_SIZE) &&
 				(vaddr_next <= vaddr_end)) {
 				mem_ops->tweak_exe_right(&prot);
@@ -343,7 +344,8 @@ static void add_pdpte(const uint64_t *pml4e, uint64_t paddr_start, uint64_t vadd
 		pr_fatal("%s, pdpte 0x%lx is already present!\n", __func__, vaddr);
 	} else {
 		if (mem_ops->pgentry_present(*pdpte) == 0UL) {
-			if (mem_aligned_check(paddr, PDPTE_SIZE) &&
+			if (mem_ops->large_page_enabled &&
+				mem_aligned_check(paddr, PDPTE_SIZE) &&
 				mem_aligned_check(vaddr, PDPTE_SIZE) &&
 				(vaddr_next <= vaddr_end)) {
 				mem_ops->tweak_exe_right(&prot);


@@ -64,6 +64,7 @@ union pgtable_pages_info {
 
 struct memory_ops {
 	union pgtable_pages_info *info;
+	bool large_page_enabled;
 	uint64_t (*get_default_access_right)(void);
 	uint64_t (*pgentry_present)(uint64_t pte);
 	struct page *(*get_pml4_page)(const union pgtable_pages_info *info);
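
As a usage note, again an illustrative sketch rather than ACRN code: a mapper consults the new large_page_enabled field before choosing a large-page entry, as the add_pde() hunk above now does. The trimmed struct and helpers below are hypothetical stand-ins.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PDE_SIZE (1ULL << 21)	/* 2MB large-page size */

/* Trimmed stand-in with only the field this example needs. */
struct memory_ops_sketch {
	bool large_page_enabled;
};

static bool mem_aligned_check(uint64_t value, uint64_t req_align)
{
	return (value & (req_align - 1ULL)) == 0ULL;
}

/* True when a 2MB PDE mapping may be used for this range: the flag
 * gates the same alignment and range checks as add_pde() above. */
static bool can_use_large_pde(const struct memory_ops_sketch *mem_ops,
		uint64_t paddr, uint64_t vaddr, uint64_t vaddr_next, uint64_t vaddr_end)
{
	return mem_ops->large_page_enabled &&
		mem_aligned_check(paddr, PDE_SIZE) &&
		mem_aligned_check(vaddr, PDE_SIZE) &&
		(vaddr_next <= vaddr_end);
}

int main(void)
{
	struct memory_ops_sketch rtvm_ops = { .large_page_enabled = false };
	struct memory_ops_sketch std_ops  = { .large_page_enabled = true };

	/* Same aligned range: a standard VM maps it with one 2MB PDE,
	 * an RTVM falls through to 4KB PTE mappings. */
	printf("std:  %d\n", can_use_large_pde(&std_ops, 0x200000ULL, 0x200000ULL, 0x400000ULL, 0x400000ULL));
	printf("rtvm: %d\n", can_use_large_pde(&rtvm_ops, 0x200000ULL, 0x200000ULL, 0x400000ULL, 0x400000ULL));
	return 0;
}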