hv: mmu: relax 1GB CPU-side support constraint

Some platforms still don't support 1GB large pages on the CPU side; for example, Lakefield, TNT, and EHL have a silicon bug that prevents the CPU from supporting 1GB large pages.

This patch relaxes that constraint to support more hardware platforms.

Note that this patch doesn't relax the constraint on the IOMMU side.

Tracked-On: #4550
Signed-off-by: Li Fei1 <fei1.li@intel.com>
Li Fei1 <fei1.li@intel.com>, 2020-06-05 09:50:56 +08:00, committed by wenlingz
parent 6e57553015
commit 65e4a16e6a
5 changed files with 57 additions and 33 deletions
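At its core, the change below replaces the single bool large_page_enabled flag in struct memory_ops with a per-level predicate, so 2MB (PD-level) and 1GB (PDPT-level) support can be reported independently. Here is a minimal, self-contained sketch of that pattern; has_1gb_cap is a hypothetical stand-in for the real capability probes (pcpu_has_vmx_ept_cap/pcpu_has_cap), not an ACRN symbol.

#include <stdbool.h>
#include <stdio.h>

enum _page_table_level { IA32E_PML4 = 0, IA32E_PDPT = 1, IA32E_PD = 2, IA32E_PT = 3 };

/* Hypothetical stand-in for the platform capability probe. */
static bool has_1gb_cap = false;

/* Per-level predicate: 2MB (PD) is always available, 1GB (PDPT) only
 * when the platform reports it; other levels never map large pages. */
static bool large_page_support(enum _page_table_level level)
{
	bool support;

	if (level == IA32E_PD) {
		support = true;
	} else if (level == IA32E_PDPT) {
		support = has_1gb_cap;
	} else {
		support = false;
	}

	return support;
}

int main(void)
{
	printf("2MB: %d, 1GB: %d\n",
	       large_page_support(IA32E_PD), large_page_support(IA32E_PDPT));
	return 0;
}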

hypervisor/arch/x86/cpu_caps.c

@@ -385,9 +385,14 @@ static int32_t check_vmx_mmu_cap(void)
 		   !pcpu_has_vmx_vpid_cap(VMX_VPID_INVVPID_GLOBAL_CONTEXT)) {
 		printf("%s, invvpid not supported\n", __func__);
 		ret = -ENODEV;
-	} else if (!pcpu_has_vmx_ept_cap(VMX_EPT_1GB_PAGE)) {
-		printf("%s, ept not support 1GB large page\n", __func__);
+	} else if (!pcpu_has_vmx_ept_cap(VMX_EPT_2MB_PAGE)) {
+		printf("%s, ept not support 2MB large page\n", __func__);
 		ret = -ENODEV;
+	} else if (pcpu_has_vmx_ept_cap(VMX_EPT_1GB_PAGE) !=
+			pcpu_has_cap(X86_FEATURE_PAGE1GB)) {
+		/* This just for simple large_page_support in arch/x86/page.c */
+		ret = -ENODEV;
+		printf("%s ept support 1GB large page while mmu is not or opposite\n", __func__);
 	} else {
 		/* No other state currently, do nothing */
 	}
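The new != comparison rejects platforms where EPT and the host MMU disagree about 1GB page support. Per the in-code comment, this keeps the shared large_page_support() helper in arch/x86/page.c simple: one answer then holds for both the hypervisor page tables (PPT) and EPT. A tiny sketch of the check's logic, with hypothetical flags standing in for the real probes:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical flags standing in for the real capability probes. */
	bool ept_has_1gb = true;   /* pcpu_has_vmx_ept_cap(VMX_EPT_1GB_PAGE) */
	bool mmu_has_1gb = false;  /* pcpu_has_cap(X86_FEATURE_PAGE1GB) */

	/* Boolean inequality acts as XOR: fail when exactly one side
	 * supports 1GB pages. */
	if (ept_has_1gb != mmu_has_1gb) {
		printf("ept support 1GB large page while mmu is not or opposite\n");
		return 1;  /* corresponds to ret = -ENODEV above */
	}
	return 0;
}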
@@ -439,9 +444,6 @@ int32_t detect_hardware_support(void)
 	} else if (!pcpu_has_cap(X86_FEATURE_CLFLUSHOPT)) {
 		printf("%s, CLFLUSHOPT not supported\n", __func__);
 		ret = -ENODEV;
-	} else if (!pcpu_has_cap(X86_FEATURE_PAGE1GB)) {
-		printf("%s, not support 1GB page\n", __func__);
-		ret = -ENODEV;
 	} else if (!pcpu_has_cap(X86_FEATURE_VMX)) {
 		printf("%s, vmx not supported\n", __func__);
 		ret = -ENODEV;

hypervisor/arch/x86/page.c

@@ -5,6 +5,7 @@
  */
 #include <types.h>
 #include <rtl.h>
+#include <cpufeatures.h>
 #include <pgtable.h>
 #include <page.h>
 #include <mmu.h>
@@ -28,6 +29,22 @@ static union pgtable_pages_info ppt_pages_info = {
 	}
 };
 
+/* @pre: The PPT and EPT have same page granularity */
+static inline bool large_page_support(enum _page_table_level level)
+{
+	bool support;
+
+	if (level == IA32E_PD) {
+		support = true;
+	} else if (level == IA32E_PDPT) {
+		support = pcpu_has_vmx_ept_cap(VMX_EPT_1GB_PAGE);
+	} else {
+		support = false;
+	}
+
+	return support;
+}
+
 static inline uint64_t ppt_get_default_access_right(void)
 {
 	return (PAGE_PRESENT | PAGE_RW | PAGE_USER);
@@ -68,7 +85,7 @@ static inline void nop_recover_exe_right(uint64_t *entry __attribute__((unused))
 
 const struct memory_ops ppt_mem_ops = {
 	.info = &ppt_pages_info,
-	.large_page_enabled = true,
+	.large_page_support = large_page_support,
 	.get_default_access_right = ppt_get_default_access_right,
 	.pgentry_present = ppt_pgentry_present,
 	.get_pml4_page = ppt_get_pml4_page,
@@ -137,6 +154,11 @@ void *get_reserve_sworld_memory_base(void)
 	return post_uos_sworld_memory;
 }
 
+static inline bool large_page_not_support(__unused enum _page_table_level level)
+{
+	return false;
+}
+
 static inline uint64_t ept_get_default_access_right(void)
 {
 	return EPT_RWX;
@@ -255,7 +277,7 @@ void init_ept_mem_ops(struct memory_ops *mem_ops, uint16_t vm_id)
 	mem_ops->get_pd_page = ept_get_pd_page;
 	mem_ops->get_pt_page = ept_get_pt_page;
 	mem_ops->clflush_pagewalk = ept_clflush_pagewalk;
-	mem_ops->large_page_enabled = true;
+	mem_ops->large_page_support = large_page_support;
 
 	/* Mitigation for issue "Machine Check Error on Page Size Change" */
 	if (is_ept_force_4k_ipage()) {
@@ -263,7 +285,7 @@ void init_ept_mem_ops(struct memory_ops *mem_ops, uint16_t vm_id)
 		mem_ops->recover_exe_right = ept_recover_exe_right;
 		/* For RTVM, build 4KB page mapping in EPT */
 		if (is_rt_vm(vm)) {
-			mem_ops->large_page_enabled = false;
+			mem_ops->large_page_support = large_page_not_support;
 		}
 	} else {
 		mem_ops->tweak_exe_right = nop_tweak_exe_right;
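Because the flag is now a callback, disabling large pages for a particular VM amounts to installing a different function: RTVMs get large_page_not_support, which answers false for every level, so their EPT is built from 4KB pages only. A pared-down sketch of that selection; the struct and the is_rt parameter are simplified stand-ins for the real memory_ops and is_rt_vm():

#include <stdbool.h>
#include <stdio.h>

enum _page_table_level { IA32E_PML4, IA32E_PDPT, IA32E_PD, IA32E_PT };

/* Simplified stand-in for struct memory_ops from page.h. */
struct memory_ops {
	bool (*large_page_support)(enum _page_table_level level);
};

static bool large_page_support(enum _page_table_level level)
{
	return (level == IA32E_PD) || (level == IA32E_PDPT);
}

static bool large_page_not_support(enum _page_table_level level)
{
	(void)level;  /* mirrors the __unused annotation above */
	return false;
}

/* is_rt is a stand-in for is_rt_vm(vm). */
static void init_ops(struct memory_ops *ops, bool is_rt)
{
	ops->large_page_support = is_rt ? large_page_not_support : large_page_support;
}

int main(void)
{
	struct memory_ops ops;

	init_ops(&ops, true);  /* RT VM: all levels answer false -> 4KB only */
	printf("PD large page for RT VM: %d\n", ops.large_page_support(IA32E_PD));
	return 0;
}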

hypervisor/arch/x86/pagetable.c

@@ -296,7 +296,7 @@ static void add_pde(const uint64_t *pdpte, uint64_t paddr_start, uint64_t vaddr_
 		pr_fatal("%s, pde 0x%lx is already present!\n", __func__, vaddr);
 	} else {
 		if (mem_ops->pgentry_present(*pde) == 0UL) {
-			if (mem_ops->large_page_enabled &&
+			if (mem_ops->large_page_support(IA32E_PD) &&
 				mem_aligned_check(paddr, PDE_SIZE) &&
 				mem_aligned_check(vaddr, PDE_SIZE) &&
 				(vaddr_next <= vaddr_end)) {
@@ -344,7 +344,7 @@ static void add_pdpte(const uint64_t *pml4e, uint64_t paddr_start, uint64_t vadd
 		pr_fatal("%s, pdpte 0x%lx is already present!\n", __func__, vaddr);
 	} else {
 		if (mem_ops->pgentry_present(*pdpte) == 0UL) {
-			if (mem_ops->large_page_enabled &&
+			if (mem_ops->large_page_support(IA32E_PDPT) &&
 				mem_aligned_check(paddr, PDPTE_SIZE) &&
 				mem_aligned_check(vaddr, PDPTE_SIZE) &&
 				(vaddr_next <= vaddr_end)) {
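As both hunks above show, the large-page path is taken only when three conditions hold together: the level supports large pages, the physical and virtual addresses are both aligned to the entry size (2MB for a PDE, 1GB for a PDPTE), and the remaining range reaches at least to the end of the current entry. A condensed sketch of that condition; use_large_page is an illustrative helper, not an ACRN function:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PDE_SIZE   (1UL << 21)  /* one PD entry covers 2MB */
#define PDPTE_SIZE (1UL << 30)  /* one PDPT entry covers 1GB */

/* Same shape as ACRN's mem_aligned_check(): value is a multiple of req. */
static bool mem_aligned_check(uint64_t value, uint64_t req)
{
	return (value & (req - 1UL)) == 0UL;
}

static bool use_large_page(bool level_support, uint64_t paddr, uint64_t vaddr,
			   uint64_t vaddr_next, uint64_t vaddr_end, uint64_t size)
{
	return level_support &&
	       mem_aligned_check(paddr, size) &&
	       mem_aligned_check(vaddr, size) &&
	       (vaddr_next <= vaddr_end);
}

int main(void)
{
	/* 2MB-aligned mapping whose range covers the whole entry: large page OK. */
	printf("%d\n", use_large_page(true, 0x200000UL, 0x200000UL,
				      0x400000UL, 0x600000UL, PDE_SIZE));
	return 0;
}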

hypervisor/include/arch/x86/pgtable.h

@@ -76,28 +76,6 @@ static inline uint64_t round_pde_down(uint64_t val)
 	return (val & PDE_MASK);
 }
 
-/**
- * @brief Page tables level in IA32 paging mode
- */
-enum _page_table_level {
-	/**
-	 * @brief The PML4 level in the page tables
-	 */
-	IA32E_PML4 = 0,
-	/**
-	 * @brief The Page-Directory-Pointer-Table level in the page tables
-	 */
-	IA32E_PDPT = 1,
-	/**
-	 * @brief The Page-Directory level in the page tables
-	 */
-	IA32E_PD = 2,
-	/**
-	 * @brief The Page-Table level in the page tables
-	 */
-	IA32E_PT = 3,
-};
-
 /* Page size */
 #define PAGE_SIZE_4K	MEM_4K
 #define PAGE_SIZE_2M	MEM_2M

hypervisor/include/arch/x86/page.h

@@ -51,6 +51,28 @@
 #define TRUSTY_PGTABLE_PAGE_NUM(size) \
 	(TRUSTY_PML4_PAGE_NUM(size) + TRUSTY_PDPT_PAGE_NUM(size) + TRUSTY_PD_PAGE_NUM(size) + TRUSTY_PT_PAGE_NUM(size))
 
+/**
+ * @brief Page tables level in IA32 paging mode
+ */
+enum _page_table_level {
+	/**
+	 * @brief The PML4 level in the page tables
+	 */
+	IA32E_PML4 = 0,
+	/**
+	 * @brief The Page-Directory-Pointer-Table level in the page tables
+	 */
+	IA32E_PDPT = 1,
+	/**
+	 * @brief The Page-Directory level in the page tables
+	 */
+	IA32E_PD = 2,
+	/**
+	 * @brief The Page-Table level in the page tables
+	 */
+	IA32E_PT = 3,
+};
+
 struct acrn_vm;
 
 struct page {
@@ -77,7 +99,7 @@ union pgtable_pages_info {
 
 struct memory_ops {
 	union pgtable_pages_info *info;
-	bool large_page_enabled;
+	bool (*large_page_support)(enum _page_table_level level);
 	uint64_t (*get_default_access_right)(void);
 	uint64_t (*pgentry_present)(uint64_t pte);
 	struct page *(*get_pml4_page)(const union pgtable_pages_info *info);