hv: ept: flush cache for modified ept entries

EPT tables are shared by the MMU and the IOMMU.
Some IOMMUs don't support page-walk coherency; in that case the CPU cache of
EPT entries must be flushed to memory after modifications, so that the
modifications are visible to the IOMMUs.

This patch adds a new interface to flush the cache of modified EPT entries.
The implementation differs for EPT and PPT entries:
- For PPT, there is no need to flush the CPU cache after an update.
- For EPT, iommu_flush_cache must be called to make the modifications
  visible to the IOMMUs.

Tracked-On: #3607
Signed-off-by: Binbin Wu <binbin.wu@intel.com>
Reviewed-by: Anthony Xu <anthony.xu@intel.com>
commit 5c81659713 (parent 2abd8b34ef)
Author: Binbin Wu, 2019-07-01 13:39:20 +08:00 (committed by ACRN System Integration)
7 changed files with 43 additions and 30 deletions
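
Before the per-file hunks, here is a minimal, self-contained sketch (not the
hypervisor source) of the dispatch pattern the patch introduces: struct
memory_ops is reduced to the one new callback, and iommu_flush_cache is a stub
standing in for the real VT-d helper. Names mirror the diff below.

/* Sketch of the clflush_pagewalk dispatch added by this patch.
 * iommu_flush_cache() here is only a printing stub. */
#include <stdint.h>
#include <stdio.h>

struct memory_ops {
	void (*clflush_pagewalk)(const void *entry);
};

/* stand-in for the VT-d helper; the real one flushes CPU cache lines */
static void iommu_flush_cache(const void *p, uint32_t size)
{
	printf("flush %u byte(s) at %p for IOMMU visibility\n",
	       (unsigned)size, (void *)p);
}

/* PPT entries are walked only by the CPU: nothing to flush */
static void ppt_clflush_pagewalk(const void *entry)
{
	(void)entry;
}

/* EPT entries are also walked by the IOMMU: flush the modified entry */
static void ept_clflush_pagewalk(const void *entry)
{
	iommu_flush_cache(entry, sizeof(uint64_t));
}

static const struct memory_ops ppt_mem_ops = { ppt_clflush_pagewalk };
static const struct memory_ops ept_mem_ops = { ept_clflush_pagewalk };

/* every page-table write now goes through the table's memory_ops */
static void set_pgentry(uint64_t *ptep, uint64_t pte,
			const struct memory_ops *mem_ops)
{
	*ptep = pte;
	mem_ops->clflush_pagewalk(ptep);
}

int main(void)
{
	uint64_t ppt_pte = 0UL;
	uint64_t ept_pte = 0UL;

	set_pgentry(&ppt_pte, 0x1000UL | 0x3UL, &ppt_mem_ops); /* no flush */
	set_pgentry(&ept_pte, 0x2000UL | 0x7UL, &ept_mem_ops); /* flushes */
	return 0;
}

Running it, the PPT write stays silent while the EPT write reports a flush,
which is exactly the split the commit message describes.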


@@ -79,7 +79,7 @@ static void create_secure_world_ept(struct acrn_vm *vm, uint64_t gpa_orig,
pml4_base = vm->arch_vm.ept_mem_ops.info->ept.sworld_pgtable_base;
(void)memset(pml4_base, 0U, PAGE_SIZE);
vm->arch_vm.sworld_eptp = pml4_base;
-sanitize_pte((uint64_t *)vm->arch_vm.sworld_eptp);
+sanitize_pte((uint64_t *)vm->arch_vm.sworld_eptp, &vm->arch_vm.ept_mem_ops);
/* The trusty memory is remapped to guest physical address
* of gpa_rebased to gpa_rebased + size
@@ -88,7 +88,7 @@ static void create_secure_world_ept(struct acrn_vm *vm, uint64_t gpa_orig,
TRUSTY_PML4_PAGE_NUM(TRUSTY_EPT_REBASE_GPA);
(void)memset(sub_table_addr, 0U, PAGE_SIZE);
sworld_pml4e = hva2hpa(sub_table_addr) | table_present;
-set_pgentry((uint64_t *)pml4_base, sworld_pml4e);
+set_pgentry((uint64_t *)pml4_base, sworld_pml4e, &vm->arch_vm.ept_mem_ops);
nworld_pml4e = get_pgentry((uint64_t *)vm->arch_vm.nworld_eptp);
@@ -102,7 +102,7 @@ static void create_secure_world_ept(struct acrn_vm *vm, uint64_t gpa_orig,
pdpte = get_pgentry(src_pdpte_p);
if ((pdpte & table_present) != 0UL) {
pdpte &= ~EPT_EXE;
-set_pgentry(dest_pdpte_p, pdpte);
+set_pgentry(dest_pdpte_p, pdpte, &vm->arch_vm.ept_mem_ops);
}
src_pdpte_p++;
dest_pdpte_p++;
@@ -133,7 +133,7 @@ void destroy_secure_world(struct acrn_vm *vm, bool need_clr_mem)
ept_del_mr(vm, vm->arch_vm.sworld_eptp, gpa_uos, size);
/* sanitize trusty ept page-structures */
-sanitize_pte((uint64_t *)vm->arch_vm.sworld_eptp);
+sanitize_pte((uint64_t *)vm->arch_vm.sworld_eptp, &vm->arch_vm.ept_mem_ops);
vm->arch_vm.sworld_eptp = NULL;
/* Restore memory to guest normal world */


@@ -429,9 +429,9 @@ int32_t create_vm(uint16_t vm_id, struct acrn_vm_config *vm_config, struct acrn_
init_ept_mem_ops(vm);
vm->arch_vm.nworld_eptp = vm->arch_vm.ept_mem_ops.get_pml4_page(vm->arch_vm.ept_mem_ops.info);
-sanitize_pte((uint64_t *)vm->arch_vm.nworld_eptp);
+sanitize_pte((uint64_t *)vm->arch_vm.nworld_eptp, &vm->arch_vm.ept_mem_ops);
-/* Register default handlers for PIO & MMIO if it is SOS VM or Pre-launched VM */
+/* Register default handlers for PIO & MMIO if it is, SOS VM or Pre-launched VM */
if ((vm_config->load_order == SOS_VM) || (vm_config->load_order == PRE_LAUNCHED_VM)) {
register_pio_default_emulation_handler(vm);
register_mmio_default_emulation_handler(vm);


@@ -148,16 +148,16 @@ static inline uint64_t get_sanitized_page(void)
return hva2hpa(sanitized_page);
}
-void sanitize_pte_entry(uint64_t *ptep)
+void sanitize_pte_entry(uint64_t *ptep, const struct memory_ops *mem_ops)
{
-set_pgentry(ptep, get_sanitized_page());
+set_pgentry(ptep, get_sanitized_page(), mem_ops);
}
-void sanitize_pte(uint64_t *pt_page)
+void sanitize_pte(uint64_t *pt_page, const struct memory_ops *mem_ops)
{
uint64_t i;
for (i = 0UL; i < PTRS_PER_PTE; i++) {
-sanitize_pte_entry(pt_page + i);
+sanitize_pte_entry(pt_page + i, mem_ops);
}
}
@@ -294,7 +294,7 @@ void init_paging(void)
enable_paging();
/* set ptep in sanitized_page to point to itself */
-sanitize_pte((uint64_t *)sanitized_page);
+sanitize_pte((uint64_t *)sanitized_page, &ppt_mem_ops);
}
/*


@@ -10,6 +10,7 @@
#include <mmu.h>
#include <vm.h>
#include <trusty.h>
+#include <vtd.h>
static struct page ppt_pml4_pages[PML4_PAGE_NUM(CONFIG_PLATFORM_RAM_SIZE + PLATFORM_LO_MMIO_SIZE)];
static struct page ppt_pdpt_pages[PDPT_PAGE_NUM(CONFIG_PLATFORM_RAM_SIZE + PLATFORM_LO_MMIO_SIZE)];
@@ -29,6 +30,10 @@ static inline uint64_t ppt_get_default_access_right(void)
return (PAGE_PRESENT | PAGE_RW | PAGE_USER);
}
+static inline void ppt_clflush_pagewalk(const void* etry __attribute__((unused)))
+{
+}
static inline uint64_t ppt_pgentry_present(uint64_t pte)
{
return pte & PAGE_PRESENT;
@@ -62,6 +67,7 @@ const struct memory_ops ppt_mem_ops = {
.get_pml4_page = ppt_get_pml4_page,
.get_pdpt_page = ppt_get_pdpt_page,
.get_pd_page = ppt_get_pd_page,
+.clflush_pagewalk = ppt_clflush_pagewalk,
};
static struct page sos_vm_pml4_pages[PML4_PAGE_NUM(EPT_ADDRESS_SPACE(CONFIG_SOS_RAM_SIZE))];
@@ -107,6 +113,11 @@ static inline uint64_t ept_pgentry_present(uint64_t pte)
return pte & EPT_RWX;
}
+static inline void ept_clflush_pagewalk(const void* etry)
+{
+iommu_flush_cache(etry, sizeof(uint64_t));
+}
static inline struct page *ept_get_pml4_page(const union pgtable_pages_info *info)
{
struct page *pml4_page = info->ept.nworld_pml4_base;
@@ -175,5 +186,5 @@ void init_ept_mem_ops(struct acrn_vm *vm)
vm->arch_vm.ept_mem_ops.get_pdpt_page = ept_get_pdpt_page;
vm->arch_vm.ept_mem_ops.get_pd_page = ept_get_pd_page;
vm->arch_vm.ept_mem_ops.get_pt_page = ept_get_pt_page;
+vm->arch_vm.ept_mem_ops.clflush_pagewalk = ept_clflush_pagewalk;
}
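
For context, iommu_flush_cache itself is declared in vtd.h and is not touched
by this patch. A hedged reconstruction of what such a helper plausibly does is
sketched below; iommu_page_walk_coherent and CACHE_LINE_SIZE are assumed names
for illustration, not taken from this diff.

/* Hedged reconstruction of the VT-d helper used by ept_clflush_pagewalk():
 * flush p..p+size back to memory unless the IOMMU snoops the CPU cache.
 * x86 GCC/Clang only (inline asm). */
#include <stdbool.h>
#include <stdint.h>

#define CACHE_LINE_SIZE 64U	/* assumed; the real value comes from CPUID */

/* assumed name; set at IOMMU init from the VT-d capability registers */
static bool iommu_page_walk_coherent = true;

static inline void clflush(const void *p)
{
	__asm__ __volatile__("clflush (%0)" : : "r"(p) : "memory");
}

void iommu_flush_cache(const void *p, uint32_t size)
{
	uint32_t i;

	/* if the IOMMU snoops the CPU cache, no flush is needed */
	if (!iommu_page_walk_coherent) {
		for (i = 0U; i < size; i += CACHE_LINE_SIZE) {
			clflush((const char *)p + i);
		}
	}
}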


@@ -45,37 +45,37 @@ static void split_large_page(uint64_t *pte, enum _page_table_level level,
paddr = ref_paddr;
for (i = 0UL; i < PTRS_PER_PTE; i++) {
-set_pgentry(pbase + i, paddr | ref_prot);
+set_pgentry(pbase + i, paddr | ref_prot, mem_ops);
paddr += paddrinc;
}
ref_prot = mem_ops->get_default_access_right();
-set_pgentry(pte, hva2hpa((void *)pbase) | ref_prot);
+set_pgentry(pte, hva2hpa((void *)pbase) | ref_prot, mem_ops);
/* TODO: flush the TLB */
}
static inline void local_modify_or_del_pte(uint64_t *pte,
-uint64_t prot_set, uint64_t prot_clr, uint32_t type)
+uint64_t prot_set, uint64_t prot_clr, uint32_t type, const struct memory_ops *mem_ops)
{
if (type == MR_MODIFY) {
uint64_t new_pte = *pte;
new_pte &= ~prot_clr;
new_pte |= prot_set;
-set_pgentry(pte, new_pte);
+set_pgentry(pte, new_pte, mem_ops);
} else {
-sanitize_pte_entry(pte);
+sanitize_pte_entry(pte, mem_ops);
}
}
/*
* pgentry may mean pml4e/pdpte/pde
*/
-static inline void construct_pgentry(uint64_t *pde, void *pd_page, uint64_t prot)
+static inline void construct_pgentry(uint64_t *pde, void *pd_page, uint64_t prot, const struct memory_ops *mem_ops)
{
-sanitize_pte((uint64_t *)pd_page);
+sanitize_pte((uint64_t *)pd_page, mem_ops);
-set_pgentry(pde, hva2hpa(pd_page) | prot);
+set_pgentry(pde, hva2hpa(pd_page) | prot, mem_ops);
}
/*
@@ -99,7 +99,7 @@ static void modify_or_del_pte(const uint64_t *pde, uint64_t vaddr_start, uint64_
if (mem_ops->pgentry_present(*pte) == 0UL) {
ASSERT(false, "invalid op, pte not present");
} else {
-local_modify_or_del_pte(pte, prot_set, prot_clr, type);
+local_modify_or_del_pte(pte, prot_set, prot_clr, type, mem_ops);
vaddr += PTE_SIZE;
if (vaddr >= vaddr_end) {
break;
@@ -134,7 +134,7 @@ static void modify_or_del_pde(const uint64_t *pdpte, uint64_t vaddr_start, uint6
if ((vaddr_next > vaddr_end) || (!mem_aligned_check(vaddr, PDE_SIZE))) {
split_large_page(pde, IA32E_PD, vaddr, mem_ops);
} else {
-local_modify_or_del_pte(pde, prot_set, prot_clr, type);
+local_modify_or_del_pte(pde, prot_set, prot_clr, type, mem_ops);
if (vaddr_next < vaddr_end) {
vaddr = vaddr_next;
continue;
@@ -178,7 +178,7 @@ static void modify_or_del_pdpte(const uint64_t *pml4e, uint64_t vaddr_start, uin
(!mem_aligned_check(vaddr, PDPTE_SIZE))) {
split_large_page(pdpte, IA32E_PDPT, vaddr, mem_ops);
} else {
-local_modify_or_del_pte(pdpte, prot_set, prot_clr, type);
+local_modify_or_del_pte(pdpte, prot_set, prot_clr, type, mem_ops);
if (vaddr_next < vaddr_end) {
vaddr = vaddr_next;
continue;
@@ -251,7 +251,7 @@ static void add_pte(const uint64_t *pde, uint64_t paddr_start, uint64_t vaddr_st
if (mem_ops->pgentry_present(*pte) != 0UL) {
ASSERT(false, "invalid op, pte present");
} else {
-set_pgentry(pte, paddr | prot);
+set_pgentry(pte, paddr | prot, mem_ops);
paddr += PTE_SIZE;
vaddr += PTE_SIZE;
@@ -284,7 +284,7 @@ static void add_pde(const uint64_t *pdpte, uint64_t paddr_start, uint64_t vaddr_
if (mem_aligned_check(paddr, PDE_SIZE) &&
mem_aligned_check(vaddr, PDE_SIZE) &&
(vaddr_next <= vaddr_end)) {
-set_pgentry(pde, paddr | (prot | PAGE_PSE));
+set_pgentry(pde, paddr | (prot | PAGE_PSE), mem_ops);
if (vaddr_next < vaddr_end) {
paddr += (vaddr_next - vaddr);
vaddr = vaddr_next;
@@ -293,7 +293,7 @@ static void add_pde(const uint64_t *pdpte, uint64_t paddr_start, uint64_t vaddr_
break; /* done */
} else {
void *pt_page = mem_ops->get_pt_page(mem_ops->info, vaddr);
-construct_pgentry(pde, pt_page, mem_ops->get_default_access_right());
+construct_pgentry(pde, pt_page, mem_ops->get_default_access_right(), mem_ops);
}
}
add_pte(pde, paddr, vaddr, vaddr_end, prot, mem_ops);
@@ -326,7 +326,7 @@ static void add_pdpte(const uint64_t *pml4e, uint64_t paddr_start, uint64_t vadd
if (mem_aligned_check(paddr, PDPTE_SIZE) &&
mem_aligned_check(vaddr, PDPTE_SIZE) &&
(vaddr_next <= vaddr_end)) {
-set_pgentry(pdpte, paddr | (prot | PAGE_PSE));
+set_pgentry(pdpte, paddr | (prot | PAGE_PSE), mem_ops);
if (vaddr_next < vaddr_end) {
paddr += (vaddr_next - vaddr);
vaddr = vaddr_next;
@@ -335,7 +335,7 @@ static void add_pdpte(const uint64_t *pml4e, uint64_t paddr_start, uint64_t vadd
break; /* done */
} else {
void *pd_page = mem_ops->get_pd_page(mem_ops->info, vaddr);
-construct_pgentry(pdpte, pd_page, mem_ops->get_default_access_right());
+construct_pgentry(pdpte, pd_page, mem_ops->get_default_access_right(), mem_ops);
}
}
add_pde(pdpte, paddr, vaddr, vaddr_end, prot, mem_ops);
@@ -371,7 +371,7 @@ void mmu_add(uint64_t *pml4_page, uint64_t paddr_base, uint64_t vaddr_base, uint
pml4e = pml4e_offset(pml4_page, vaddr);
if (mem_ops->pgentry_present(*pml4e) == 0UL) {
void *pdpt_page = mem_ops->get_pdpt_page(mem_ops->info, vaddr);
-construct_pgentry(pml4e, pdpt_page, mem_ops->get_default_access_right());
+construct_pgentry(pml4e, pdpt_page, mem_ops->get_default_access_right(), mem_ops);
}
add_pdpte(pml4e, paddr, vaddr, vaddr_end, prot, mem_ops);


@@ -71,6 +71,7 @@ struct memory_ops {
struct page *(*get_pd_page)(const union pgtable_pages_info *info, uint64_t gpa);
struct page *(*get_pt_page)(const union pgtable_pages_info *info, uint64_t gpa);
void *(*get_sworld_memory_base)(const union pgtable_pages_info *info);
+void (*clflush_pagewalk)(const void *p);
};
extern const struct memory_ops ppt_mem_ops;


@@ -257,9 +257,10 @@ static inline uint64_t get_pgentry(const uint64_t *pte)
/*
* pgentry may mean pml4e/pdpte/pde/pte
*/
-static inline void set_pgentry(uint64_t *ptep, uint64_t pte)
+static inline void set_pgentry(uint64_t *ptep, uint64_t pte, const struct memory_ops *mem_ops)
{
*ptep = pte;
+mem_ops->clflush_pagewalk(ptep);
}
static inline uint64_t pde_large(uint64_t pde)
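
Design note: routing every page-table write through set_pgentry() gives the
flush a single choke point. PPT updates stay free (ppt_clflush_pagewalk() is
empty), while each modified EPT entry pays one cache-line flush, which is what
makes the update visible to IOMMUs that lack page-walk coherency.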