hv: bug fix: normal world may get trusty world's pdpt page
The normal world also sets up an EPT mapping for the 511 GB gpa region during initialization, so the gpa alone cannot tell us which world a page request comes from. However, trusty is a special case: we know exactly where it obtains its pml4_page and pdpt_page. We can therefore simplify this by having trusty fetch those pages directly, while these helpers always return the normal world's pml4_page or pdpt_page. Tracked-On: #861 Signed-off-by: Li, Fei1 <fei1.li@intel.com>
This commit is contained in:
parent
f1ed6c503c
commit
541f3713d2
|
@ -99,25 +99,14 @@ static inline uint64_t ept_pgentry_present(uint64_t pte)
|
|||
|
||||
/*
 * Return the PML4 (root) page for a VM's normal-world EPT, zeroed.
 *
 * The gpa parameter is intentionally unused: both worlds map overlapping
 * gpa ranges, so the gpa cannot distinguish the caller's world. Secure-world
 * (trusty) setup code obtains its root table directly from
 * info->ept.sworld_pgtable_base instead of going through this callback,
 * so this helper only ever hands out the normal-world PML4 page.
 *
 * @param info page-table page pool for this VM's EPT
 * @param gpa  unused; kept so the mem_ops callback signature is uniform
 * @return pointer to the zero-filled normal-world PML4 page
 */
static inline struct page *ept_get_pml4_page(const union pgtable_pages_info *info, __unused uint64_t gpa)
{
	struct page *page = info->ept.nworld_pml4_base;

	/* Hand back a clean page so stale entries never leak into a new EPT. */
	(void)memset(page, 0U, PAGE_SIZE);
	return page;
}
|
||||
|
||||
/*
 * Return the PDPT page covering 'gpa' in a VM's normal-world EPT, zeroed.
 *
 * One PDPT page spans one PML4 entry's range (512 GB), so the page index
 * within the normal-world PDPT pool is gpa >> PML4E_SHIFT. Secure-world
 * (trusty) setup code computes its PDPT page directly from
 * info->ept.sworld_pgtable_base rather than calling this helper, so no
 * world check on gpa is needed here.
 *
 * @param info page-table page pool for this VM's EPT
 * @param gpa  guest-physical address the PDPT page must cover
 * @return pointer to the zero-filled normal-world PDPT page for gpa
 */
static inline struct page *ept_get_pdpt_page(const union pgtable_pages_info *info, uint64_t gpa)
{
	struct page *page = info->ept.nworld_pdpt_base + (gpa >> PML4E_SHIFT);

	/* Hand back a clean page so stale entries never leak into a new EPT. */
	(void)memset(page, 0U, PAGE_SIZE);
	return page;
}
|
||||
|
|
|
@ -97,14 +97,16 @@ static void create_secure_world_ept(struct vm *vm, uint64_t gpa_orig,
|
|||
* Normal World.PD/PT are shared in both Secure world's EPT
|
||||
* and Normal World's EPT
|
||||
*/
|
||||
pml4_base = vm->arch_vm.ept_mem_ops.get_pml4_page(vm->arch_vm.ept_mem_ops.info, TRUSTY_EPT_REBASE_GPA);
|
||||
pml4_base = vm->arch_vm.ept_mem_ops.info->ept.sworld_pgtable_base;
|
||||
(void)memset(pml4_base, 0U, CPU_PAGE_SIZE);
|
||||
vm->arch_vm.sworld_eptp = pml4_base;
|
||||
sanitize_pte((uint64_t *)vm->arch_vm.sworld_eptp);
|
||||
|
||||
/* The trusty memory is remapped to guest physical address
|
||||
* of gpa_rebased to gpa_rebased + size
|
||||
*/
|
||||
sub_table_addr = vm->arch_vm.ept_mem_ops.get_pdpt_page(vm->arch_vm.ept_mem_ops.info, TRUSTY_EPT_REBASE_GPA);
|
||||
sub_table_addr = vm->arch_vm.ept_mem_ops.info->ept.sworld_pgtable_base + TRUSTY_PML4_PAGE_NUM(TRUSTY_EPT_REBASE_GPA);
|
||||
(void)memset(sub_table_addr, 0U, CPU_PAGE_SIZE);
|
||||
sworld_pml4e = hva2hpa(sub_table_addr) | table_present;
|
||||
set_pgentry((uint64_t *)pml4_base, sworld_pml4e);
|
||||
|
||||
|
|
Loading…
Reference in New Issue