hv: cleanup some hva/hpa conversion code
The init page tables installed in either cpu_primary.S or trampoline.S are 1:1 mapping and won't be changed in the future. The 'actual' hypervisor page table installed in enable_paging() is 1:1 mapping currently but it could be changed in the future. Both hva2hpa() and hpa2hva() are implemented based on these page tables and can't be used when the init page tables take effect. This patch does the following cleanup: - remove all hva2hpa()/hpa2hva() before calling enable_paging() - get_hv_image_base() returns HVA, not HPA. So add hva2hpa() for all cases that are called after enable_paging(). Tracked-On: #2700 Signed-off-by: Zide Chen <zide.chen@intel.com> Acked-by: Eddie Dong <Eddie.dong@intel.com>
This commit is contained in:
parent
e9335fcee6
commit
518a82d80b
|
@ -111,11 +111,18 @@ void init_e820(void)
|
|||
uint32_t i;
|
||||
|
||||
if (boot_regs[0] == MULTIBOOT_INFO_MAGIC) {
|
||||
struct multiboot_info *mbi = (struct multiboot_info *)(hpa2hva((uint64_t)boot_regs[1]));
|
||||
/*
|
||||
* Before installing new PML4 table in enable_paging(), HPA->HVA is always 1:1 mapping
|
||||
* and hpa2hva() can't be used to do the conversion. Here we simply treat boot_reg[1] as HPA.
|
||||
*/
|
||||
uint64_t hpa = (uint64_t)boot_regs[1];
|
||||
struct multiboot_info *mbi = (struct multiboot_info *)hpa;
|
||||
|
||||
pr_info("Multiboot info detected\n");
|
||||
if ((mbi->mi_flags & MULTIBOOT_INFO_HAS_MMAP) != 0U) {
|
||||
struct multiboot_mmap *mmap = (struct multiboot_mmap *)hpa2hva((uint64_t)mbi->mi_mmap_addr);
|
||||
/* HPA->HVA is always 1:1 mapping at this moment */
|
||||
hpa = (uint64_t)mbi->mi_mmap_addr;
|
||||
struct multiboot_mmap *mmap = (struct multiboot_mmap *)hpa;
|
||||
|
||||
e820_entries_count = mbi->mi_mmap_length / sizeof(struct multiboot_mmap);
|
||||
if (e820_entries_count > E820_MAX_ENTRIES) {
|
||||
|
|
|
@ -187,7 +187,7 @@ static void create_sos_vm_e820(struct acrn_vm *vm)
|
|||
uint32_t i;
|
||||
uint64_t entry_start;
|
||||
uint64_t entry_end;
|
||||
uint64_t hv_start_pa = get_hv_image_base();
|
||||
uint64_t hv_start_pa = hva2hpa((void *)(get_hv_image_base()));
|
||||
uint64_t hv_end_pa = hv_start_pa + CONFIG_HV_RAM_SIZE;
|
||||
uint32_t entries_count = get_e820_entries_count();
|
||||
struct e820_entry *entry, new_entry = {0};
|
||||
|
@ -301,7 +301,7 @@ static void prepare_sos_vm_memmap(struct acrn_vm *vm)
|
|||
/* unmap hypervisor itself for safety
|
||||
* will cause EPT violation if sos accesses hv memory
|
||||
*/
|
||||
hv_hpa = get_hv_image_base();
|
||||
hv_hpa = hva2hpa((void *)(get_hv_image_base()));
|
||||
ept_mr_del(vm, pml4_page, hv_hpa, CONFIG_HV_RAM_SIZE);
|
||||
}
|
||||
|
||||
|
|
|
@ -204,8 +204,8 @@ void enable_paging(void)
|
|||
CPU_CR_READ(cr0, &tmp64);
|
||||
CPU_CR_WRITE(cr0, tmp64 | CR0_WP);
|
||||
|
||||
CPU_CR_WRITE(cr3, hva2hpa(ppt_mmu_pml4_addr));
|
||||
|
||||
/* HPA->HVA is 1:1 mapping at this moment, simply treat ppt_mmu_pml4_addr as HPA. */
|
||||
CPU_CR_WRITE(cr3, ppt_mmu_pml4_addr);
|
||||
}
|
||||
|
||||
void enable_smep(void)
|
||||
|
@ -293,6 +293,9 @@ void init_paging(void)
|
|||
/*
|
||||
* set the paging-structure entries' U/S flag to supervisor-mode for hypervisor owned memroy.
|
||||
* (exclude the memory reserve for trusty)
|
||||
*
|
||||
* Before the new PML4 take effect in enable_paging(), HPA->HVA is always 1:1 mapping,
|
||||
* simply treat the return value of get_hv_image_base() as HPA.
|
||||
*/
|
||||
hv_hpa = get_hv_image_base();
|
||||
mmu_modify_or_del((uint64_t *)ppt_mmu_pml4_addr, hv_hpa & PDE_MASK,
|
||||
|
|
|
@ -49,7 +49,7 @@ uint64_t get_hv_image_delta(void)
|
|||
return addr;
|
||||
}
|
||||
|
||||
/* get the actual Hypervisor load address */
|
||||
/* get the actual Hypervisor load address (HVA) */
|
||||
uint64_t get_hv_image_base(void)
|
||||
{
|
||||
return (get_hv_image_delta() + CONFIG_HV_RAM_START);
|
||||
|
|
|
@ -571,7 +571,7 @@ static int32_t add_vm_memory_region(struct acrn_vm *vm, struct acrn_vm *target_v
|
|||
__func__, vm->vm_id, region->sos_vm_gpa);
|
||||
ret = -EINVAL;
|
||||
} else {
|
||||
base_paddr = get_hv_image_base();
|
||||
base_paddr = hva2hpa((void *)(get_hv_image_base()));
|
||||
if (((hpa <= base_paddr) && ((hpa + region->size) > base_paddr)) ||
|
||||
((hpa >= base_paddr) && (hpa < (base_paddr + CONFIG_HV_RAM_SIZE)))) {
|
||||
pr_err("%s: overlap the HV memory region.", __func__);
|
||||
|
@ -719,7 +719,7 @@ static int32_t write_protect_page(struct acrn_vm *vm,const struct wp_data *wp)
|
|||
dev_dbg(ACRN_DBG_HYCALL, "[vm%d] gpa=0x%x hpa=0x%x",
|
||||
vm->vm_id, wp->gpa, hpa);
|
||||
|
||||
base_paddr = get_hv_image_base();
|
||||
base_paddr = hva2hpa((void *)(get_hv_image_base()));
|
||||
if (((hpa <= base_paddr) && ((hpa + PAGE_SIZE) > base_paddr)) ||
|
||||
((hpa >= base_paddr) &&
|
||||
(hpa < (base_paddr + CONFIG_HV_RAM_SIZE)))) {
|
||||
|
|
Loading…
Reference in New Issue