hv: ept: unify EPT API name to verb-object style

Rename ept_mr_add to ept_add_mr
Rename ept_mr_modify to ept_modify_mr
Rename ept_mr_del to ept_del_mr

Tracked-On: #1842
Signed-off-by: Li, Fei1 <fei1.li@intel.com>

Author:    Li, Fei1
Date:      2019-06-14 18:32:19 +08:00
Committer: ACRN System Integration
Commit:    9960ff98c5 (parent: 4add405978)

12 changed files with 35 additions and 35 deletions
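
For reference, the three renamed prototypes, as declared in the EPT header near the end of this diff (the comments paraphrase the existing doxygen descriptions):

/* Map [gpa, gpa + size) in the guest's EPT to host physical address hpa with the given attributes. */
void ept_add_mr(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t hpa,
                uint64_t gpa, uint64_t size, uint64_t prot_orig);

/* Update the access rights or memory type of an existing guest-physical mapping. */
void ept_modify_mr(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa,
                uint64_t size, uint64_t prot_set, uint64_t prot_clr);

/* Unmap [gpa, gpa + size); per its @pre, the range must already be mapped into host physical memory. */
void ept_del_mr(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa,
                uint64_t size);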

@@ -552,7 +552,7 @@ request as shown below.
* - ACRN_REQUEST_EPT_FLUSH
- Request for EPT flush
- - ept_mr_add, ept_mr_modify, ept_mr_del, or vmx_write_cr0 disable cache
+ - ept_add_mr, ept_modify_mr, ept_del_mr, or vmx_write_cr0 disable cache
- invept
* - ACRN_REQUEST_TRP_FAULT

@@ -402,13 +402,13 @@ Address Space Translation
EPT
---
- .. doxygenfunction:: ept_mr_add
+ .. doxygenfunction:: ept_add_mr
:project: Project ACRN
- .. doxygenfunction:: ept_mr_del
+ .. doxygenfunction:: ept_del_mr
:project: Project ACRN
- .. doxygenfunction:: ept_mr_modify
+ .. doxygenfunction:: ept_modify_mr
:project: Project ACRN
.. doxygenfunction:: destroy_ept

@@ -347,8 +347,8 @@ release in July 2018 (click on the CommitID link to see details):
- :acrn-commit:`5663dd7` hv: extend the decode_modrm
- :acrn-commit:`3b6ccf0` HV: remove callbacks registration for APICv functions
- :acrn-commit:`93c1b07` hv: mmu: remove old map_mem
- - :acrn-commit:`f3b825d` hv: trusty: use ept_mr_add to add memory region
- - :acrn-commit:`4bb8456` hv: ept: refine ept_mr_add base on mmu_add
+ - :acrn-commit:`f3b825d` hv: trusty: use ept_add_mr to add memory region
+ - :acrn-commit:`4bb8456` hv: ept: refine ept_add_mr base on mmu_add
- :acrn-commit:`da57284` hv: ptdev: simplify struct ptdev_msi_info
- :acrn-commit:`2371839` hv: ptdev: remove vector index from structure ptdev_msi_info
- :acrn-commit:`d8cc29b` hv: ptdev: check whether phys_pin is valid in add_intx_remapping
@@ -727,7 +727,7 @@ release in July 2018 (click on the CommitID link to see details):
- :acrn-commit:`f4ca3cc` hv: instr_emul: fix 'Parameter indexing array too big at call'
- :acrn-commit:`84d320d` HV:treewide:Fix type conversion in VMX, timer and MTTR module
- :acrn-commit:`f7efd0f` hv: mmu: replace modify_mem with mmu_modify
- - :acrn-commit:`0a33c0d` hv: mmu: replace ept_update_mt with ept_mr_modify
+ - :acrn-commit:`0a33c0d` hv: mmu: replace ept_update_mt with ept_modify_mr
- :acrn-commit:`1991823` hv: mmu: revisit mmu modify page table attributes
- :acrn-commit:`20c80ea` HV: bug fix on emulating msi message from guest
- :acrn-commit:`9695d3b` tools: replace payload[0] of struct mngr_msg with an union

@@ -97,7 +97,7 @@ int32_t ept_misconfig_vmexit_handler(__unused struct acrn_vcpu *vcpu)
return status;
}
- void ept_mr_add(struct acrn_vm *vm, uint64_t *pml4_page,
+ void ept_add_mr(struct acrn_vm *vm, uint64_t *pml4_page,
uint64_t hpa, uint64_t gpa, uint64_t size, uint64_t prot_orig)
{
uint16_t i;
@@ -122,7 +122,7 @@ void ept_mr_add(struct acrn_vm *vm, uint64_t *pml4_page,
}
}
- void ept_mr_modify(struct acrn_vm *vm, uint64_t *pml4_page,
+ void ept_modify_mr(struct acrn_vm *vm, uint64_t *pml4_page,
uint64_t gpa, uint64_t size,
uint64_t prot_set, uint64_t prot_clr)
{
@@ -145,7 +145,7 @@ void ept_mr_modify(struct acrn_vm *vm, uint64_t *pml4_page,
/**
* @pre [gpa,gpa+size) has been mapped into host physical memory region
*/
- void ept_mr_del(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa, uint64_t size)
+ void ept_del_mr(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa, uint64_t size)
{
struct acrn_vcpu *vcpu;
uint16_t i;

@@ -67,7 +67,7 @@ static void create_secure_world_ept(struct acrn_vm *vm, uint64_t gpa_orig,
hpa = gpa2hpa(vm, gpa_orig);
/* Unmap gpa_orig~gpa_orig+size from guest normal world ept mapping */
- ept_mr_del(vm, (uint64_t *)vm->arch_vm.nworld_eptp, gpa_orig, size);
+ ept_del_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, gpa_orig, size);
/* Copy PDPT entries from Normal world to Secure world
* Secure world can access Normal World's memory,
@@ -109,7 +109,7 @@ static void create_secure_world_ept(struct acrn_vm *vm, uint64_t gpa_orig,
}
/* Map [gpa_rebased, gpa_rebased + size) to secure ept mapping */
- ept_mr_add(vm, (uint64_t *)vm->arch_vm.sworld_eptp, hpa, gpa_rebased, size, EPT_RWX | EPT_WB);
+ ept_add_mr(vm, (uint64_t *)vm->arch_vm.sworld_eptp, hpa, gpa_rebased, size, EPT_RWX | EPT_WB);
/* Backup secure world info, will be used when destroy secure world and suspend UOS */
vm->sworld_control.sworld_memory.base_gpa_in_uos = gpa_orig;
@@ -131,13 +131,13 @@ void destroy_secure_world(struct acrn_vm *vm, bool need_clr_mem)
clac();
}
- ept_mr_del(vm, vm->arch_vm.sworld_eptp, gpa_uos, size);
+ ept_del_mr(vm, vm->arch_vm.sworld_eptp, gpa_uos, size);
/* sanitize trusty ept page-structures */
sanitize_pte((uint64_t *)vm->arch_vm.sworld_eptp);
vm->arch_vm.sworld_eptp = NULL;
/* Restore memory to guest normal world */
- ept_mr_add(vm, vm->arch_vm.nworld_eptp, hpa, gpa_uos, size, EPT_RWX | EPT_WB);
+ ept_add_mr(vm, vm->arch_vm.nworld_eptp, hpa, gpa_uos, size, EPT_RWX | EPT_WB);
} else {
pr_err("sworld eptp is NULL, it's not created");
}

@@ -2171,11 +2171,11 @@ int32_t vlapic_create(struct acrn_vcpu *vcpu)
(uint64_t *)vcpu->vm->arch_vm.nworld_eptp;
/* only need unmap it from SOS as UOS never mapped it */
if (is_sos_vm(vcpu->vm)) {
- ept_mr_del(vcpu->vm, pml4_page,
+ ept_del_mr(vcpu->vm, pml4_page,
DEFAULT_APIC_BASE, PAGE_SIZE);
}
- ept_mr_add(vcpu->vm, pml4_page,
+ ept_add_mr(vcpu->vm, pml4_page,
vlapic_apicv_get_apic_access_addr(),
DEFAULT_APIC_BASE, PAGE_SIZE,
EPT_WR | EPT_RD | EPT_UNCACHED);

@@ -195,7 +195,7 @@ static void prepare_prelaunched_vm_memmap(struct acrn_vm *vm, const struct acrn_
/* Do EPT mapping for GPAs that are backed by physical memory */
if (entry->type == E820_TYPE_RAM) {
- ept_mr_add(vm, (uint64_t *)vm->arch_vm.nworld_eptp, base_hpa, entry->baseaddr,
+ ept_add_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, base_hpa, entry->baseaddr,
entry->length, EPT_RWX | EPT_WB);
base_hpa += entry->length;
@@ -203,7 +203,7 @@ static void prepare_prelaunched_vm_memmap(struct acrn_vm *vm, const struct acrn_
/* GPAs under 1MB are always backed by physical memory */
if ((entry->type != E820_TYPE_RAM) && (entry->baseaddr < (uint64_t)MEM_1M)) {
- ept_mr_add(vm, (uint64_t *)vm->arch_vm.nworld_eptp, base_hpa, entry->baseaddr,
+ ept_add_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, base_hpa, entry->baseaddr,
entry->length, EPT_RWX | EPT_UNCACHED);
base_hpa += entry->length;
@@ -337,14 +337,14 @@ static void prepare_sos_vm_memmap(struct acrn_vm *vm)
}
/* create real ept map for all ranges with UC */
- ept_mr_add(vm, pml4_page, p_e820_mem_info->mem_bottom, p_e820_mem_info->mem_bottom,
+ ept_add_mr(vm, pml4_page, p_e820_mem_info->mem_bottom, p_e820_mem_info->mem_bottom,
(p_e820_mem_info->mem_top - p_e820_mem_info->mem_bottom), attr_uc);
/* update ram entries to WB attr */
for (i = 0U; i < entries_count; i++) {
entry = p_e820 + i;
if (entry->type == E820_TYPE_RAM) {
- ept_mr_modify(vm, pml4_page, entry->baseaddr, entry->length, EPT_WB, EPT_MT_MASK);
+ ept_modify_mr(vm, pml4_page, entry->baseaddr, entry->length, EPT_WB, EPT_MT_MASK);
}
}
@@ -361,19 +361,19 @@ static void prepare_sos_vm_memmap(struct acrn_vm *vm)
*/
epc_secs = get_phys_epc();
for (i = 0U; (i < MAX_EPC_SECTIONS) && (epc_secs[i].size != 0UL); i++) {
- ept_mr_del(vm, pml4_page, epc_secs[i].base, epc_secs[i].size);
+ ept_del_mr(vm, pml4_page, epc_secs[i].base, epc_secs[i].size);
}
/* unmap hypervisor itself for safety
* will cause EPT violation if sos accesses hv memory
*/
hv_hpa = hva2hpa((void *)(get_hv_image_base()));
- ept_mr_del(vm, pml4_page, hv_hpa, CONFIG_HV_RAM_SIZE);
+ ept_del_mr(vm, pml4_page, hv_hpa, CONFIG_HV_RAM_SIZE);
/* unmap prelaunch VM memory */
for (vm_id = 0U; vm_id < CONFIG_MAX_VM_NUM; vm_id++) {
vm_config = get_vm_config(vm_id);
if (vm_config->load_order == PRE_LAUNCHED_VM) {
- ept_mr_del(vm, pml4_page, vm_config->memory.start_hpa, vm_config->memory.size);
+ ept_del_mr(vm, pml4_page, vm_config->memory.start_hpa, vm_config->memory.size);
}
}
}
@@ -387,7 +387,7 @@ static void prepare_epc_vm_memmap(struct acrn_vm *vm)
if (is_vsgx_supported(vm->vm_id)) {
vm_epc_maps = get_epc_mapping(vm->vm_id);
for (i = 0U; (i < MAX_EPC_SECTIONS) && (vm_epc_maps[i].size != 0UL); i++) {
- ept_mr_add(vm, (uint64_t *)vm->arch_vm.nworld_eptp, vm_epc_maps[i].hpa,
+ ept_add_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, vm_epc_maps[i].hpa,
vm_epc_maps[i].gpa, vm_epc_maps[i].size, EPT_RWX | EPT_WB);
}
}
@@ -456,7 +456,7 @@ int32_t create_vm(uint16_t vm_id, struct acrn_vm_config *vm_config, struct acrn_
if (vm->sworld_control.flag.supported != 0UL) {
struct memory_ops *ept_mem_ops = &vm->arch_vm.ept_mem_ops;
- ept_mr_add(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
+ ept_add_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
hva2hpa(ept_mem_ops->get_sworld_memory_base(ept_mem_ops->info)),
TRUSTY_EPT_REBASE_GPA, TRUSTY_RAM_SIZE, EPT_WB | EPT_RWX);
}

@@ -157,7 +157,7 @@ static void update_ept(struct acrn_vm *vm, uint64_t start,
break;
}
- ept_mr_modify(vm, (uint64_t *)vm->arch_vm.nworld_eptp, start, size, attr, EPT_MT_MASK);
+ ept_modify_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, start, size, attr, EPT_MT_MASK);
}
static void update_ept_mem_type(const struct acrn_vmtrr *vmtrr)

@@ -647,7 +647,7 @@ static int32_t add_vm_memory_region(struct acrn_vm *vm, struct acrn_vm *target_v
prot |= EPT_UNCACHED;
}
/* create gpa to hpa EPT mapping */
- ept_mr_add(target_vm, pml4_page, hpa,
+ ept_add_mr(target_vm, pml4_page, hpa,
region->gpa, region->size, prot);
ret = 0;
}
@@ -687,7 +687,7 @@ static int32_t set_vm_memory_region(struct acrn_vm *vm,
if (region->type != MR_DEL) {
ret = add_vm_memory_region(vm, target_vm, region, pml4_page);
} else {
- ept_mr_del(target_vm, pml4_page,
+ ept_del_mr(target_vm, pml4_page,
region->gpa, region->size);
ret = 0;
}
@@ -775,7 +775,7 @@ static int32_t write_protect_page(struct acrn_vm *vm,const struct wp_data *wp)
prot_set = (wp->set != 0U) ? 0UL : EPT_WR;
prot_clr = (wp->set != 0U) ? EPT_WR : 0UL;
- ept_mr_modify(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
+ ept_modify_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
wp->gpa, PAGE_SIZE, prot_set, prot_clr);
ret = 0;
}

@@ -633,7 +633,7 @@ void register_mmio_emulation_handler(struct acrn_vm *vm,
* need to unmap it.
*/
if (is_sos_vm(vm)) {
- ept_mr_del(vm, (uint64_t *)vm->arch_vm.nworld_eptp, start, end - start);
+ ept_del_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, start, end - start);
}
}

@@ -179,14 +179,14 @@ static void vdev_pt_remap_generic_bar(const struct pci_vdev *vdev, uint32_t idx,
struct acrn_vm *vm = vdev->vpci->vm;
if (vdev->bar[idx].base != 0UL) {
- ept_mr_del(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
+ ept_del_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
vdev->bar[idx].base,
vdev->bar[idx].size);
}
if (new_base != 0U) {
/* Map the physical BAR in the guest MMIO space */
- ept_mr_add(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
+ ept_add_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp,
vdev->pdev->bar[idx].base, /* HPA */
new_base, /*GPA*/
vdev->bar[idx].size,

@@ -69,7 +69,7 @@ uint64_t sos_vm_hpa2gpa(uint64_t hpa);
*
* @return None
*/
- void ept_mr_add(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t hpa,
+ void ept_add_mr(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t hpa,
uint64_t gpa, uint64_t size, uint64_t prot_orig);
/**
* @brief Guest-physical memory page access right or memory type updating
@@ -86,7 +86,7 @@ void ept_mr_add(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t hpa,
*
* @return None
*/
- void ept_mr_modify(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa,
+ void ept_modify_mr(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa,
uint64_t size, uint64_t prot_set, uint64_t prot_clr);
/**
* @brief Guest-physical memory region unmapping
@@ -101,7 +101,7 @@ void ept_mr_modify(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa,
*
* @pre [gpa,gpa+size) has been mapped into host physical memory region
*/
- void ept_mr_del(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa,
+ void ept_del_mr(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa,
uint64_t size);
/**