hv: mmu: refine set guest memory region API

1. rename set_vm_memmap to set_vm_memory_region
2. split ept_mmap into ept_mr_add and ept_mr_del

Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
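
The shape of the change, summarized from the diff below:

    /* before: one entry point, multiplexed on the 'type' argument */
    int ept_mmap(struct vm *vm, uint64_t hpa,
                 uint64_t gpa, uint64_t size, uint32_t type, uint32_t prot);

    /* after: one function per operation, 'type' dropped */
    int ept_mr_add(struct vm *vm, uint64_t hpa,
                   uint64_t gpa, uint64_t size, uint32_t prot);
    int ept_mr_del(struct vm *vm, uint64_t hpa,
                   uint64_t gpa, uint64_t size);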

@@ -250,8 +250,7 @@ int register_mmio_emulation_handler(struct vm *vm,
 	 * need to unmap it.
 	 */
 	if (is_vm0(vm)) {
-		ept_mmap(vm, start, start, end - start,
-			MAP_UNMAP, 0);
+		ept_mr_del(vm, start, start, end - start);
 	}
 
 	/* Return success */
@@ -486,8 +485,8 @@ int ept_misconfig_vmexit_handler(__unused struct vcpu *vcpu)
 	return status;
 }
 
-int ept_mmap(struct vm *vm, uint64_t hpa,
-	uint64_t gpa, uint64_t size, uint32_t type, uint32_t prot)
+int ept_mr_add(struct vm *vm, uint64_t hpa,
+	uint64_t gpa, uint64_t size, uint32_t prot)
 {
 	struct map_params map_params;
 	uint16_t i;
@@ -498,30 +497,22 @@ int ept_mmap(struct vm *vm, uint64_t hpa,
 	map_params.pml4_base = vm->arch_vm.nworld_eptp;
 	map_params.pml4_inverted = vm->arch_vm.m2p;
 
-	if (type == MAP_MEM || type == MAP_MMIO) {
-		/* EPT & VT-d share the same page tables, set SNP bit
-		 * to force snooping of PCIe devices if the page
-		 * is cachable
-		 */
-		if ((prot & IA32E_EPT_MT_MASK) != IA32E_EPT_UNCACHED) {
-			prot |= IA32E_EPT_SNOOP_CTRL;
-		}
-		map_mem(&map_params, (void *)hpa,
-			(void *)gpa, size, prot);
-	} else if (type == MAP_UNMAP) {
-		unmap_mem(&map_params, (void *)hpa, (void *)gpa,
-			size, prot);
-	} else {
-		ASSERT(false, "unknown map type");
-	}
+	/* EPT & VT-d share the same page tables, set SNP bit
+	 * to force snooping of PCIe devices if the page
+	 * is cachable
+	 */
+	if ((prot & IA32E_EPT_MT_MASK) != IA32E_EPT_UNCACHED) {
+		prot |= IA32E_EPT_SNOOP_CTRL;
+	}
+	map_mem(&map_params, (void *)hpa,
+		(void *)gpa, size, prot);
 
 	foreach_vcpu(i, vm, vcpu) {
 		vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
 	}
 
-	dev_dbg(ACRN_DBG_EPT, "ept map: %s hpa: 0x%016llx gpa: 0x%016llx ",
-		type == MAP_UNMAP ? "unmap" : "map", hpa, gpa);
+	dev_dbg(ACRN_DBG_EPT, "%s, hpa: 0x%016llx gpa: 0x%016llx ",
+		__func__, hpa, gpa);
 	dev_dbg(ACRN_DBG_EPT, "size: 0x%016llx prot: 0x%x\n", size, prot);
 
 	return 0;
@@ -543,3 +534,27 @@ int ept_mr_modify(struct vm *vm, uint64_t gpa, uint64_t size,
 
 	return ret;
 }
+
+int ept_mr_del(struct vm *vm, uint64_t hpa,
+	uint64_t gpa, uint64_t size)
+{
+	struct map_params map_params;
+	uint16_t i;
+	struct vcpu *vcpu;
+
+	/* Setup memory map parameters */
+	map_params.page_table_type = PTT_EPT;
+	map_params.pml4_base = vm->arch_vm.nworld_eptp;
+	map_params.pml4_inverted = vm->arch_vm.m2p;
+
+	unmap_mem(&map_params, (void *)hpa, (void *)gpa, size, 0U);
+
+	foreach_vcpu(i, vm, vcpu) {
+		vcpu_make_request(vcpu, ACRN_REQUEST_EPT_FLUSH);
+	}
+
+	dev_dbg(ACRN_DBG_EPT, "%s, hpa 0x%llx gpa 0x%llx size 0x%llx\n",
+		__func__, hpa, gpa, size);
+
+	return 0;
+}
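
A call site therefore migrates as sketched below. The addresses are
invented for illustration; the constants are the ones this patch already
uses elsewhere. Note that ept_mr_add() still ORs in IA32E_EPT_SNOOP_CTRL
itself for any cachable memory type, so callers do not pass it:

    uint64_t hpa = 0x100000000UL;  /* hypothetical host-physical address */
    uint64_t gpa = 0x20000000UL;   /* hypothetical guest-physical address */

    /* was: ept_mmap(vm, hpa, gpa, CPU_PAGE_SIZE, MAP_MEM, prot); */
    ept_mr_add(vm, hpa, gpa, CPU_PAGE_SIZE,
               IA32E_EPT_R_BIT | IA32E_EPT_W_BIT | IA32E_EPT_WB);

    /* was: ept_mmap(vm, hpa, gpa, CPU_PAGE_SIZE, MAP_UNMAP, 0); */
    ept_mr_del(vm, hpa, gpa, CPU_PAGE_SIZE);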

@@ -614,16 +614,16 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
 		e820_mem.mem_bottom, e820_mem.mem_top);
 
 	/* create real ept map for all ranges with UC */
-	ept_mmap(vm, e820_mem.mem_bottom, e820_mem.mem_bottom,
+	ept_mr_add(vm, e820_mem.mem_bottom, e820_mem.mem_bottom,
 		(e820_mem.mem_top - e820_mem.mem_bottom),
-		MAP_MMIO, attr_uc);
+		attr_uc);
 
 	/* update ram entries to WB attr */
 	for (i = 0U; i < e820_entries; i++) {
 		entry = &e820[i];
 		if (entry->type == E820_TYPE_RAM) {
-			ept_mmap(vm, entry->baseaddr, entry->baseaddr,
-				entry->length, MAP_MEM, attr_wb);
+			ept_mr_add(vm, entry->baseaddr, entry->baseaddr,
+				entry->length, attr_wb);
 		}
 	}
@@ -641,7 +641,7 @@ int prepare_vm0_memmap_and_e820(struct vm *vm)
 	 * will cause EPT violation if sos accesses hv memory
 	 */
 	hv_hpa = get_hv_image_base();
-	ept_mmap(vm, hv_hpa, hv_hpa, CONFIG_RAM_SIZE, MAP_UNMAP, 0U);
+	ept_mr_del(vm, hv_hpa, hv_hpa, CONFIG_RAM_SIZE);
 
 	return 0;
 }
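
As the comments in this hunk indicate, the VM0 map is built in layers:
the whole e820 span is first mapped UC, RAM ranges are then re-added
with WB attributes (relying on ept_mr_add() re-mapping overlapping
ranges), and finally the hypervisor's own image is punched out with
ept_mr_del() so that SOS accesses to it fault.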

@@ -2111,9 +2111,9 @@ int vlapic_create(struct vcpu *vcpu)
 	}
 
 	if (is_vcpu_bsp(vcpu)) {
-		ept_mmap(vcpu->vm,
+		ept_mr_add(vcpu->vm,
 			apicv_get_apic_access_addr(vcpu->vm),
-			DEFAULT_APIC_BASE, CPU_PAGE_SIZE, MAP_MMIO,
+			DEFAULT_APIC_BASE, CPU_PAGE_SIZE,
 			IA32E_EPT_W_BIT | IA32E_EPT_R_BIT |
 			IA32E_EPT_UNCACHED);
 	}

@@ -102,13 +102,13 @@ int vmcall_vmexit_handler(struct vcpu *vcpu)
 			(uint16_t)param2);
 		break;
 
-	case HC_VM_SET_MEMMAP:
+	case HC_VM_SET_MEMORY_REGION:
 		/* param1: vmid */
-		ret = hcall_set_vm_memmap(vm, (uint16_t)param1, param2);
+		ret = hcall_set_vm_memory_region(vm, (uint16_t)param1, param2);
 		break;
 
-	case HC_VM_SET_MEMMAPS:
-		ret = hcall_set_vm_memmaps(vm, param1);
+	case HC_VM_SET_MEMORY_REGIONS:
+		ret = hcall_set_vm_memory_regions(vm, param1);
 		break;
 
 	case HC_VM_PCI_MSIX_REMAP:

@@ -405,129 +405,129 @@ int32_t hcall_notify_req_finish(uint16_t vmid, uint16_t vcpu_id)
 	return 0;
 }
 
-static int32_t
-_set_vm_memmap(struct vm *vm, struct vm *target_vm,
-	struct vm_set_memmap *memmap)
+static int32_t _set_vm_memory_region(struct vm *vm,
+	struct vm *target_vm, struct vm_memory_region *region)
 {
 	uint64_t hpa, base_paddr;
-	uint64_t attr, prot;
+	uint64_t prot;
 
-	if ((memmap->length & 0xFFFUL) != 0UL) {
-		pr_err("%s: ERROR! [vm%d] map size 0x%x is not page aligned",
-			__func__, target_vm->attr.id, memmap->length);
-		return -1;
+	if ((region->size & (CPU_PAGE_SIZE - 1UL)) != 0UL) {
+		pr_err("%s: [vm%d] map size 0x%x is not page aligned",
+			__func__, target_vm->attr.id, region->size);
+		return -EINVAL;
 	}
 
-	hpa = gpa2hpa(vm, memmap->vm0_gpa);
+	hpa = gpa2hpa(vm, region->vm0_gpa);
 	dev_dbg(ACRN_DBG_HYCALL, "[vm%d] gpa=0x%x hpa=0x%x size=0x%x",
-		target_vm->attr.id, memmap->remote_gpa, hpa, memmap->length);
+		target_vm->attr.id, region->gpa, hpa, region->size);
 
 	base_paddr = get_hv_image_base();
 	if (((hpa <= base_paddr) &&
-		((hpa + memmap->length) > base_paddr)) ||
+		((hpa + region->size) > base_paddr)) ||
 		((hpa >= base_paddr) &&
 		(hpa < (base_paddr + CONFIG_RAM_SIZE)))) {
-		pr_err("%s: ERROR! overlap the HV memory region.", __func__);
-		return -1;
+		pr_err("%s: overlap the HV memory region.", __func__);
+		return -EFAULT;
 	}
 
-	/* Check prot */
-	attr = 0U;
-	if (memmap->type != MAP_UNMAP) {
-		prot = (memmap->prot != 0U) ? memmap->prot : memmap->prot_2;
-		if ((prot & MEM_ACCESS_READ) != 0U) {
-			attr |= IA32E_EPT_R_BIT;
+	if (region->type != MR_DEL) {
+		prot = 0UL;
+		/* access right */
+		if ((region->prot & MEM_ACCESS_READ) != 0U) {
+			prot |= EPT_RD;
 		}
-		if ((prot & MEM_ACCESS_WRITE) != 0U) {
-			attr |= IA32E_EPT_W_BIT;
+		if ((region->prot & MEM_ACCESS_WRITE) != 0U) {
+			prot |= EPT_WR;
 		}
-		if ((prot & MEM_ACCESS_EXEC) != 0U) {
-			attr |= IA32E_EPT_X_BIT;
+		if ((region->prot & MEM_ACCESS_EXEC) != 0U) {
+			prot |= EPT_EXE;
 		}
-		if ((prot & MEM_TYPE_WB) != 0U) {
-			attr |= IA32E_EPT_WB;
-		} else if ((prot & MEM_TYPE_WT) != 0U) {
-			attr |= IA32E_EPT_WT;
-		} else if ((prot & MEM_TYPE_WC) != 0U) {
-			attr |= IA32E_EPT_WC;
-		} else if ((prot & MEM_TYPE_WP) != 0U) {
-			attr |= IA32E_EPT_WP;
+		/* memory type */
+		if ((region->prot & MEM_TYPE_WB) != 0U) {
+			prot |= EPT_WB;
+		} else if ((region->prot & MEM_TYPE_WT) != 0U) {
+			prot |= EPT_WT;
+		} else if ((region->prot & MEM_TYPE_WC) != 0U) {
+			prot |= EPT_WC;
+		} else if ((region->prot & MEM_TYPE_WP) != 0U) {
+			prot |= EPT_WP;
 		} else {
-			attr |= IA32E_EPT_UNCACHED;
+			prot |= EPT_UNCACHED;
 		}
+		/* create gpa to hpa EPT mapping */
+		return ept_mr_add(target_vm, hpa,
+			region->gpa, region->size, prot);
+	} else {
+		return ept_mr_del(target_vm, hpa,
+			region->gpa, region->size);
 	}
-
-	/* create gpa to hpa EPT mapping */
-	return ept_mmap(target_vm, hpa,
-		memmap->remote_gpa, memmap->length, memmap->type, attr);
 }
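
To make the prot translation above concrete, a minimal sketch of one
request as the service VM would phrase it (field values are invented):

    struct vm_memory_region reg;

    reg.type = MR_ADD;
    reg.prot = MEM_ACCESS_READ | MEM_ACCESS_WRITE | MEM_TYPE_WB;
    /* _set_vm_memory_region() translates reg.prot into
     * EPT_RD | EPT_WR | EPT_WB and calls ept_mr_add(); with
     * reg.type == MR_DEL, reg.prot is ignored and ept_mr_del()
     * is called instead.
     */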
-int32_t hcall_set_vm_memmap(struct vm *vm, uint16_t vmid, uint64_t param)
+int32_t hcall_set_vm_memory_region(struct vm *vm, uint16_t vmid, uint64_t param)
 {
-	struct vm_set_memmap memmap;
+	struct vm_memory_region region;
 	struct vm *target_vm = get_vm_from_vmid(vmid);
 
 	if ((vm == NULL) || (target_vm == NULL)) {
-		return -1;
+		return -EINVAL;
 	}
 
-	(void)memset((void *)&memmap, 0U, sizeof(memmap));
+	(void)memset((void *)&region, 0U, sizeof(region));
 
-	if (copy_from_gpa(vm, &memmap, param, sizeof(memmap)) != 0) {
+	if (copy_from_gpa(vm, &region, param, sizeof(region)) != 0) {
 		pr_err("%s: Unable copy param to vm\n", __func__);
-		return -1;
+		return -EFAULT;
 	}
 
 	if (!is_vm0(vm)) {
-		pr_err("%s: ERROR! Not coming from service vm", __func__);
-		return -1;
+		pr_err("%s: Not coming from service vm", __func__);
+		return -EPERM;
 	}
 
 	if (is_vm0(target_vm)) {
-		pr_err("%s: ERROR! Targeting to service vm", __func__);
-		return -1;
+		pr_err("%s: Targeting to service vm", __func__);
+		return -EPERM;
 	}
 
-	return _set_vm_memmap(vm, target_vm, &memmap);
+	return _set_vm_memory_region(vm, target_vm, &region);
 }
 
-int32_t hcall_set_vm_memmaps(struct vm *vm, uint64_t param)
+int32_t hcall_set_vm_memory_regions(struct vm *vm, uint64_t param)
 {
-	struct set_memmaps set_memmaps;
-	struct memory_map *regions;
+	struct set_regions set_regions;
+	struct vm_memory_region *regions;
 	struct vm *target_vm;
 	uint32_t idx;
 
 	if (!is_vm0(vm)) {
-		pr_err("%s: ERROR! Not coming from service vm",
-			__func__);
-		return -1;
+		pr_err("%s: Not coming from service vm", __func__);
+		return -EPERM;
 	}
 
-	(void)memset((void *)&set_memmaps, 0U, sizeof(set_memmaps));
+	(void)memset((void *)&set_regions, 0U, sizeof(set_regions));
 
-	if (copy_from_gpa(vm, &set_memmaps, param, sizeof(set_memmaps)) != 0) {
+	if (copy_from_gpa(vm, &set_regions, param, sizeof(set_regions)) != 0) {
 		pr_err("%s: Unable copy param from vm\n", __func__);
-		return -1;
+		return -EFAULT;
 	}
 
-	target_vm = get_vm_from_vmid(set_memmaps.vmid);
+	target_vm = get_vm_from_vmid(set_regions.vmid);
 	if (is_vm0(target_vm)) {
-		pr_err("%s: ERROR! Targeting to service vm",
-			__func__);
-		return -1;
+		pr_err("%s: Targeting to service vm", __func__);
+		return -EFAULT;
 	}
 
 	idx = 0U;
 	/*TODO: use copy_from_gpa for this buffer page */
-	regions = GPA2HVA(vm, set_memmaps.memmaps_gpa);
-	while (idx < set_memmaps.memmaps_num) {
+	regions = GPA2HVA(vm, set_regions.regions_gpa);
+	while (idx < set_regions.mr_num) {
 		/* the force pointer change below is for back compatible
-		 * to struct vm_set_memmap, it will be removed in the future
+		 * to struct vm_memory_region, it will be removed in the future
 		 */
-		if (_set_vm_memmap(vm, target_vm,
-			(struct vm_set_memmap *)&regions[idx]) < 0) {
-			return -1;
+		int ret = _set_vm_memory_region(vm, target_vm, &regions[idx]);
+
+		if (ret < 0) {
+			return ret;
 		}
 		idx++;
 	}
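
One behavioral note on the loop above: the first region that fails aborts
the hypercall and returns that region's error code, so regions processed
earlier stay mapped and callers cannot assume all-or-nothing semantics.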

@@ -398,10 +398,12 @@ void destroy_ept(struct vm *vm);
 uint64_t gpa2hpa(struct vm *vm, uint64_t gpa);
 uint64_t _gpa2hpa(struct vm *vm, uint64_t gpa, uint32_t *size);
 uint64_t hpa2gpa(struct vm *vm, uint64_t hpa);
-int ept_mmap(struct vm *vm, uint64_t hpa,
-	uint64_t gpa, uint64_t size, uint32_t type, uint32_t prot);
+int ept_mr_add(struct vm *vm, uint64_t hpa,
+	uint64_t gpa, uint64_t size, uint32_t prot);
 int ept_mr_modify(struct vm *vm, uint64_t gpa, uint64_t size,
 	uint64_t attr_set, uint64_t attr_clr);
+int ept_mr_del(struct vm *vm, uint64_t hpa,
+	uint64_t gpa, uint64_t size);
 int ept_violation_vmexit_handler(struct vcpu *vcpu);
 int ept_misconfig_vmexit_handler(struct vcpu *vcpu);

@@ -196,9 +196,6 @@ int32_t hcall_notify_req_finish(uint16_t vmid, uint16_t vcpu_id);
 /**
  * @brief setup ept memory mapping
  *
- * Set the ept memory mapping for a VM.
- * The function will return -1 if the target VM does not exist.
- *
  * @param vm Pointer to VM data structure
  * @param vmid ID of the VM
  * @param param guest physical address. This gpa points to
@@ -206,7 +203,7 @@ int32_t hcall_notify_req_finish(uint16_t vmid, uint16_t vcpu_id);
  *
  * @return 0 on success, non-zero on error.
  */
-int32_t hcall_set_vm_memmap(struct vm *vm, uint16_t vmid, uint64_t param);
+int32_t hcall_set_vm_memory_region(struct vm *vm, uint16_t vmid, uint64_t param);
 
 /**
  * @brief setup ept memory mapping for multi regions
@@ -217,7 +214,7 @@ int32_t hcall_set_vm_memmap(struct vm *vm, uint16_t vmid, uint64_t param);
  *
  * @return 0 on success, non-zero on error.
  */
-int32_t hcall_set_vm_memmaps(struct vm *vm, uint64_t param);
+int32_t hcall_set_vm_memory_regions(struct vm *vm, uint64_t param);
/**
* @brief remap PCI MSI interrupt

@@ -49,9 +49,9 @@
 
 /* Guest memory management */
 #define HC_ID_MEM_BASE 0x40UL
-#define HC_VM_SET_MEMMAP _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x00UL)
+#define HC_VM_SET_MEMORY_REGION _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x00UL)
 #define HC_VM_GPA2HPA _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x01UL)
-#define HC_VM_SET_MEMMAPS _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x02UL)
+#define HC_VM_SET_MEMORY_REGIONS _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x02UL)
 
 /* PCI assignment*/
 #define HC_ID_PCI_BASE 0x50UL
 
@@ -101,56 +101,33 @@
  */
 
 /**
- * @brief Info to set ept mapping
+ * @brief Info to set guest memory region mapping
  *
- * the parameter for HC_VM_SET_MEMMAP hypercall
+ * the parameter for HC_VM_SET_MEMORY_REGION hypercall
  */
-struct vm_set_memmap {
-#define MAP_MEM 0U
-#define MAP_MMIO 1U
-#define MAP_UNMAP 2U
-	/** map type: MAP_MEM, MAP_MMIO or MAP_UNMAP */
+struct vm_memory_region {
+#define MR_ADD 0U
+#define MR_DEL 2U
+	/** set memory region type: MR_ADD or MR_DEL */
 	uint32_t type;
 
 	/** memory attributes: memory type + RWX access right */
 	uint32_t prot;
 
-	/** guest physical address to map */
-	uint64_t remote_gpa;
+	/** the beginning guest physical address of the memory region */
+	uint64_t gpa;
 
-	/** VM0's guest physcial address which remote gpa will be mapped to */
+	/** VM0's guest physical address which gpa will be mapped to */
 	uint64_t vm0_gpa;
 
-	/** length of the map range */
-	uint64_t length;
-
-	/** old memory attributes(will be removed in the future):
-	 * memory type + RWX access right */
-	uint32_t prot_2;
-} __aligned(8);
-
-struct memory_map {
-	/** map type: MAP_MEM, MAP_MMIO or MAP_UNMAP */
-	uint32_t type;
-
-	/** memory attributes: memory type + RWX access right */
-	uint32_t prot;
-
-	/** guest physical address to map */
-	uint64_t remote_gpa;
-
-	/** VM0's guest physcial address which remote gpa will be mapped to */
-	uint64_t vm0_gpa;
-
-	/** length of the map range */
-	uint64_t length;
+	/** size of the memory region */
+	uint64_t size;
 } __aligned(8);
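
For illustration, one descriptor filled in by the service VM might look
like this; the addresses and size are made-up examples, and the
MEM_ACCESS_*/MEM_TYPE_* constants are the ones consumed by
_set_vm_memory_region():

    struct vm_memory_region region = {
        .type = MR_ADD,
        .prot = MEM_ACCESS_READ | MEM_ACCESS_WRITE |
                MEM_ACCESS_EXEC | MEM_TYPE_WB,
        .gpa = 0x0UL,            /* guest RAM starts at gpa 0 */
        .vm0_gpa = 0x40000000UL, /* example backing range in VM0 */
        .size = 0x20000000UL,    /* 512M, page aligned */
    };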
 
 /**
- * multi memmap regions hypercall, used for HC_VM_SET_MEMMAPS
+ * set multi memory regions, used for HC_VM_SET_MEMORY_REGIONS
  */
-struct set_memmaps {
+struct set_regions {
 	/** vmid for this hypercall */
 	uint16_t vmid;
 
@@ -160,14 +137,14 @@ struct set_memmaps {
 	/** Reserved */
 	uint32_t reserved1;
 
-	/** multi memmaps numbers */
-	uint32_t memmaps_num;
+	/** memory region numbers */
+	uint32_t mr_num;
 
-	/** the gpa of memmaps buffer, point to the memmaps array:
-	 * struct memory_map regions[memmaps_num]
+	/** the gpa of regions buffer, point to the regions array:
+	 * struct vm_memory_region regions[mr_num]
 	 * the max buffer size is one page.
 	 */
-	uint64_t memmaps_gpa;
+	uint64_t regions_gpa;
 } __attribute__((aligned(8)));
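
And a sketch of the multi-region variant: the descriptor array lives in
one page of VM0 memory and set_regions points at it. target_vmid and
regions_buf_gpa are hypothetical values supplied by the caller:

    struct vm_memory_region regions[2]; /* filled in as shown above */
    struct set_regions param;

    (void)memset((void *)&param, 0U, sizeof(param));
    param.vmid = target_vmid;            /* VM to operate on */
    param.mr_num = 2U;
    param.regions_gpa = regions_buf_gpa; /* gpa of the regions array;
                                          * the buffer must fit in one page */
    /* param's gpa is then passed via the HC_VM_SET_MEMORY_REGIONS
     * hypercall, which hcall_set_vm_memory_regions() walks region
     * by region.
     */
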
/**