hypercall: add set_memmaps hypercall support

Add the set_memmaps hypercall so the Service VM can set up EPT mappings for multiple memory regions in a single call.

Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Author: Jason Chen CJ, 2018-05-15 10:08:58 +08:00, committed by lijinxia
Commit: 71e1ae17ce (parent: e2b7f3ca91)

4 changed files with 140 additions and 42 deletions

@@ -110,6 +110,10 @@ int vmcall_vmexit_handler(struct vcpu *vcpu)
 		ret = hcall_set_vm_memmap(vm, param1, param2);
 		break;
 
+	case HC_VM_SET_MEMMAPS:
+		ret = hcall_set_vm_memmaps(vm, param1);
+		break;
+
 	case HC_VM_PCI_MSIX_REMAP:
 		ret = hcall_remap_pci_msix(vm, param1, param2);
 		break;

@@ -413,11 +413,61 @@ int64_t hcall_notify_req_finish(uint64_t vmid, uint64_t vcpu_id)
 	return ret;
 }
 
-int64_t hcall_set_vm_memmap(struct vm *vm, uint64_t vmid, uint64_t param)
+int64_t _set_vm_memmap(struct vm *vm, struct vm *target_vm,
+	struct vm_set_memmap *memmap)
 {
-	int64_t ret = 0;
 	uint64_t hpa;
 	uint32_t attr, prot;
 
+	if ((memmap->length & 0xFFF) != 0) {
+		pr_err("%s: ERROR! [vm%d] map size 0x%x is not page aligned",
+			__func__, target_vm->attr.id, memmap->length);
+		return -1;
+	}
+
+	hpa = gpa2hpa(vm, memmap->vm0_gpa);
+	dev_dbg(ACRN_DBG_HYCALL, "[vm%d] gpa=0x%x hpa=0x%x size=0x%x",
+		target_vm->attr.id, memmap->remote_gpa, hpa, memmap->length);
+
+	if (((hpa <= CONFIG_RAM_START) &&
+		(hpa + memmap->length > CONFIG_RAM_START)) ||
+		((hpa >= CONFIG_RAM_START) &&
+		(hpa < CONFIG_RAM_START + CONFIG_RAM_SIZE))) {
+		pr_err("%s: ERROR! overlap the HV memory region.", __func__);
+		return -1;
+	}
+
+	/* Check prot */
+	attr = 0;
+	if (memmap->type != MAP_UNMAP) {
+		prot = (memmap->prot != 0) ? memmap->prot : memmap->prot_2;
+		if (prot & MEM_ACCESS_READ)
+			attr |= MMU_MEM_ATTR_READ;
+		if (prot & MEM_ACCESS_WRITE)
+			attr |= MMU_MEM_ATTR_WRITE;
+		if (prot & MEM_ACCESS_EXEC)
+			attr |= MMU_MEM_ATTR_EXECUTE;
+		if (prot & MEM_TYPE_WB)
+			attr |= MMU_MEM_ATTR_WB_CACHE;
+		else if (prot & MEM_TYPE_WT)
+			attr |= MMU_MEM_ATTR_WT_CACHE;
+		else if (prot & MEM_TYPE_UC)
+			attr |= MMU_MEM_ATTR_UNCACHED;
+		else if (prot & MEM_TYPE_WC)
+			attr |= MMU_MEM_ATTR_WC;
+		else if (prot & MEM_TYPE_WP)
+			attr |= MMU_MEM_ATTR_WP;
+		else
+			attr |= MMU_MEM_ATTR_UNCACHED;
+	}
+
+	/* create gpa to hpa EPT mapping */
+	return ept_mmap(target_vm, hpa,
+		memmap->remote_gpa, memmap->length, memmap->type, attr);
+}
+
+int64_t hcall_set_vm_memmap(struct vm *vm, uint64_t vmid, uint64_t param)
+{
 	struct vm_set_memmap memmap;
 	struct vm *target_vm = get_vm_from_vmid(vmid);
@@ -441,53 +491,49 @@ int64_t hcall_set_vm_memmap(struct vm *vm, uint64_t vmid, uint64_t param)
 		return -1;
 	}
 
-	if ((memmap.length & 0xFFF) != 0) {
-		pr_err("%s: ERROR! [vm%d] map size 0x%x is not page aligned",
-			__func__, vmid, memmap.length);
-		return -1;
-	}
-
-	hpa = gpa2hpa(vm, memmap.vm0_gpa);
-	dev_dbg(ACRN_DBG_HYCALL, "[vm%d] gpa=0x%x hpa=0x%x size=0x%x",
-		vmid, memmap.remote_gpa, hpa, memmap.length);
-
-	if (((hpa <= CONFIG_RAM_START) &&
-		(hpa + memmap.length > CONFIG_RAM_START)) ||
-		((hpa >= CONFIG_RAM_START) &&
-		(hpa < CONFIG_RAM_START + CONFIG_RAM_SIZE))) {
-		pr_err("%s: ERROR! overlap the HV memory region.", __func__);
-		return -1;
-	}
-
-	/* Check prot */
-	attr = 0;
-	if (memmap.type != MAP_UNMAP) {
-		prot = memmap.prot;
-		if (prot & MEM_ACCESS_READ)
-			attr |= MMU_MEM_ATTR_READ;
-		if (prot & MEM_ACCESS_WRITE)
-			attr |= MMU_MEM_ATTR_WRITE;
-		if (prot & MEM_ACCESS_EXEC)
-			attr |= MMU_MEM_ATTR_EXECUTE;
-		if (prot & MEM_TYPE_WB)
-			attr |= MMU_MEM_ATTR_WB_CACHE;
-		else if (prot & MEM_TYPE_WT)
-			attr |= MMU_MEM_ATTR_WT_CACHE;
-		else if (prot & MEM_TYPE_UC)
-			attr |= MMU_MEM_ATTR_UNCACHED;
-		else if (prot & MEM_TYPE_WC)
-			attr |= MMU_MEM_ATTR_WC;
-		else if (prot & MEM_TYPE_WP)
-			attr |= MMU_MEM_ATTR_WP;
-		else
-			attr |= MMU_MEM_ATTR_UNCACHED;
-	}
-
-	/* create gpa to hpa EPT mapping */
-	ret = ept_mmap(target_vm, hpa,
-		memmap.remote_gpa, memmap.length, memmap.type, attr);
-
-	return ret;
+	return _set_vm_memmap(vm, target_vm, &memmap);
+}
+
+int64_t hcall_set_vm_memmaps(struct vm *vm, uint64_t param)
+{
+	struct set_memmaps set_memmaps;
+	struct memory_map *regions;
+	struct vm *target_vm;
+	unsigned int idx;
+
+	if (!is_vm0(vm)) {
+		pr_err("%s: ERROR! Not coming from service vm",
+			__func__);
+		return -1;
+	}
+
+	memset((void *)&set_memmaps, 0, sizeof(set_memmaps));
+
+	if (copy_from_vm(vm, &set_memmaps, param, sizeof(set_memmaps))) {
+		pr_err("%s: Unable copy param from vm\n", __func__);
+		return -1;
+	}
+
+	target_vm = get_vm_from_vmid(set_memmaps.vmid);
+	if (is_vm0(target_vm)) {
+		pr_err("%s: ERROR! Targeting to service vm",
+			__func__);
+		return -1;
+	}
+
+	idx = 0;
+	/* TODO: use copy_from_vm for this buffer page */
+	regions = GPA2HVA(vm, set_memmaps.memmaps_gpa);
+	while (idx < set_memmaps.memmaps_num) {
+		/* the force pointer change below is for back compatible
+		 * to struct vm_set_memmap, it will be removed in the future
+		 */
+		if (_set_vm_memmap(vm, target_vm,
+			(struct vm_set_memmap *)&regions[idx]) < 0)
+			return -1;
+		idx++;
+	}
+	return 0;
 }
 
 int64_t hcall_remap_pci_msix(struct vm *vm, uint64_t vmid, uint64_t param)
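
Editorial note: the (struct vm_set_memmap *)&regions[idx] cast in the loop above relies on struct memory_map matching the leading fields of struct vm_set_memmap (type, prot, remote_gpa, vm0_gpa, length), with _set_vm_memmap() falling back to prot_2 only when prot is 0. A minimal compile-time sketch of that layout assumption, not part of this patch, could look like:

#include <stddef.h>	/* offsetof */

/* Hypothetical C11 static asserts, assuming the struct definitions from
 * acrn_hv_defs.h are in scope; they document why the pointer cast is safe.
 */
_Static_assert(offsetof(struct memory_map, type) ==
	offsetof(struct vm_set_memmap, type), "type offset mismatch");
_Static_assert(offsetof(struct memory_map, prot) ==
	offsetof(struct vm_set_memmap, prot), "prot offset mismatch");
_Static_assert(offsetof(struct memory_map, remote_gpa) ==
	offsetof(struct vm_set_memmap, remote_gpa), "remote_gpa offset mismatch");
_Static_assert(offsetof(struct memory_map, vm0_gpa) ==
	offsetof(struct vm_set_memmap, vm0_gpa), "vm0_gpa offset mismatch");
_Static_assert(offsetof(struct memory_map, length) ==
	offsetof(struct vm_set_memmap, length), "length offset mismatch");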

@@ -237,6 +237,17 @@ int64_t hcall_notify_req_finish(uint64_t vmid, uint64_t param);
  */
 int64_t hcall_set_vm_memmap(struct vm *vm, uint64_t vmid, uint64_t param);
 
+/**
+ * @brief setup ept memory mapping for multi regions
+ *
+ * @param vm Pointer to VM data structure
+ * @param param guest physical address. This gpa points to
+ *              struct set_memmaps
+ *
+ * @return 0 on success, non-zero on error.
+ */
+int64_t hcall_set_vm_memmaps(struct vm *vm, uint64_t param);
+
 /**
  * @brief remap PCI MSI interrupt
  *

@@ -80,6 +80,7 @@
 #define HC_ID_MEM_BASE 0x40UL
 #define HC_VM_SET_MEMMAP _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x00)
 #define HC_VM_GPA2HPA _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x01)
+#define HC_VM_SET_MEMMAPS _HC_ID(HC_ID, HC_ID_MEM_BASE + 0x02)
 
 /* PCI assignment*/
 #define HC_ID_PCI_BASE 0x50UL
@@ -140,8 +141,8 @@ struct vm_set_memmap {
 	/** map type: MAP_MEM, MAP_MMIO or MAP_UNMAP */
 	uint32_t type;
 
-	/** reserved for alignment padding */
-	uint32_t reserved;
+	/** memory attributes: memory type + RWX access right */
+	uint32_t prot;
 
 	/** guest physical address to map */
 	uint64_t remote_gpa;
@@ -152,10 +153,46 @@ struct vm_set_memmap {
 	/** length of the map range */
 	uint64_t length;
 
-	/** memory attributes: memory type + RWX access right */
-	uint32_t prot;
+	/** old memory attributes (will be removed in the future):
+	 *  memory type + RWX access right */
+	uint32_t prot_2;
+} __aligned(8);
+
+struct memory_map {
+	/** map type: MAP_MEM, MAP_MMIO or MAP_UNMAP */
+	uint32_t type;
+
+	/** memory attributes: memory type + RWX access right */
+	uint32_t prot;
+
+	/** guest physical address to map */
+	uint64_t remote_gpa;
+
+	/** VM0's guest physical address which remote gpa will be mapped to */
+	uint64_t vm0_gpa;
+
+	/** length of the map range */
+	uint64_t length;
 } __aligned(8);
 
+/**
+ * multi memmap regions hypercall, used for HC_VM_SET_MEMMAPS
+ */
+struct set_memmaps {
+	/** vmid for this hypercall */
+	uint64_t vmid;
+
+	/** multi memmaps numbers */
+	uint32_t memmaps_num;
+
+	/** the gpa of memmaps buffer, point to the memmaps array:
+	 *  struct memory_map regions[memmaps_num]
+	 *  the max buffer size is one page.
+	 */
+	uint64_t memmaps_gpa;
+} __attribute__((aligned(8)));
+
 /**
  * Setup parameter for share buffer, used for HC_SETUP_SBUF hypercall
  */
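
For illustration, a caller-side sketch of how the Service VM (VM0) might batch two regions through HC_VM_SET_MEMMAPS, using the structures above. The do_hypercall() and hva_to_vm0_gpa() helpers are hypothetical stand-ins for the Service OS kernel's hypercall and address-translation paths, which are not part of this patch:

#include <stdint.h>
#include <string.h>

/* Hypothetical helpers (assumptions, not defined by this patch):
 * issue a hypercall with one GPA parameter, and translate a VM0
 * virtual address of a buffer to its guest physical address.
 */
extern int64_t do_hypercall(uint64_t hcall_id, uint64_t param_gpa);
extern uint64_t hva_to_vm0_gpa(void *hva);

static struct memory_map regions[2];	/* region array must fit in one page */
static struct set_memmaps memmaps;

int64_t map_two_regions(uint64_t target_vmid)
{
	memset(regions, 0, sizeof(regions));

	/* Region 0: 1 MiB of RAM, read/write, write-back cached (example values) */
	regions[0].type = MAP_MEM;
	regions[0].prot = MEM_TYPE_WB | MEM_ACCESS_READ | MEM_ACCESS_WRITE;
	regions[0].remote_gpa = 0x100000UL;	/* target guest view */
	regions[0].vm0_gpa = 0x80000000UL;	/* backing VM0 pages */
	regions[0].length = 0x100000UL;		/* must be page aligned */

	/* Region 1: a 4 KiB MMIO page, uncached (example values) */
	regions[1].type = MAP_MMIO;
	regions[1].prot = MEM_TYPE_UC | MEM_ACCESS_READ | MEM_ACCESS_WRITE;
	regions[1].remote_gpa = 0xC0000000UL;
	regions[1].vm0_gpa = 0xD0000000UL;
	regions[1].length = 0x1000UL;

	/* Describe the batch and hand the hypervisor the GPA of the header */
	memmaps.vmid = target_vmid;
	memmaps.memmaps_num = 2;
	memmaps.memmaps_gpa = hva_to_vm0_gpa(regions);

	return do_hypercall(HC_VM_SET_MEMMAPS, hva_to_vm0_gpa(&memmaps));
}

Compared with issuing HC_VM_SET_MEMMAP once per region, this batches all regions behind a single vmexit, which is the point of the new hypercall.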