HV: Add support to assign non-contiguous HPA regions for pre-launched VM

On some platforms, the HPA regions for a virtual machine cannot
be contiguous because of E820 reserved entries or PCI holes. In
such cases, pre-launched VMs need to be assigned non-contiguous
memory regions, and this patch adds support for that.

To keep things simple, the current design makes the following
assumptions (see the illustrative sketch after this message):
	1. HPA2 is always placed after HPA1.
	2. HPA1 and HPA2 do not share a single ve820 entry.
	(Create multiple entries if needed, but do not share one.)
	3. Only 2 non-contiguous HPA regions are supported (this can
	be extended later to support more non-contiguous HPAs).

Signed-off-by: Vijay Dhanraj <vijay.dhanraj@intel.com>
Tracked-On: #4195
Acked-by: Anthony Xu <anthony.xu@intel.com>
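
For illustration, here is a minimal sketch of how a pre-launched VM's
memory could be described under the new scheme. The addresses, sizes,
and initializer style are invented for this example, not taken from the
patch; it only shows the two new fields and assumption 1 (HPA2 starting
above the end of HPA1):

	/* Hypothetical values; HPA2 begins above start_hpa + size, per assumption 1. */
	struct acrn_vm_mem_config mem_config = {
		.start_hpa  = 0x100000000UL,	/* HPA1 at 4 GiB   */
		.size       = 0x20000000UL,	/* 512 MiB         */
		.start_hpa2 = 0x140000000UL,	/* HPA2 at 5 GiB   */
		.size_hpa2  = 0x20000000UL,	/* another 512 MiB */
	};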

@@ -188,7 +188,9 @@ static inline uint16_t get_vm_bsp_pcpu_id(const struct acrn_vm_config *vm_config
  */
 static void prepare_prelaunched_vm_memmap(struct acrn_vm *vm, const struct acrn_vm_config *vm_config)
 {
+	bool is_hpa1 = true;
 	uint64_t base_hpa = vm_config->memory.start_hpa;
+	uint64_t remaining_hpa_size = vm_config->memory.size;
 	uint32_t i;

 	for (i = 0U; i < vm->e820_entry_num; i++) {
@@ -199,19 +201,31 @@ static void prepare_prelaunched_vm_memmap(struct acrn_vm *vm, const struct acrn_
 		}

 		/* Do EPT mapping for GPAs that are backed by physical memory */
-		if (entry->type == E820_TYPE_RAM) {
+		if ((entry->type == E820_TYPE_RAM) && (remaining_hpa_size >= entry->length)) {
 			ept_add_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, base_hpa, entry->baseaddr,
 				entry->length, EPT_RWX | EPT_WB);
 			base_hpa += entry->length;
+			remaining_hpa_size -= entry->length;
+		} else if ((entry->type == E820_TYPE_RAM) && (remaining_hpa_size < entry->length)) {
+			pr_warn("%s: HPA size incorrectly configured in v820\n", __func__);
 		}

 		/* GPAs under 1MB are always backed by physical memory */
-		if ((entry->type != E820_TYPE_RAM) && (entry->baseaddr < (uint64_t)MEM_1M)) {
+		if ((entry->type != E820_TYPE_RAM) && (entry->baseaddr < (uint64_t)MEM_1M) &&
+			(remaining_hpa_size >= entry->length)) {
 			ept_add_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, base_hpa, entry->baseaddr,
 				entry->length, EPT_RWX | EPT_UNCACHED);
 			base_hpa += entry->length;
+			remaining_hpa_size -= entry->length;
+		}
+
+		if ((remaining_hpa_size == 0UL) && (is_hpa1)) {
+			is_hpa1 = false;
+			base_hpa = vm_config->memory.start_hpa2;
+			remaining_hpa_size = vm_config->memory.size_hpa2;
+		}
 	}
 }
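
To make the control flow above concrete, the following standalone
sketch mimics the accounting done by prepare_prelaunched_vm_memmap:
ve820 entries are consumed from HPA1 until remaining_hpa_size reaches
zero, then the walk continues from HPA2. The ve820 layout, addresses,
and sizes are all invented for illustration; this is ordinary userspace
C, not hypervisor code:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct ve820_entry {
		uint64_t baseaddr;
		uint64_t length;
		bool     is_ram;
	};

	int main(void)
	{
		/* Hypothetical ve820: more RAM than HPA1 alone can back. */
		struct ve820_entry ve820[] = {
			{ 0x00000000UL, 0x20000000UL, true },	/* 512 MiB RAM */
			{ 0x20000000UL, 0x20000000UL, true },	/* 512 MiB RAM */
		};
		uint64_t base_hpa = 0x100000000UL;	/* hypothetical HPA1 start */
		uint64_t remaining = 0x20000000UL;	/* HPA1 size: 512 MiB      */
		bool is_hpa1 = true;
		unsigned int i;

		for (i = 0U; i < sizeof(ve820) / sizeof(ve820[0]); i++) {
			if (ve820[i].is_ram && (remaining >= ve820[i].length)) {
				/* stands in for ept_add_mr() */
				printf("map GPA 0x%llx -> HPA 0x%llx, 0x%llx bytes\n",
				       (unsigned long long)ve820[i].baseaddr,
				       (unsigned long long)base_hpa,
				       (unsigned long long)ve820[i].length);
				base_hpa += ve820[i].length;
				remaining -= ve820[i].length;
			}
			if ((remaining == 0UL) && is_hpa1) {
				/* HPA1 exhausted: switch to the second region. */
				is_hpa1 = false;
				base_hpa = 0x140000000UL;	/* hypothetical HPA2 start */
				remaining = 0x20000000UL;	/* HPA2 size               */
			}
		}
		return 0;
	}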


@@ -37,6 +37,8 @@ enum acrn_vm_load_order {
 struct acrn_vm_mem_config {
 	uint64_t start_hpa;	/* the start HPA of VM memory configuration, for pre-launched VMs only */
 	uint64_t size;		/* VM memory size configuration */
+	uint64_t start_hpa2;	/* the start HPA of the second, non-contiguous memory region, for pre-launched VMs only */
+	uint64_t size_hpa2;	/* the size of the second, non-contiguous memory region */
 };

 struct target_vuart {
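
Given the new fields, a config-time sanity check for assumption 1 could
look like the sketch below. This helper is hypothetical and not part of
the patch; it only restates the constraint that a second region, if
present, must start at or above the end of the first:

	/* Hypothetical check, not in this patch (assumption 1). */
	static inline bool vm_mem_config_is_valid(const struct acrn_vm_mem_config *cfg)
	{
		return (cfg->size_hpa2 == 0UL) ||
		       (cfg->start_hpa2 >= (cfg->start_hpa + cfg->size));
	}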