hv: Replace dynamic allocation with static memory for vmxon_region

Remove vmxon_region_pa from structure per_cpu_region,
and define vmxon_region inside per_cpu_region.

Tracked-On: #861
Signed-off-by: Mingqiang Chi <mingqiang.chi@intel.com>
Reviewed-by: Junjie Mao <junjie.mao@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Mingqiang Chi 2018-09-05 13:51:32 +08:00 committed by lijinxia
parent 4360235edf
commit 65930809ee
2 changed files with 19 additions and 32 deletions

View File

@@ -84,42 +84,28 @@ int exec_vmxon_instr(uint16_t pcpu_id)
{ {
uint64_t tmp64, vmcs_pa; uint64_t tmp64, vmcs_pa;
uint32_t tmp32; uint32_t tmp32;
int ret = -ENOMEM; int ret = 0;
void *vmxon_region_va; void *vmxon_region_va = (void *)per_cpu(vmxon_region, pcpu_id);
uint64_t vmxon_region_pa;
struct vcpu *vcpu = get_ever_run_vcpu(pcpu_id); struct vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
/* Allocate page aligned memory for VMXON region */ /* Initialize vmxon page with revision id from IA32 VMX BASIC MSR */
if (per_cpu(vmxon_region_pa, pcpu_id) == 0UL) { tmp32 = (uint32_t)msr_read(MSR_IA32_VMX_BASIC);
vmxon_region_va = alloc_page(); (void)memcpy_s((uint32_t *) vmxon_region_va, 4U, (void *)&tmp32, 4U);
}
else {
vmxon_region_va = HPA2HVA(per_cpu(vmxon_region_pa, pcpu_id));
}
if (vmxon_region_va != NULL) { /* Turn on CR0.NE and CR4.VMXE */
/* Initialize vmxon page with revision id from IA32 VMX BASIC CPU_CR_READ(cr0, &tmp64);
* MSR CPU_CR_WRITE(cr0, tmp64 | CR0_NE);
*/ CPU_CR_READ(cr4, &tmp64);
tmp32 = (uint32_t)msr_read(MSR_IA32_VMX_BASIC); CPU_CR_WRITE(cr4, tmp64 | CR4_VMXE);
(void)memcpy_s((uint32_t *) vmxon_region_va, 4U, (void *)&tmp32, 4U);
/* Turn on CR0.NE and CR4.VMXE */ /* Turn ON VMX */
CPU_CR_READ(cr0, &tmp64); vmxon_region_pa = HVA2HPA(vmxon_region_va);
CPU_CR_WRITE(cr0, tmp64 | CR0_NE); ret = exec_vmxon(&vmxon_region_pa);
CPU_CR_READ(cr4, &tmp64);
CPU_CR_WRITE(cr4, tmp64 | CR4_VMXE);
/* Turn ON VMX */ if (vcpu != NULL) {
per_cpu(vmxon_region_pa, pcpu_id) = HVA2HPA(vmxon_region_va); vmcs_pa = HVA2HPA(vcpu->arch_vcpu.vmcs);
ret = exec_vmxon(&per_cpu(vmxon_region_pa, pcpu_id)); ret = exec_vmptrld(&vmcs_pa);
if (vcpu != NULL) {
vmcs_pa = HVA2HPA(vcpu->arch_vcpu.vmcs);
ret = exec_vmptrld(&vmcs_pa);
}
} else {
pr_err("%s, alloc memory for VMXON region failed\n",
__func__);
} }
return ret; return ret;

View File

@@ -19,6 +19,8 @@
#include "arch/x86/guest/instr_emul.h" #include "arch/x86/guest/instr_emul.h"
struct per_cpu_region { struct per_cpu_region {
/* vmxon_region MUST be 4KB-aligned */
uint8_t vmxon_region[CPU_PAGE_SIZE];
#ifdef HV_DEBUG #ifdef HV_DEBUG
uint64_t *sbuf[ACRN_SBUF_ID_MAX]; uint64_t *sbuf[ACRN_SBUF_ID_MAX];
uint64_t vmexit_cnt[64]; uint64_t vmexit_cnt[64];
@@ -28,7 +30,6 @@ struct per_cpu_region {
uint64_t irq_count[NR_IRQS]; uint64_t irq_count[NR_IRQS];
uint64_t softirq_pending; uint64_t softirq_pending;
uint64_t spurious; uint64_t spurious;
uint64_t vmxon_region_pa;
struct shared_buf *earlylog_sbuf; struct shared_buf *earlylog_sbuf;
void *vcpu; void *vcpu;
void *ever_run_vcpu; void *ever_run_vcpu;