diff --git a/hypervisor/arch/x86/vmx.c b/hypervisor/arch/x86/vmx.c
index e63adae77..66ba26698 100644
--- a/hypervisor/arch/x86/vmx.c
+++ b/hypervisor/arch/x86/vmx.c
@@ -82,16 +82,23 @@ static inline int exec_vmxon(void *addr)
 	return status;
 }
 
-int exec_vmxon_instr(void)
+/* Per-cpu data holds the vmxon_region_pa for each pcpu so that the
+ * region can be reused when a pcpu is brought up again after going
+ * down, as happens across S3 enter/exit.
+ */
+int exec_vmxon_instr(uint32_t pcpu_id)
 {
-	uint64_t tmp64;
+	uint64_t tmp64, vmcs_pa;
 	uint32_t tmp32;
 	int ret = -ENOMEM;
 	void *vmxon_region_va;
-	uint64_t vmxon_region_pa;
+	struct vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
 
 	/* Allocate page aligned memory for VMXON region */
-	vmxon_region_va = alloc_page();
+	if (per_cpu(vmxon_region_pa, pcpu_id) == 0)
+		vmxon_region_va = alloc_page();
+	else
+		vmxon_region_va = HPA2HVA(per_cpu(vmxon_region_pa, pcpu_id));
 	if (vmxon_region_va != 0) {
 		/* Initialize vmxon page with revision id from IA32 VMX BASIC
 		 * MSR
@@ -107,8 +114,13 @@ int exec_vmxon_instr(void)
 		CPU_CR_WRITE(cr4, tmp64 | CR4_VMXE);
 
 		/* Turn ON VMX */
-		vmxon_region_pa = HVA2HPA(vmxon_region_va);
-		ret = exec_vmxon(&vmxon_region_pa);
+		per_cpu(vmxon_region_pa, pcpu_id) = HVA2HPA(vmxon_region_va);
+		ret = exec_vmxon(&per_cpu(vmxon_region_pa, pcpu_id));
+
+		if (vcpu) {
+			vmcs_pa = HVA2HPA(vcpu->arch_vcpu.vmcs);
+			ret = exec_vmptrld(&vmcs_pa);
+		}
 	} else
 		pr_err("%s, alloc memory for VMXON region failed\n",
 				__func__);
@@ -116,6 +128,25 @@ int exec_vmxon_instr(void)
 	return ret;
 }
 
+int vmx_off(int pcpu_id)
+{
+	int ret = 0;
+
+	struct vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
+	uint64_t vmcs_pa;
+
+	if (vcpu) {
+		vmcs_pa = HVA2HPA(vcpu->arch_vcpu.vmcs);
+		ret = exec_vmclear((void *)&vmcs_pa);
+		if (ret)
+			return ret;
+	}
+
+	asm volatile ("vmxoff" : : : "memory");
+
+	return 0;
+}
+
 int exec_vmclear(void *addr)
 {
 	uint64_t rflags;
diff --git a/hypervisor/common/hv_main.c b/hypervisor/common/hv_main.c
index 01fc9b036..b23a440fe 100644
--- a/hypervisor/common/hv_main.c
+++ b/hypervisor/common/hv_main.c
@@ -125,7 +125,7 @@ int hv_main(int cpu_id)
 	}
 
 	/* Enable virtualization extensions */
-	ret = exec_vmxon_instr();
+	ret = exec_vmxon_instr(cpu_id);
 	if (ret != 0)
 		return ret;
 
diff --git a/hypervisor/include/arch/x86/per_cpu.h b/hypervisor/include/arch/x86/per_cpu.h
index e24294bbb..f6a88746a 100644
--- a/hypervisor/include/arch/x86/per_cpu.h
+++ b/hypervisor/include/arch/x86/per_cpu.h
@@ -18,6 +18,7 @@ struct per_cpu_region {
 	uint64_t vmexit_time[64];
 	uint64_t softirq_pending;
 	uint64_t spurious;
+	uint64_t vmxon_region_pa;
 	struct dev_handler_node *timer_node;
 	struct shared_buf *earlylog_sbuf;
 	void *vcpu;
diff --git a/hypervisor/include/arch/x86/vmx.h b/hypervisor/include/arch/x86/vmx.h
index 71810e2c1..a99bfb179 100644
--- a/hypervisor/include/arch/x86/vmx.h
+++ b/hypervisor/include/arch/x86/vmx.h
@@ -401,13 +401,16 @@
 #define VMX_SUPPORT_UNRESTRICTED_GUEST (1<<5)
 
 /* External Interfaces */
-int exec_vmxon_instr(void);
+int exec_vmxon_instr(uint32_t pcpu_id);
 uint64_t exec_vmread(uint32_t field);
 uint64_t exec_vmread64(uint32_t field_full);
 void exec_vmwrite(uint32_t field, uint64_t value);
 void exec_vmwrite64(uint32_t field_full, uint64_t value);
 int init_vmcs(struct vcpu *vcpu);
 
+int vmx_off(int pcpu_id);
+int vmx_restart(int pcpu_id);
+
 int exec_vmclear(void *addr);
 int exec_vmptrld(void *addr);
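
Note: below is a minimal sketch of how the interfaces touched by this patch
are expected to pair up across an S3 (suspend-to-RAM) cycle. The enclosing
host_enter_s3()/host_resume_from_s3() names are hypothetical illustration and
not part of this patch; only vmx_off() and exec_vmxon_instr() come from it.

	/* Hypothetical S3 flow -- illustrative only; function names
	 * other than vmx_off()/exec_vmxon_instr() are assumptions.
	 */
	void host_enter_s3(int pcpu_id)
	{
		/* VMCLEAR the ever-run vcpu's VMCS and execute VMXOFF so
		 * the pcpu leaves VMX operation cleanly before sleeping.
		 */
		vmx_off(pcpu_id);

		/* ... firmware/platform S3 entry ... */
	}

	void host_resume_from_s3(uint32_t pcpu_id)
	{
		/* Re-enter VMX operation on wakeup. Because vmxon_region_pa
		 * was stashed in per-cpu data, the same VMXON region is
		 * reused instead of reallocated, and the ever-run vcpu's
		 * VMCS is made current again via VMPTRLD inside this call.
		 */
		exec_vmxon_instr(pcpu_id);
	}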