hv: add vmx_off and update exec_vmxon_instr

To handle CPU down/up dynamically, acrn needs to support turning VMX
off/on dynamically. The following changes are introduced:
  vmx_off will be used when bringing an AP down. It does:
    - vmclear the mapped vcpu
    - turn VMX off (vmxoff).

  exec_vmxon_instr is updated to handle both starting and bringing up
  an AP. It does:
    - if VMX was on on this AP before, load the saved vmxon_region.
      Otherwise, allocate a vmxon_region.
    - if there is a mapped vcpu, vmptrld the mapped vcpu's VMCS.
  A caller-side sketch of the intended down/up pairing follows.
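
  A minimal caller-side sketch of how the two entry points pair up
  across an AP down/up cycle (e.g. S3). Only exec_vmxon_instr() and
  vmx_off() come from this patch; the wrapper and its name are
  illustrative:

    #include <stdint.h>   /* stand-in for the hypervisor's types header */

    int exec_vmxon_instr(uint32_t pcpu_id);   /* from this patch */
    int vmx_off(int pcpu_id);                 /* from this patch */

    /* Hypothetical wrapper, e.g. around S3 enter/exit. */
    static int vmx_down_up_cycle(uint32_t pcpu_id)
    {
            /* down: vmclear the mapped vcpu, then vmxoff */
            int ret = vmx_off((int)pcpu_id);

            if (ret != 0)
                    return ret;

            /* ... pcpu offline here (e.g. across S3 enter/exit) ... */

            /* up: vmxon reusing the saved vmxon_region_pa, then
             * vmptrld the mapped vcpu's VMCS if one exists */
            return exec_vmxon_instr(pcpu_id);
    }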

Signed-off-by: Zheng Gen <gen.zheng@intel.com>
Signed-off-by: Yin Fegnwei <fengwei.yin@intel.com>
Acked-by: Eddie Dong <Eddie.dong@intel.com>
Author: Yin Fegnwei <fengwei.yin@intel.com>
Date: 2018-06-02 11:19:30 +08:00
Committed by: lijinxia
Commit: 08139c34f7 (parent: fbeafd500a)
4 changed files with 43 additions and 8 deletions

@@ -82,16 +82,23 @@ static inline int exec_vmxon(void *addr)
 	return status;
 }
 
-int exec_vmxon_instr(void)
+/* Per cpu data to hold the vmxon_region_pa for each pcpu.
+ * It will be used again when we start a pcpu after the pcpu was down.
+ * S3 enter/exit will use it.
+ */
+int exec_vmxon_instr(uint32_t pcpu_id)
 {
-	uint64_t tmp64;
+	uint64_t tmp64, vmcs_pa;
 	uint32_t tmp32;
 	int ret = -ENOMEM;
 	void *vmxon_region_va;
-	uint64_t vmxon_region_pa;
+	struct vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
 
 	/* Allocate page aligned memory for VMXON region */
-	vmxon_region_va = alloc_page();
+	if (per_cpu(vmxon_region_pa, pcpu_id) == 0)
+		vmxon_region_va = alloc_page();
+	else
+		vmxon_region_va = HPA2HVA(per_cpu(vmxon_region_pa, pcpu_id));
 
 	if (vmxon_region_va != 0) {
 		/* Initialize vmxon page with revision id from IA32 VMX BASIC
@@ -107,8 +114,13 @@ int exec_vmxon_instr(void)
 		CPU_CR_WRITE(cr4, tmp64 | CR4_VMXE);
 
 		/* Turn ON VMX */
-		vmxon_region_pa = HVA2HPA(vmxon_region_va);
-		ret = exec_vmxon(&vmxon_region_pa);
+		per_cpu(vmxon_region_pa, pcpu_id) = HVA2HPA(vmxon_region_va);
+		ret = exec_vmxon(&per_cpu(vmxon_region_pa, pcpu_id));
+
+		if (vcpu) {
+			vmcs_pa = HVA2HPA(vcpu->arch_vcpu.vmcs);
+			ret = exec_vmptrld(&vmcs_pa);
+		}
 	} else
 		pr_err("%s, alloc memory for VMXON region failed\n",
 				__func__);
@@ -116,6 +128,25 @@ int exec_vmxon_instr(void)
 	return ret;
 }
 
+int vmx_off(int pcpu_id)
+{
+	int ret = 0;
+
+	struct vcpu *vcpu = get_ever_run_vcpu(pcpu_id);
+	uint64_t vmcs_pa;
+
+	if (vcpu) {
+		vmcs_pa = HVA2HPA(vcpu->arch_vcpu.vmcs);
+		ret = exec_vmclear((void *)&vmcs_pa);
+		if (ret)
+			return ret;
+	}
+
+	asm volatile ("vmxoff" : : : "memory");
+
+	return 0;
+}
+
 int exec_vmclear(void *addr)
 {
 	uint64_t rflags;

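The order inside vmx_off() follows the VMX rule that the current VMCS
must be VMCLEARed before VMXOFF, so its cached state is flushed back to
memory. A sketch of such a wrapper, assuming GCC inline asm on x86_64
(illustrative only; ACRN's real exec_vmclear is truncated above):

	#include <stdint.h>

	/* Illustrative VMCLEAR wrapper -- not ACRN's exec_vmclear.
	 * VMCLEAR reports VMfailInvalid via CF and VMfailValid via ZF,
	 * so "setbe" (CF=1 or ZF=1) catches both failure modes. */
	static inline int vmclear_sketch(uint64_t *vmcs_pa)
	{
		uint8_t fail;

		asm volatile ("vmclear %1\n\t"
			      "setbe %0"
			      : "=q" (fail)
			      : "m" (*vmcs_pa)
			      : "cc", "memory");

		return fail ? -1 : 0;
	}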
@@ -125,7 +125,7 @@ int hv_main(int cpu_id)
 	}
 
 	/* Enable virtualization extensions */
-	ret = exec_vmxon_instr();
+	ret = exec_vmxon_instr(cpu_id);
 	if (ret != 0)
 		return ret;
 

@@ -18,6 +18,7 @@ struct per_cpu_region {
 	uint64_t vmexit_time[64];
 	uint64_t softirq_pending;
 	uint64_t spurious;
+	uint64_t vmxon_region_pa;
 	struct dev_handler_node *timer_node;
 	struct shared_buf *earlylog_sbuf;
 	void *vcpu;

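The vmx.c hunks above reach this new field through ACRN's per_cpu()
accessor. A minimal sketch of the array-backed pattern that usage
implies (assumed layout and names; the real macro is defined elsewhere
in per_cpu.h and may differ):

	#include <stdint.h>

	#define SKETCH_MAX_PCPU 8	/* illustrative CPU count */

	struct per_cpu_region_sketch {
		uint64_t vmxon_region_pa;  /* the field added by this hunk */
		/* ... remaining per-cpu fields ... */
	};

	static struct per_cpu_region_sketch per_cpu_data_sketch[SKETCH_MAX_PCPU];

	/* per_cpu(name, pcpu_id): that pcpu's private copy of the field,
	 * so a saved vmxon_region_pa survives an AP down/up cycle. */
	#define per_cpu_sketch(name, pcpu_id) \
		(per_cpu_data_sketch[(pcpu_id)].name)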
@@ -401,13 +401,16 @@
 #define VMX_SUPPORT_UNRESTRICTED_GUEST (1<<5)
 
 /* External Interfaces */
-int exec_vmxon_instr(void);
+int exec_vmxon_instr(uint32_t pcpu_id);
 uint64_t exec_vmread(uint32_t field);
 uint64_t exec_vmread64(uint32_t field_full);
 void exec_vmwrite(uint32_t field, uint64_t value);
 void exec_vmwrite64(uint32_t field_full, uint64_t value);
 int init_vmcs(struct vcpu *vcpu);
+int vmx_off(int pcpu_id);
+int vmx_restart(int pcpu_id);
 int exec_vmclear(void *addr);
 int exec_vmptrld(void *addr);