hv: optimize run_vcpu() for nested

This patch implements a separate path for L2 VMEntry in run_vcpu(),
which has several benefits:

- keep run_vcpu() clean, to reduce the number of is_vcpu_in_l2_guest()
  statements:
  - current code has three is_vcpu_in_l2_guest() already.
- supposed to have another 2 statements so that nested VMEntry won't
  hit the "Starting vCPU" and "vCPU launched" pr_info() and a few
  other statements in the VM launch path.

- saves a few other things in run_vcpu() that are not needed for nested.

Tracked-On: #6289
Signed-off-by: Zide Chen <zide.chen@intel.com>
Acked-by: Eddie Dong <eddie.dong@Intel.com>
This commit is contained in:
Zide Chen 2021-08-25 21:27:55 -07:00 committed by wenlingz
parent 0c677fd3b4
commit e48962faa6
1 changed file with 59 additions and 55 deletions

View File

@@ -667,79 +667,83 @@ int32_t run_vcpu(struct acrn_vcpu *vcpu)
write_cached_registers(vcpu);
}
/* If this VCPU is not already launched, launch it */
if (!vcpu->launched) {
pr_info("VM %d Starting VCPU %hu",
vcpu->vm->vm_id, vcpu->vcpu_id);
if (is_vcpu_in_l2_guest(vcpu)) {
int32_t launch_type;
/* VMX_VPID for l2 guests is not managed in this way */
if (!is_vcpu_in_l2_guest(vcpu) && (vcpu->arch.vpid != 0U)) {
exec_vmwrite16(VMX_VPID, vcpu->arch.vpid);
if (vcpu->launched) {
/* for nested VM-exits that don't need to be reflected to L1 hypervisor */
launch_type = VM_RESUME;
} else {
/* for VMEntry case, VMCS02 was VMCLEARed by ACRN */
launch_type = VM_LAUNCH;
vcpu->launched = true;
}
/*
* A power-up or a reset invalidates all linear mappings,
* guest-physical mappings, and combined mappings
*/
if (!is_vcpu_in_l2_guest(vcpu)) {
status = exec_vmentry(ctx, launch_type, ibrs_type);
} else {
/* If this VCPU is not already launched, launch it */
if (!vcpu->launched) {
pr_info("VM %d Starting VCPU %hu",
vcpu->vm->vm_id, vcpu->vcpu_id);
if (vcpu->arch.vpid != 0U) {
exec_vmwrite16(VMX_VPID, vcpu->arch.vpid);
}
/*
* A power-up or a reset invalidates all linear mappings,
* guest-physical mappings, and combined mappings
*/
flush_vpid_global();
}
#ifdef CONFIG_HYPERV_ENABLED
if (is_vcpu_bsp(vcpu)) {
hyperv_init_time(vcpu->vm);
}
if (is_vcpu_bsp(vcpu)) {
hyperv_init_time(vcpu->vm);
}
#endif
/* Set vcpu launched */
vcpu->launched = true;
/* Set vcpu launched */
vcpu->launched = true;
/* avoid VMCS recycling RSB usage, set IBPB.
* NOTE: this should be done for any time vmcs got switch
* currently, there is no other place to do vmcs switch
* Please add IBPB set for future vmcs switch case(like trusty)
*/
if (ibrs_type == IBRS_RAW) {
msr_write(MSR_IA32_PRED_CMD, PRED_SET_IBPB);
}
/* Launch the VM */
status = exec_vmentry(ctx, VM_LAUNCH, ibrs_type);
/* See if VM launched successfully */
if (status == 0) {
if (is_vcpu_bsp(vcpu)) {
pr_info("VM %d VCPU %hu successfully launched",
vcpu->vm->vm_id, vcpu->vcpu_id);
/* avoid VMCS recycling RSB usage, set IBPB.
* NOTE: this should be done for any time vmcs got switch
* currently, there is no other place to do vmcs switch
* Please add IBPB set for future vmcs switch case(like trusty)
*/
if (ibrs_type == IBRS_RAW) {
msr_write(MSR_IA32_PRED_CMD, PRED_SET_IBPB);
}
}
} else {
/* This VCPU was already launched, check if the last guest
* instruction needs to be repeated and resume VCPU accordingly
*/
if (vcpu->arch.inst_len != 0U) {
exec_vmwrite(VMX_GUEST_RIP, vcpu_get_rip(vcpu) + vcpu->arch.inst_len);
/* Launch the VM */
status = exec_vmentry(ctx, VM_LAUNCH, ibrs_type);
/* See if VM launched successfully */
if (status == 0) {
if (is_vcpu_bsp(vcpu)) {
pr_info("VM %d VCPU %hu successfully launched",
vcpu->vm->vm_id, vcpu->vcpu_id);
}
}
} else {
/* This VCPU was already launched, check if the last guest
* instruction needs to be repeated and resume VCPU accordingly
*/
if (vcpu->arch.inst_len != 0U) {
exec_vmwrite(VMX_GUEST_RIP, vcpu_get_rip(vcpu) + vcpu->arch.inst_len);
}
/* Resume the VM */
status = exec_vmentry(ctx, VM_RESUME, ibrs_type);
}
/* Resume the VM */
status = exec_vmentry(ctx, VM_RESUME, ibrs_type);
}
vcpu->reg_cached = 0UL;
/*
* - can not call vcpu_get_xxx() when vmcs02 is current, or it could mess up
* the cached registers for the L1 guest
* - the L2 guests' vcpu mode is managed by L1 hypervisor
* - ctx->cpu_regs.regs.rsp doesn't cache nested guests' RSP
*/
if (!is_vcpu_in_l2_guest(vcpu)) {
cs_attr = exec_vmread32(VMX_GUEST_CS_ATTR);
ia32_efer = vcpu_get_efer(vcpu);
cr0 = vcpu_get_cr0(vcpu);
set_vcpu_mode(vcpu, cs_attr, ia32_efer, cr0);
}
vcpu->reg_cached = 0UL;
/* Obtain current VCPU instruction length */
vcpu->arch.inst_len = exec_vmread32(VMX_EXIT_INSTR_LEN);