From f85106d1eda78e6d9f12d57d26fb3b37e6a681a4 Mon Sep 17 00:00:00 2001 From: Shuo A Liu Date: Tue, 22 Oct 2019 13:27:46 +0800 Subject: [PATCH] hv: Do not reset vcpu thread's stack when reset_vcpu The vcpu thread's stack shouldn't be reset as part of reset_vcpu. There is also a bug here: when vcpu B's thread sets vcpu->running to false, another thread (vcpu A's) will treat vcpu B as paused even though it has not been completely switched out yet; reset_vcpu will then reset vcpu B's thread stack and corrupt its running context. This patch removes the vcpu thread's stack reset from reset_vcpu. With this change, we need to do init_vmcs after the vcpu startup address is settled and before the vcpu is scheduled in. And switch_to_idle() is no longer needed, as the S3 thread's stack will not be reset. Tracked-On: #3813 Signed-off-by: Fengwei Yin Signed-off-by: Shuo A Liu --- hypervisor/arch/x86/guest/vcpu.c | 1 - hypervisor/arch/x86/guest/vlapic.c | 2 ++ hypervisor/arch/x86/guest/vm.c | 2 +- hypervisor/arch/x86/guest/vmcs.c | 19 +++++++++++++++---- hypervisor/common/hv_main.c | 5 ----- 5 files changed, 18 insertions(+), 11 deletions(-) diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c index 3c2f8c6eb..7843c2620 100644 --- a/hypervisor/arch/x86/guest/vcpu.c +++ b/hypervisor/arch/x86/guest/vcpu.c @@ -639,7 +639,6 @@ void reset_vcpu(struct acrn_vcpu *vcpu) vcpu->arch.exception_info.exception = VECTOR_INVALID; vcpu->arch.cur_context = NORMAL_WORLD; vcpu->arch.irq_window_enabled = false; - vcpu->thread_obj.host_sp = build_stack_frame(vcpu); (void)memset((void *)vcpu->arch.vmcs, 0U, PAGE_SIZE); for (i = 0; i < NR_WORLD; i++) { diff --git a/hypervisor/arch/x86/guest/vlapic.c b/hypervisor/arch/x86/guest/vlapic.c index 90bcdf718..d3ec24422 100644 --- a/hypervisor/arch/x86/guest/vlapic.c +++ b/hypervisor/arch/x86/guest/vlapic.c @@ -1170,6 +1170,8 @@ vlapic_process_init_sipi(struct acrn_vcpu* target_vcpu, uint32_t mode, uint32_t target_vcpu->vcpu_id, target_vcpu->vm->vm_id); 
set_vcpu_startup_entry(target_vcpu, (icr_low & APIC_VECTOR_MASK) << 12U); + /* init vmcs after set_vcpu_startup_entry */ + init_vmcs(target_vcpu); schedule_vcpu(target_vcpu); } } diff --git a/hypervisor/arch/x86/guest/vm.c b/hypervisor/arch/x86/guest/vm.c index 41904d53e..6da222061 100644 --- a/hypervisor/arch/x86/guest/vm.c +++ b/hypervisor/arch/x86/guest/vm.c @@ -647,6 +647,7 @@ void start_vm(struct acrn_vm *vm) /* Only start BSP (vid = 0) and let BSP start other APs */ bsp = vcpu_from_vid(vm, BOOT_CPU_ID); + init_vmcs(bsp); schedule_vcpu(bsp); } @@ -772,7 +773,6 @@ void resume_vm_from_s3(struct acrn_vm *vm, uint32_t wakeup_vec) init_vmcs(bsp); schedule_vcpu(bsp); - switch_to_idle(default_idle); } /** diff --git a/hypervisor/arch/x86/guest/vmcs.c b/hypervisor/arch/x86/guest/vmcs.c index c49e92a0c..5d85086a1 100644 --- a/hypervisor/arch/x86/guest/vmcs.c +++ b/hypervisor/arch/x86/guest/vmcs.c @@ -496,10 +496,7 @@ static void init_exit_ctrl(const struct acrn_vcpu *vcpu) exec_vmwrite64(VMX_EXIT_MSR_LOAD_ADDR_FULL, hva2hpa((void *)vcpu->arch.msr_area.host)); } -/** - * @pre vcpu != NULL - */ -void init_vmcs(struct acrn_vcpu *vcpu) +static void do_init_vmcs(struct acrn_vcpu *vcpu) { uint64_t vmx_rev_id; uint64_t vmcs_pa; @@ -532,6 +529,20 @@ void init_vmcs(struct acrn_vcpu *vcpu) init_exit_ctrl(vcpu); } +/** + * @pre vcpu != NULL + */ +void init_vmcs(struct acrn_vcpu *vcpu) +{ + uint16_t pcpu_id = vcpu->pcpu_id; + + if (pcpu_id == get_pcpu_id()) { + do_init_vmcs(vcpu); + } else { + smp_call_function((1UL << pcpu_id), (smp_call_func_t)do_init_vmcs, vcpu); + } +} + void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu) { uint32_t value32; diff --git a/hypervisor/common/hv_main.c b/hypervisor/common/hv_main.c index 1cba51355..240c244aa 100644 --- a/hypervisor/common/hv_main.c +++ b/hypervisor/common/hv_main.c @@ -21,11 +21,6 @@ void vcpu_thread(struct thread_object *obj) int32_t ret = 0; do { - /* If vcpu is not launched, we need to do init_vmcs first */ - if 
(!vcpu->launched) { - init_vmcs(vcpu); - } - if (!is_lapic_pt_enabled(vcpu)) { CPU_IRQ_DISABLE(); }