hv: Do not reset vcpu thread's stack when reset_vcpu

The vcpu thread's stack should not be reset when reset_vcpu is called.
There is also a bug here:
when the vcpu B thread sets vcpu->running to false, another vcpu A thread
may treat vcpu B as paused even though it has not been completely
switched out; reset_vcpu will then reset the vcpu B thread's stack and
corrupt its running context.

This patch removes the vcpu thread's stack reset from reset_vcpu.
With this change, init_vmcs must be done after the vcpu's startup address
is settled and before the vcpu is scheduled in. switch_to_idle() is no
longer needed, as the S3 thread's stack will not be reset.

Tracked-On: #3813
Signed-off-by: Fengwei Yin <fengwei.yin@intel.com>
Signed-off-by: Shuo A Liu <shuo.a.liu@intel.com>
This commit is contained in:
Shuo A Liu 2019-10-22 13:27:46 +08:00 committed by ACRN System Integration
parent 3072b6fc6d
commit f85106d1ed
5 changed files with 18 additions and 11 deletions

View File

@ -639,7 +639,6 @@ void reset_vcpu(struct acrn_vcpu *vcpu)
vcpu->arch.exception_info.exception = VECTOR_INVALID;
vcpu->arch.cur_context = NORMAL_WORLD;
vcpu->arch.irq_window_enabled = false;
vcpu->thread_obj.host_sp = build_stack_frame(vcpu);
(void)memset((void *)vcpu->arch.vmcs, 0U, PAGE_SIZE);
for (i = 0; i < NR_WORLD; i++) {

View File

@ -1170,6 +1170,8 @@ vlapic_process_init_sipi(struct acrn_vcpu* target_vcpu, uint32_t mode, uint32_t
target_vcpu->vcpu_id,
target_vcpu->vm->vm_id);
set_vcpu_startup_entry(target_vcpu, (icr_low & APIC_VECTOR_MASK) << 12U);
/* init vmcs after set_vcpu_startup_entry */
init_vmcs(target_vcpu);
schedule_vcpu(target_vcpu);
}
}

View File

@ -647,6 +647,7 @@ void start_vm(struct acrn_vm *vm)
/* Only start BSP (vid = 0) and let BSP start other APs */
bsp = vcpu_from_vid(vm, BOOT_CPU_ID);
init_vmcs(bsp);
schedule_vcpu(bsp);
}
@ -772,7 +773,6 @@ void resume_vm_from_s3(struct acrn_vm *vm, uint32_t wakeup_vec)
init_vmcs(bsp);
schedule_vcpu(bsp);
switch_to_idle(default_idle);
}
/**

View File

@ -496,10 +496,7 @@ static void init_exit_ctrl(const struct acrn_vcpu *vcpu)
exec_vmwrite64(VMX_EXIT_MSR_LOAD_ADDR_FULL, hva2hpa((void *)vcpu->arch.msr_area.host));
}
/**
* @pre vcpu != NULL
*/
void init_vmcs(struct acrn_vcpu *vcpu)
static void do_init_vmcs(struct acrn_vcpu *vcpu)
{
uint64_t vmx_rev_id;
uint64_t vmcs_pa;
@ -532,6 +529,20 @@ void init_vmcs(struct acrn_vcpu *vcpu)
init_exit_ctrl(vcpu);
}
/**
 * Initialize the VMCS of the given vCPU on its pinned physical CPU.
 *
 * do_init_vmcs performs the actual VMCS setup and must execute on the
 * pCPU the vCPU is bound to (vcpu->pcpu_id) — presumably because VMCS
 * load/activation is per-pCPU state; confirm against the VMX handling
 * in do_init_vmcs.
 *
 * @pre vcpu != NULL
 */
void init_vmcs(struct acrn_vcpu *vcpu)
{
uint16_t pcpu_id = vcpu->pcpu_id;
/* Already on the target pCPU: initialize directly. */
if (pcpu_id == get_pcpu_id()) {
do_init_vmcs(vcpu);
} else {
/* Cross-CPU case: dispatch do_init_vmcs to the target pCPU via an
 * SMP call, using a single-bit mask for that pCPU.
 * NOTE(review): the cast to smp_call_func_t assumes do_init_vmcs's
 * signature is compatible with the SMP callback type — verify. */
smp_call_function((1UL << pcpu_id), (smp_call_func_t)do_init_vmcs, vcpu);
}
}
void switch_apicv_mode_x2apic(struct acrn_vcpu *vcpu)
{
uint32_t value32;

View File

@ -21,11 +21,6 @@ void vcpu_thread(struct thread_object *obj)
int32_t ret = 0;
do {
/* If vcpu is not launched, we need to do init_vmcs first */
if (!vcpu->launched) {
init_vmcs(vcpu);
}
if (!is_lapic_pt_enabled(vcpu)) {
CPU_IRQ_DISABLE();
}