schedule: add full context switch support

This patch adds full context switch support for scheduling. It makes sure
each VCPU has its own stack, and adds a host_sp field in struct sched_object
to record the host stack pointer of each switched-out object.

The arch-specific function arch_switch_to is added to perform the context switch.

To aid debugging, a name[] field is also added to struct sched_object.
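
In outline, the frame that host_sp records has the shape below; build_stack_frame() in the vcpu code fabricates it for a VCPU that has never run, and arch_switch_to() pushes and pops the same layout on every later switch (this is a summary of the diff that follows, not additional behavior):

/*
 * Frame recorded in sched_object.host_sp (highest address first):
 *
 *   return address  -> run_sched_thread     (target of the final retq)
 *   rflags image                            (restored by popf)
 *   rbx
 *   rbp
 *   r12
 *   r13
 *   r14
 *   r15
 *   rdi             -> the sched_object     (first argument on first entry)
 *      ^-- host_sp points here
 */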

Tracked-On: #2394
Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>
Acked-by: Xu, Anthony <anthony.xu@intel.com>
Jason Chen CJ 2019-01-18 16:30:25 +08:00 committed by wenlingz
parent 21092e6f6d
commit 4fc54f952e
7 changed files with 93 additions and 43 deletions

View File

@@ -170,6 +170,7 @@ C_SRCS += arch/x86/pm.c
S_SRCS += arch/x86/wakeup.S
C_SRCS += arch/x86/static_checks.c
C_SRCS += arch/x86/trampoline.c
C_SRCS += arch/x86/sched.c
C_SRCS += arch/x86/guest/vcpuid.c
C_SRCS += arch/x86/guest/vcpu.c
C_SRCS += arch/x86/guest/vm.c

View File

@@ -634,11 +634,33 @@ void schedule_vcpu(struct acrn_vcpu *vcpu)
release_schedule_lock(vcpu->pcpu_id);
}
static uint64_t build_stack_frame(struct acrn_vcpu *vcpu)
{
uint64_t rsp = (uint64_t)&vcpu->stack[CONFIG_STACK_SIZE - 1];
uint64_t *sp;
rsp &= ~(CPU_STACK_ALIGN - 1UL);
sp = (uint64_t *)rsp;
*sp-- = (uint64_t)run_sched_thread; /*return address*/
*sp-- = 0UL; /* flag */
*sp-- = 0UL; /* rbx */
*sp-- = 0UL; /* rbp */
*sp-- = 0UL; /* r12 */
*sp-- = 0UL; /* r13 */
*sp-- = 0UL; /* r14 */
*sp-- = 0UL; /* r15 */
*sp = (uint64_t)&vcpu->sched_obj; /*rdi*/
return (uint64_t)sp;
}
/* help function for vcpu create */
int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
{
int32_t ret = 0;
struct acrn_vcpu *vcpu = NULL;
char thread_name[16];
ret = create_vcpu(pcpu_id, vm, &vcpu);
if (ret != 0) {
@@ -648,7 +670,10 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
set_pcpu_used(pcpu_id);
INIT_LIST_HEAD(&vcpu->sched_obj.run_list);
snprintf(thread_name, 16U, "vm%hu:vcpu%hu", vm->vm_id, vcpu->vcpu_id);
(void)strncpy_s(vcpu->sched_obj.name, 16U, thread_name, 16U);
vcpu->sched_obj.thread = vcpu_thread;
vcpu->sched_obj.host_sp = build_stack_frame(vcpu);
vcpu->sched_obj.prepare_switch_out = context_switch_out;
vcpu->sched_obj.prepare_switch_in = context_switch_in;

View File

@@ -0,0 +1,36 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <types.h>
#include <spinlock.h>
#include <list.h>
#include <schedule.h>
void arch_switch_to(struct sched_object *prev, struct sched_object *next)
{
asm volatile ("pushf\n"
"pushq %%rbx\n"
"pushq %%rbp\n"
"pushq %%r12\n"
"pushq %%r13\n"
"pushq %%r14\n"
"pushq %%r15\n"
"pushq %%rdi\n"
"movq %%rsp, %0\n"
"movq %1, %%rsp\n"
"popq %%rdi\n"
"popq %%r15\n"
"popq %%r14\n"
"popq %%r13\n"
"popq %%r12\n"
"popq %%rbp\n"
"popq %%rbx\n"
"popf\n"
"retq\n"
: "=m"(prev->host_sp)
: "r"(next->host_sp)
: "memory");
}
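
For reference, the switch sequence implemented above, in comment form (a restatement of the code, with no behavior beyond what the diff shows):

/*
 * arch_switch_to(prev, next):
 *   push prev's rflags, rbx, rbp, r12-r15, rdi    (callee context)
 *   prev->host_sp = rsp                           (remember where prev stopped)
 *   rsp = next->host_sp                           (adopt next's saved frame)
 *   pop rdi, r15-r12, rbp, rbx, rflags
 *   retq
 *     - if next was switched out here before: resumes inside its earlier
 *       schedule() call, right after arch_switch_to() returned
 *     - if next->host_sp was built by build_stack_frame(): enters
 *       run_sched_thread() with rdi already holding the sched_object pointer
 */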

View File

@@ -23,14 +23,14 @@ void vcpu_thread(struct sched_object *obj)
uint32_t basic_exit_reason = 0U;
int32_t ret = 0;
/* If vcpu is not launched, we need to do init_vmcs first */
if (!vcpu->launched) {
init_vmcs(vcpu);
}
run_vcpu_pre_work(vcpu);
do {
/* If vcpu is not launched, we need to do init_vmcs first */
if (!vcpu->launched) {
init_vmcs(vcpu);
}
/* handle pending softirq when irq enable*/
do_softirq();
CPU_IRQ_DISABLE();

View File

@@ -91,6 +91,8 @@ static struct sched_object *get_next_sched_obj(struct sched_context *ctx)
spinlock_obtain(&ctx->runqueue_lock);
if (!list_empty(&ctx->runqueue)) {
obj = get_first_item(&ctx->runqueue, struct sched_object, run_list);
} else {
obj = &get_cpu_var(idle);
}
spinlock_release(&ctx->runqueue_lock);
@@ -131,33 +133,6 @@ int32_t need_offline(uint16_t pcpu_id)
return bitmap_test_and_clear_lock(NEED_OFFLINE, &ctx->flags);
}
static void switch_to_asm(struct sched_object *next, uint64_t cur_sp)
{
asm volatile ("movq %2, %%rsp\n"
"movq %0, %%rdi\n"
"call 22f\n"
"11: \n"
"pause\n"
"jmp 11b\n"
"22:\n"
"mov %1, (%%rsp)\n"
"ret\n"
:
: "c"(next), "a"(next->thread), "r"(cur_sp)
: "memory");
}
static void switch_to(struct sched_object *next)
{
/*
* reset stack pointer here. Otherwise, schedule
* is recursive call and stack will overflow finally.
*/
uint64_t cur_sp = (uint64_t)&get_cpu_var(stack)[CONFIG_STACK_SIZE];
switch_to_asm(next, cur_sp);
}
static void prepare_switch(struct sched_object *prev, struct sched_object *next)
{
if ((prev != NULL) && (prev->prepare_switch_out != NULL)) {
@@ -189,25 +164,31 @@ void schedule(void)
prepare_switch(prev, next);
release_schedule_lock(pcpu_id);
if (next == NULL) {
next = &get_cpu_var(idle);
}
switch_to(next);
ASSERT(false, "Shouldn't go here");
arch_switch_to(prev, next);
}
}
void run_sched_thread(struct sched_object *obj)
{
if (obj->thread != NULL) {
obj->thread(obj);
}
panic("Shouldn't go here, invalid thread!");
}
void switch_to_idle(run_thread_t idle_thread)
{
struct sched_object *idle = &get_cpu_var(idle);
uint16_t pcpu_id = get_cpu_id();
struct sched_object *idle = &per_cpu(idle, pcpu_id);
char idle_name[16];
snprintf(idle_name, 16U, "idle%hu", pcpu_id);
(void)strncpy_s(idle->name, 16U, idle_name, 16U);
idle->thread = idle_thread;
idle->prepare_switch_out = NULL;
idle->prepare_switch_in = NULL;
get_cpu_var(sched_ctx).curr_obj = idle;
if (idle_thread != NULL) {
idle_thread(idle);
}
run_sched_thread(idle);
}

View File

@@ -258,6 +258,8 @@ struct acrn_vcpu_arch {
struct acrn_vm;
struct acrn_vcpu {
uint8_t stack[CONFIG_STACK_SIZE] __aligned(16);
/* Architecture specific definitions for this VCPU */
struct acrn_vcpu_arch arch;
uint16_t pcpu_id; /* Physical CPU ID of this VCPU */

View File

@@ -14,7 +14,9 @@ struct sched_object;
typedef void (*run_thread_t)(struct sched_object *obj);
typedef void (*prepare_switch_t)(struct sched_object *obj);
struct sched_object {
char name[16];
struct list_head run_list;
uint64_t host_sp;
run_thread_t thread;
prepare_switch_t prepare_switch_out;
prepare_switch_t prepare_switch_in;
@@ -46,5 +48,8 @@ void make_pcpu_offline(uint16_t pcpu_id);
int32_t need_offline(uint16_t pcpu_id);
void schedule(void);
void run_sched_thread(struct sched_object *obj);
void arch_switch_to(struct sched_object *prev, struct sched_object *next);
#endif /* SCHEDULE_H */
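
Taken together, the new schedule.h interface lets any schedulable entity be wired up the way prepare_vcpu() and switch_to_idle() do it for VCPUs and the idle loop. A minimal, hypothetical sketch follows; my_obj, my_stack, my_thread and build_frame_for() are illustrative names only, not part of this patch or of the ACRN API (a real user would need a frame builder equivalent to the static build_stack_frame() in vcpu.c):

/* Hypothetical example: register a generic sched_object with the scheduler. */
static uint8_t my_stack[CONFIG_STACK_SIZE] __aligned(16);
static struct sched_object my_obj;

static void my_thread(struct sched_object *obj)
{
	/* entered via run_sched_thread() after the first arch_switch_to() */
	for (;;) {
		/* do work, then give the pcpu back to the scheduler */
		schedule();
	}
}

static void setup_my_obj(void)
{
	INIT_LIST_HEAD(&my_obj.run_list);
	(void)strncpy_s(my_obj.name, 16U, "my_obj", 16U);
	my_obj.thread = my_thread;
	/* build_frame_for() stands in for a frame builder that lays out the
	 * same initial frame as build_stack_frame(): run_sched_thread as the
	 * return address and &my_obj in the rdi slot. */
	my_obj.host_sp = build_frame_for(&my_obj, my_stack, CONFIG_STACK_SIZE);
	my_obj.prepare_switch_out = NULL;
	my_obj.prepare_switch_in = NULL;
}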