scheduler: make scheduling based on struct sched_object

use struct sched_object as the main interface of scheduling, making the
scheduler an independent module, decoupled from vcpu:
- add struct sched_object as one field in struct vcpu
- define sched_object.thread for switch_to thread
- define sched_object.prepare_switch_out/in for prepare_switch before
  switch_to
- move context_switch_out/context_switch_in into vcpu.c as
  vcpu.sched_obj.prepare_switch_out/in
- make default_idle as global idle.thread for idle_thread
- make vcpu_thread as vcpu.sched_obj.thread for each vcpu thread
- simplify switch_to based on sched_object

Tracked-On: #1842
Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>
Acked-by: Eddie Dong <edide.dong@intel.com>
This commit is contained in:
Jason Chen CJ 2018-12-20 15:15:20 +08:00 committed by wenlingz
parent 8aae0dff87
commit ff0703dd40
6 changed files with 116 additions and 104 deletions

View File

@ -600,6 +600,34 @@ void resume_vcpu(struct acrn_vcpu *vcpu)
release_schedule_lock(vcpu->pcpu_id);
}
/*
 * prepare_switch_out hook for a vcpu's sched_object: stop pending event
 * injection and clear the vcpu's running flag before the scheduler
 * switches this pcpu away from the vcpu.
 */
static void context_switch_out(struct sched_object *prev)
{
/* recover the owning vcpu from its embedded sched_object member */
struct acrn_vcpu *vcpu = list_entry(prev, struct acrn_vcpu, sched_obj);
/* cancel event(int, gp, nmi and exception) injection */
cancel_event_injection(vcpu);
/* mark not-running only after injection has been cancelled */
atomic_store32(&vcpu->running, 0U);
/* do prev vcpu context switch out */
/* For now, we don't need to invalidate the EPT.
 * But if we had more than one vcpu on one pcpu,
 * we would need to add an EPT invalidation here.
 */
}
/*
 * prepare_switch_in hook for a vcpu's sched_object: mark the vcpu as
 * running on this pcpu just before the scheduler switches to it.
 */
static void context_switch_in(struct sched_object *next)
{
/* recover the owning vcpu from its embedded sched_object member */
struct acrn_vcpu *vcpu = list_entry(next, struct acrn_vcpu, sched_obj);
atomic_store32(&vcpu->running, 1U);
/* FIXME:
* Now, we don't need to load the new vcpu's VMCS because
* we only switch between the vcpu loop and the idle loop.
* If we had more than one vcpu on one pcpu, we would need to
* add a VMCS load operation here.
*/
}
void schedule_vcpu(struct acrn_vcpu *vcpu)
{
vcpu->state = VCPU_RUNNING;
@ -625,6 +653,9 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
set_pcpu_used(pcpu_id);
INIT_LIST_HEAD(&vcpu->sched_obj.run_list);
vcpu->sched_obj.thread = vcpu_thread;
vcpu->sched_obj.prepare_switch_out = context_switch_out;
vcpu->sched_obj.prepare_switch_in = context_switch_in;
return ret;
}

View File

@ -69,7 +69,7 @@ static void enter_guest_mode(uint16_t pcpu_id)
}
#endif
default_idle();
switch_to_idle(default_idle);
/* Control should not come here */
cpu_dead();

View File

@ -17,8 +17,9 @@ static void run_vcpu_pre_work(struct acrn_vcpu *vcpu)
}
}
void vcpu_thread(struct acrn_vcpu *vcpu)
void vcpu_thread(struct sched_object *obj)
{
struct acrn_vcpu *vcpu = list_entry(obj, struct acrn_vcpu, sched_obj);
uint32_t basic_exit_reason = 0U;
int32_t ret = 0;
@ -88,3 +89,21 @@ void vcpu_thread(struct acrn_vcpu *vcpu)
profiling_post_vmexit_handler(vcpu);
} while (1);
}
/*
 * Idle loop installed as the thread of the global idle sched_object.
 * Spins on this pcpu until either a reschedule is requested (then yields
 * into schedule()) or the pcpu is asked to go offline (then dies).
 * Otherwise it services completed I/O requests and halts with
 * interrupts briefly enabled so wakeups can be delivered.
 */
void default_idle(__unused struct sched_object *obj)
{
	uint16_t cpu_id = get_cpu_id();

	for (;;) {
		if (need_reschedule(cpu_id) != 0) {
			/* another sched_object is runnable: switch to it */
			schedule();
		} else if (need_offline(cpu_id) != 0) {
			/* offline request: this pcpu never returns */
			cpu_dead();
		} else {
			CPU_IRQ_ENABLE();
			handle_complete_ioreq(cpu_id);
			cpu_do_idle();
			CPU_IRQ_DISABLE();
		}
	}
}

View File

@ -8,6 +8,7 @@
#include <schedule.h>
static uint64_t pcpu_used_bitmap;
static struct sched_object idle;
void init_scheduler(void)
{
@ -21,7 +22,7 @@ void init_scheduler(void)
spinlock_init(&ctx->scheduler_lock);
INIT_LIST_HEAD(&ctx->runqueue);
ctx->flags = 0UL;
ctx->curr_vcpu = NULL;
ctx->curr_obj = NULL;
}
}
@ -94,18 +95,6 @@ static struct sched_object *get_next_sched_obj(uint16_t pcpu_id)
return obj;
}
/*
 * Pick the next runnable sched_object on the given pcpu and translate it
 * back to its owning vcpu; returns NULL when the runqueue is empty
 * (the caller then falls back to the idle loop).
 */
static struct acrn_vcpu *select_next_vcpu(uint16_t pcpu_id)
{
	struct sched_object *obj = get_next_sched_obj(pcpu_id);

	return (obj == NULL) ? NULL : list_entry(obj, struct acrn_vcpu, sched_obj);
}
void make_reschedule_request(uint16_t pcpu_id)
{
struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
@ -123,43 +112,6 @@ int32_t need_reschedule(uint16_t pcpu_id)
return bitmap_test_and_clear_lock(NEED_RESCHEDULE, &ctx->flags);
}
/*
 * Pre-refactor switch-out path, keyed directly on acrn_vcpu; a NULL vcpu
 * denotes the idle thread (this diff replaces it with the sched_object
 * hook variant in vcpu.c).
 */
static void context_switch_out(struct acrn_vcpu *vcpu)
{
/* if it's idle thread, no action for switch out */
if (vcpu == NULL) {
return;
}
/* cancel event(int, gp, nmi and exception) injection */
cancel_event_injection(vcpu);
/* mark not-running only after injection has been cancelled */
atomic_store32(&vcpu->running, 0U);
/* do prev vcpu context switch out */
/* For now, we don't need to invalidate the EPT.
* But if we had more than one vcpu on one pcpu,
* we would need to add an EPT invalidation here.
*/
}
/*
 * Pre-refactor switch-in path, keyed directly on acrn_vcpu; a NULL vcpu
 * denotes the idle thread.  Also publishes the incoming vcpu as this
 * pcpu's current one (this diff replaces it with the sched_object hook
 * variant in vcpu.c).
 */
static void context_switch_in(struct acrn_vcpu *vcpu)
{
/* update current_vcpu */
get_cpu_var(sched_ctx).curr_vcpu = vcpu;
/* if it's idle thread, no action for switch in */
if (vcpu == NULL) {
return;
}
atomic_store32(&vcpu->running, 1U);
/* FIXME:
* Now, we don't need to load the new vcpu's VMCS because
* we only switch between the vcpu loop and the idle loop.
* If we had more than one vcpu on one pcpu, we would need to
* add a VMCS load operation here.
*/
}
void make_pcpu_offline(uint16_t pcpu_id)
{
struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
@ -177,25 +129,23 @@ int32_t need_offline(uint16_t pcpu_id)
return bitmap_test_and_clear_lock(NEED_OFFLINE, &ctx->flags);
}
/*
 * NOTE(review): this span is a rendered diff that interleaves the body of
 * the REMOVED default_idle() (the while-loop below) into the newly ADDED
 * switch_to_asm(); as shown it is not one coherent function.
 *
 * The added switch_to_asm() loads cur_sp into %rsp, passes 'next' in
 * %rdi, writes next->thread's address onto the fresh stack and "ret"s
 * into it -- i.e. it starts next's thread on a reset stack; the
 * pause/jmp pad after the call is never reached.
 */
void default_idle(void)
static void switch_to_asm(struct sched_object *next, uint64_t cur_sp)
{
uint16_t pcpu_id = get_cpu_id();
while (1) {
if (need_reschedule(pcpu_id) != 0) {
schedule();
} else if (need_offline(pcpu_id) != 0) {
cpu_dead();
} else {
CPU_IRQ_ENABLE();
handle_complete_ioreq(pcpu_id);
cpu_do_idle();
CPU_IRQ_DISABLE();
}
}
asm volatile ("movq %2, %%rsp\n"
"movq %0, %%rdi\n"
"call 22f\n"
"11: \n"
"pause\n"
"jmp 11b\n"
"22:\n"
"mov %1, (%%rsp)\n"
"ret\n"
:
: "c"(next), "a"(next->thread), "r"(cur_sp)
: "memory");
}
/*
 * NOTE(review): rendered diff interleaving the REMOVED switch_to(curr)
 * (with its two inline-asm branches for idle vs. vcpu) and the ADDED
 * switch_to(next), whose body reduces to resetting the per-cpu stack top
 * and tail-calling switch_to_asm(next, cur_sp).  The two back-to-back
 * signatures below are the old and new declarations, not valid C as shown.
 */
static void switch_to(struct acrn_vcpu *curr)
static void switch_to(struct sched_object *next)
{
/*
* reset stack pointer here. Otherwise, schedule
@ -203,54 +153,60 @@ static void switch_to(struct acrn_vcpu *curr)
*/
uint64_t cur_sp = (uint64_t)&get_cpu_var(stack)[CONFIG_STACK_SIZE];
if (curr == NULL) {
asm volatile ("movq %1, %%rsp\n"
"movq $0, %%rdi\n"
"call 22f\n"
"11: \n"
"pause\n"
"jmp 11b\n"
"22:\n"
"mov %0, (%%rsp)\n"
"ret\n"
:
: "a"(default_idle), "r"(cur_sp)
: "memory");
} else {
asm volatile ("movq %2, %%rsp\n"
"movq %0, %%rdi\n"
"call 44f\n"
"33: \n"
"pause\n"
"jmp 33b\n"
"44:\n"
"mov %1, (%%rsp)\n"
"ret\n"
:
: "c"(curr), "a"(vcpu_thread), "r"(cur_sp)
: "memory");
switch_to_asm(next, cur_sp);
}
/*
 * Run the per-object switch hooks around a scheduling decision:
 * prev's prepare_switch_out, then publish next as this pcpu's current
 * object, then next's prepare_switch_in.  Either side may be NULL
 * (e.g. switching from/to idle, whose hooks are NULL), so both the
 * object and its hook pointer are checked; the ordering (out -> publish
 * -> in) is deliberate.
 */
static void prepare_switch(struct sched_object *prev, struct sched_object *next)
{
if ((prev != NULL) && (prev->prepare_switch_out != NULL)) {
prev->prepare_switch_out(prev);
}
/* update current object */
get_cpu_var(sched_ctx).curr_obj = next;
if ((next != NULL) && (next->prepare_switch_in != NULL)) {
next->prepare_switch_in(next);
}
}
/*
 * Core scheduling entry: under the pcpu's schedule lock, pick the next
 * sched_object; if it differs from the current one, run the switch hooks
 * and jump onto its thread via switch_to() (which never returns --
 * hence the trailing ASSERT).  A NULL pick falls back to the global
 * idle object.
 *
 * NOTE(review): rendered diff -- the duplicated 'next'/'prev'
 * declarations and the context_switch_out/in vs. prepare_switch calls
 * below are the REMOVED (acrn_vcpu-based) and ADDED (sched_object-based)
 * lines interleaved.  Also note in the new flow the NULL->&idle fixup
 * happens AFTER prepare_switch(), so curr_obj is published as NULL when
 * switching to idle -- confirm that is intended.
 */
void schedule(void)
{
uint16_t pcpu_id = get_cpu_id();
struct acrn_vcpu *next = NULL;
struct acrn_vcpu *prev = per_cpu(sched_ctx, pcpu_id).curr_vcpu;
struct sched_object *next = NULL;
struct sched_object *prev = per_cpu(sched_ctx, pcpu_id).curr_obj;
get_schedule_lock(pcpu_id);
next = select_next_vcpu(pcpu_id);
next = get_next_sched_obj(pcpu_id);
if (prev == next) {
release_schedule_lock(pcpu_id);
return;
}
context_switch_out(prev);
context_switch_in(next);
prepare_switch(prev, next);
release_schedule_lock(pcpu_id);
if (next == NULL) {
next = &idle;
}
switch_to(next);
ASSERT(false, "Shouldn't go here");
}
/*
 * Enter the idle loop on the calling pcpu.  Only the boot cpu
 * initializes the shared static 'idle' sched_object (its thread and the
 * unused prepare_switch hooks); every pcpu then invokes the supplied
 * idle_thread directly with &idle, never returning on success.
 * NOTE(review): APs rely on the BSP having initialized 'idle' first --
 * confirm boot sequencing guarantees that ordering.
 */
void switch_to_idle(run_thread_t idle_thread)
{
uint16_t pcpu_id = get_cpu_id();
if (pcpu_id == BOOT_CPU_ID) {
idle.thread = idle_thread;
idle.prepare_switch_out = NULL;
idle.prepare_switch_in = NULL;
}
if (idle_thread != NULL) {
idle_thread(&idle);
}
}

View File

@ -299,6 +299,9 @@ vcpu_vlapic(struct acrn_vcpu *vcpu)
return &(vcpu->arch.vlapic);
}
void default_idle(struct sched_object *obj);
void vcpu_thread(struct sched_object *obj);
/* External Interfaces */
/**

View File

@ -10,19 +10,26 @@
#define NEED_RESCHEDULE (1U)
#define NEED_OFFLINE (2U)
/* forward declaration so the callback typedefs below can reference it */
struct sched_object;
/* main body of a schedulable entity; switch_to() jumps onto it */
typedef void (*run_thread_t)(struct sched_object *obj);
/* optional hook run by prepare_switch() before switching out/in */
typedef void (*prepare_switch_t)(struct sched_object *obj);
/*
 * Generic schedulable unit: embedded in acrn_vcpu (recovered via
 * list_entry) and used stand-alone for the global idle object.
 */
struct sched_object {
struct list_head run_list;
run_thread_t thread;
prepare_switch_t prepare_switch_out;
prepare_switch_t prepare_switch_in;
};
/*
 * Per-pcpu scheduler state (one instance per cpu via per_cpu()).
 * NOTE(review): both the REMOVED 'curr_vcpu' field and the ADDED
 * 'curr_obj' field appear below because this is a rendered diff; the
 * post-commit struct holds only curr_obj.
 */
struct sched_context {
spinlock_t runqueue_lock;
struct list_head runqueue;
uint64_t flags;
struct acrn_vcpu *curr_vcpu;
struct sched_object *curr_obj;
spinlock_t scheduler_lock;
};
void init_scheduler(void);
void switch_to_idle(run_thread_t idle_thread);
void get_schedule_lock(uint16_t pcpu_id);
void release_schedule_lock(uint16_t pcpu_id);
@ -33,15 +40,11 @@ void free_pcpu(uint16_t pcpu_id);
void add_to_cpu_runqueue(struct sched_object *obj, uint16_t pcpu_id);
void remove_from_cpu_runqueue(struct sched_object *obj, uint16_t pcpu_id);
void default_idle(void);
void make_reschedule_request(uint16_t pcpu_id);
int32_t need_reschedule(uint16_t pcpu_id);
void make_pcpu_offline(uint16_t pcpu_id);
int32_t need_offline(uint16_t pcpu_id);
void schedule(void);
void vcpu_thread(struct acrn_vcpu *vcpu);
#endif /* SCHEDULE_H */