hv: clear NEED_RESCHEDULE flag in schedule
Currently, need_reschedule() test-and-clears the NEED_RESCHEDULE bit in the scheduling context, and schedule() is called afterwards. That behavior does not exactly match the function's name. This patch moves the flag clearing into schedule(), so need_reschedule() just checks the bit and returns.

Tracked-On: #1821
Signed-off-by: Shuo A Liu <shuo.a.liu@intel.com>
Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
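Below is a minimal, self-contained sketch of the resulting contract, for illustration only: it uses a plain unsigned long flags word and trivial helpers instead of ACRN's real per_cpu scheduling context and locked bitmap primitives, so the sched_ctx type and the stand-in clearing code here are placeholders, not the hypervisor's actual API.

#include <stdbool.h>
#include <stdio.h>

#define NEED_RESCHEDULE  (1UL << 0U)

struct sched_ctx {
	unsigned long flags;            /* pending request bits for one pcpu */
};

/* After the patch: need_reschedule() only observes the flag. */
static bool need_reschedule(const struct sched_ctx *ctx)
{
	return (ctx->flags & NEED_RESCHEDULE) != 0UL;
}

/* schedule() now owns clearing the request it is about to service
 * (stand-in for bitmap_clear_lock(NEED_RESCHEDULE, &ctx->flags)). */
static void schedule(struct sched_ctx *ctx)
{
	ctx->flags &= ~NEED_RESCHEDULE;
	/* ...pick the next sched_object and context-switch... */
}

int main(void)
{
	struct sched_ctx ctx = { .flags = NEED_RESCHEDULE };

	/* Idle-loop pattern from the diff: test, then let schedule() clear. */
	if (need_reschedule(&ctx)) {
		schedule(&ctx);
	}
	printf("NEED_RESCHEDULE after schedule(): %lu\n",
	       ctx.flags & NEED_RESCHEDULE);
	return 0;
}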
parent e8ac97671f
commit a0154223f6
@@ -45,7 +45,7 @@ void vcpu_thread(struct sched_object *obj)
 			continue;
 		}
 
-		if (need_reschedule(vcpu->pcpu_id) != 0) {
+		if (need_reschedule(vcpu->pcpu_id)) {
 			/*
 			 * In extrem case, schedule() could return. Which
 			 * means the vcpu resume happens before schedule()
@@ -95,7 +95,7 @@ void default_idle(__unused struct sched_object *obj)
 	uint16_t pcpu_id = get_cpu_id();
 
 	while (1) {
-		if (need_reschedule(pcpu_id) != 0) {
+		if (need_reschedule(pcpu_id)) {
 			schedule();
 		} else if (need_offline(pcpu_id) != 0) {
 			cpu_dead();

@@ -81,9 +81,8 @@ void remove_from_cpu_runqueue(struct sched_object *obj, uint16_t pcpu_id)
 	spinlock_release(&ctx->runqueue_lock);
 }
 
-static struct sched_object *get_next_sched_obj(uint16_t pcpu_id)
+static struct sched_object *get_next_sched_obj(struct sched_context *ctx)
 {
-	struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
 	struct sched_object *obj = NULL;
 
 	spinlock_obtain(&ctx->runqueue_lock);
@@ -105,11 +104,11 @@ void make_reschedule_request(uint16_t pcpu_id)
 	}
 }
 
-int32_t need_reschedule(uint16_t pcpu_id)
+bool need_reschedule(uint16_t pcpu_id)
 {
 	struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
 
-	return bitmap_test_and_clear_lock(NEED_RESCHEDULE, &ctx->flags);
+	return bitmap_test(NEED_RESCHEDULE, &ctx->flags);
 }
 
 void make_pcpu_offline(uint16_t pcpu_id)
@@ -173,11 +172,13 @@ static void prepare_switch(struct sched_object *prev, struct sched_object *next)
 void schedule(void)
 {
 	uint16_t pcpu_id = get_cpu_id();
+	struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
 	struct sched_object *next = NULL;
-	struct sched_object *prev = per_cpu(sched_ctx, pcpu_id).curr_obj;
+	struct sched_object *prev = ctx->curr_obj;
 
 	get_schedule_lock(pcpu_id);
-	next = get_next_sched_obj(pcpu_id);
+	next = get_next_sched_obj(ctx);
+	bitmap_clear_lock(NEED_RESCHEDULE, &ctx->flags);
 
 	if (prev == next) {
 		release_schedule_lock(pcpu_id);

@@ -41,7 +41,7 @@ void add_to_cpu_runqueue(struct sched_object *obj, uint16_t pcpu_id);
 void remove_from_cpu_runqueue(struct sched_object *obj, uint16_t pcpu_id);
 
 void make_reschedule_request(uint16_t pcpu_id);
-int32_t need_reschedule(uint16_t pcpu_id);
+bool need_reschedule(uint16_t pcpu_id);
 void make_pcpu_offline(uint16_t pcpu_id);
 int32_t need_offline(uint16_t pcpu_id);