hv: schedule: remove runqueue_lock in sched_context

Now sched_object and sched_context are protected by scheduler_lock. There is
no need to use runqueue_lock to protect the schedule runqueue, since we have
no plan to support scheduler migration.

Signed-off-by: Li, Fei1 <fei1.li@intel.com>
Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
This commit is contained in:
Li, Fei1 2019-07-09 00:17:51 +08:00 committed by ACRN System Integration
parent b1dd3e26f5
commit e69b3dcf67
3 changed files with 5 additions and 15 deletions

View File

@ -620,7 +620,7 @@ void pause_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state)
vcpu->state = new_state;
if (atomic_load32(&vcpu->running) == 1U) {
remove_from_cpu_runqueue(&vcpu->sched_obj, vcpu->pcpu_id);
remove_from_cpu_runqueue(&vcpu->sched_obj);
if (is_lapic_pt_enabled(vcpu)) {
make_reschedule_request(vcpu->pcpu_id, DEL_MODE_INIT);
@ -636,7 +636,7 @@ void pause_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state)
}
}
} else {
remove_from_cpu_runqueue(&vcpu->sched_obj, vcpu->pcpu_id);
remove_from_cpu_runqueue(&vcpu->sched_obj);
release_schedule_lock(vcpu->pcpu_id);
}
}

View File

@ -24,7 +24,6 @@ void init_scheduler(void)
for (i = 0U; i < pcpu_nums; i++) {
ctx = &per_cpu(sched_ctx, i);
spinlock_init(&ctx->runqueue_lock);
spinlock_init(&ctx->scheduler_lock);
INIT_LIST_HEAD(&ctx->runqueue);
ctx->flags = 0UL;
@ -74,33 +73,25 @@ void add_to_cpu_runqueue(struct sched_object *obj, uint16_t pcpu_id)
{
struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
spinlock_obtain(&ctx->runqueue_lock);
if (list_empty(&obj->run_list)) {
list_add_tail(&obj->run_list, &ctx->runqueue);
}
spinlock_release(&ctx->runqueue_lock);
}
void remove_from_cpu_runqueue(struct sched_object *obj, uint16_t pcpu_id)
void remove_from_cpu_runqueue(struct sched_object *obj)
{
struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
spinlock_obtain(&ctx->runqueue_lock);
list_del_init(&obj->run_list);
spinlock_release(&ctx->runqueue_lock);
}
static struct sched_object *get_next_sched_obj(struct sched_context *ctx)
{
struct sched_object *obj = NULL;
spinlock_obtain(&ctx->runqueue_lock);
if (!list_empty(&ctx->runqueue)) {
obj = get_first_item(&ctx->runqueue, struct sched_object, run_list);
} else {
obj = &get_cpu_var(idle);
}
spinlock_release(&ctx->runqueue_lock);
return obj;
}

View File

@ -28,11 +28,10 @@ struct sched_object {
};
struct sched_context {
spinlock_t runqueue_lock;
struct list_head runqueue;
uint64_t flags;
struct sched_object *curr_obj;
spinlock_t scheduler_lock;
spinlock_t scheduler_lock; /* to protect sched_context and sched_object */
};
void init_scheduler(void);
@ -45,7 +44,7 @@ uint16_t allocate_pcpu(void);
void free_pcpu(uint16_t pcpu_id);
void add_to_cpu_runqueue(struct sched_object *obj, uint16_t pcpu_id);
void remove_from_cpu_runqueue(struct sched_object *obj, uint16_t pcpu_id);
void remove_from_cpu_runqueue(struct sched_object *obj);
void make_reschedule_request(uint16_t pcpu_id, uint16_t delmode);
bool need_reschedule(uint16_t pcpu_id);