diff --git a/hypervisor/arch/x86/guest/vcpu.c b/hypervisor/arch/x86/guest/vcpu.c
index 8bd9ba44f..0de0ce70f 100644
--- a/hypervisor/arch/x86/guest/vcpu.c
+++ b/hypervisor/arch/x86/guest/vcpu.c
@@ -572,7 +572,7 @@ void pause_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state)
 	vcpu->state = new_state;
 
 	if (atomic_load32(&vcpu->running) == 1U) {
-		remove_vcpu_from_runqueue(vcpu);
+		remove_from_cpu_runqueue(&vcpu->sched_obj, vcpu->pcpu_id);
 		make_reschedule_request(vcpu);
 		release_schedule_lock(vcpu->pcpu_id);
 
@@ -581,7 +581,7 @@ void pause_vcpu(struct acrn_vcpu *vcpu, enum vcpu_state new_state)
 			__asm__ __volatile("pause" ::: "memory");
 		}
 	} else {
-		remove_vcpu_from_runqueue(vcpu);
+		remove_from_cpu_runqueue(&vcpu->sched_obj, vcpu->pcpu_id);
 		release_schedule_lock(vcpu->pcpu_id);
 	}
 }
@@ -594,7 +594,7 @@ void resume_vcpu(struct acrn_vcpu *vcpu)
 	vcpu->state = vcpu->prev_state;
 
 	if (vcpu->state == VCPU_RUNNING) {
-		add_vcpu_to_runqueue(vcpu);
+		add_to_cpu_runqueue(&vcpu->sched_obj, vcpu->pcpu_id);
 		make_reschedule_request(vcpu);
 	}
 	release_schedule_lock(vcpu->pcpu_id);
@@ -606,7 +606,7 @@ void schedule_vcpu(struct acrn_vcpu *vcpu)
 	pr_dbg("vcpu%hu scheduled", vcpu->vcpu_id);
 
 	get_schedule_lock(vcpu->pcpu_id);
-	add_vcpu_to_runqueue(vcpu);
+	add_to_cpu_runqueue(&vcpu->sched_obj, vcpu->pcpu_id);
 	make_reschedule_request(vcpu);
 	release_schedule_lock(vcpu->pcpu_id);
 }
@@ -624,7 +624,7 @@ int32_t prepare_vcpu(struct acrn_vm *vm, uint16_t pcpu_id)
 	set_pcpu_used(pcpu_id);
 
-	INIT_LIST_HEAD(&vcpu->run_list);
+	INIT_LIST_HEAD(&vcpu->sched_obj.run_list);
 
 	return ret;
 }
diff --git a/hypervisor/common/schedule.c b/hypervisor/common/schedule.c
index 3369a8009..01af5acab 100644
--- a/hypervisor/common/schedule.c
+++ b/hypervisor/common/schedule.c
@@ -60,38 +60,48 @@ void free_pcpu(uint16_t pcpu_id)
 	bitmap_clear_lock(pcpu_id, &pcpu_used_bitmap);
 }
 
-void add_vcpu_to_runqueue(struct acrn_vcpu *vcpu)
+void add_to_cpu_runqueue(struct sched_object *obj, uint16_t pcpu_id)
 {
-	uint16_t pcpu_id = vcpu->pcpu_id;
 	struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
 
 	spinlock_obtain(&ctx->runqueue_lock);
-	if (list_empty(&vcpu->run_list)) {
-		list_add_tail(&vcpu->run_list, &ctx->runqueue);
+	if (list_empty(&obj->run_list)) {
+		list_add_tail(&obj->run_list, &ctx->runqueue);
 	}
 	spinlock_release(&ctx->runqueue_lock);
 }
 
-void remove_vcpu_from_runqueue(struct acrn_vcpu *vcpu)
+void remove_from_cpu_runqueue(struct sched_object *obj, uint16_t pcpu_id)
 {
-	uint16_t pcpu_id = vcpu->pcpu_id;
 	struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
 
 	spinlock_obtain(&ctx->runqueue_lock);
-	list_del_init(&vcpu->run_list);
+	list_del_init(&obj->run_list);
 	spinlock_release(&ctx->runqueue_lock);
 }
 
+static struct sched_object *get_next_sched_obj(uint16_t pcpu_id)
+{
+	struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
+	struct sched_object *obj = NULL;
+
+	spinlock_obtain(&ctx->runqueue_lock);
+	if (!list_empty(&ctx->runqueue)) {
+		obj = get_first_item(&ctx->runqueue, struct sched_object, run_list);
+	}
+	spinlock_release(&ctx->runqueue_lock);
+
+	return obj;
+}
+
 static struct acrn_vcpu *select_next_vcpu(uint16_t pcpu_id)
 {
-	struct sched_context *ctx = &per_cpu(sched_ctx, pcpu_id);
 	struct acrn_vcpu *vcpu = NULL;
+	struct sched_object *obj = get_next_sched_obj(pcpu_id);
 
-	spinlock_obtain(&ctx->runqueue_lock);
-	if (!list_empty(&ctx->runqueue)) {
-		vcpu = get_first_item(&ctx->runqueue, struct acrn_vcpu, run_list);
+	if (obj != NULL) {
+		vcpu = list_entry(obj, struct acrn_vcpu, sched_obj);
 	}
-	spinlock_release(&ctx->runqueue_lock);
 
 	return vcpu;
 }
diff --git a/hypervisor/include/arch/x86/guest/vcpu.h b/hypervisor/include/arch/x86/guest/vcpu.h
index 069ca69f9..d9df48086 100644
--- a/hypervisor/include/arch/x86/guest/vcpu.h
+++ b/hypervisor/include/arch/x86/guest/vcpu.h
@@ -264,7 +264,7 @@ struct acrn_vcpu {
 	volatile enum vcpu_state dbg_req_state;
 	uint64_t sync;	/*hold the bit events*/
 
-	struct list_head run_list; /* inserted to schedule runqueue */
+	struct sched_object sched_obj;
 	uint64_t pending_pre_work; /* any pre work pending? */
 	bool launched; /* Whether the vcpu is launched on target pcpu */
 	uint32_t paused_cnt; /* how many times vcpu is paused */
diff --git a/hypervisor/include/arch/x86/hv_arch.h b/hypervisor/include/arch/x86/hv_arch.h
index e385027e3..1d7718c46 100644
--- a/hypervisor/include/arch/x86/hv_arch.h
+++ b/hypervisor/include/arch/x86/hv_arch.h
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/hypervisor/include/common/schedule.h b/hypervisor/include/common/schedule.h
index 990b838ab..004775b84 100644
--- a/hypervisor/include/common/schedule.h
+++ b/hypervisor/include/common/schedule.h
@@ -10,6 +10,10 @@
 #define NEED_RESCHEDULE (1U)
 #define NEED_OFFLINE (2U)
 
+struct sched_object {
+	struct list_head run_list;
+};
+
 struct sched_context {
 	spinlock_t runqueue_lock;
 	struct list_head runqueue;
@@ -26,8 +30,8 @@ void set_pcpu_used(uint16_t pcpu_id);
 uint16_t allocate_pcpu(void);
 void free_pcpu(uint16_t pcpu_id);
 
-void add_vcpu_to_runqueue(struct acrn_vcpu *vcpu);
-void remove_vcpu_from_runqueue(struct acrn_vcpu *vcpu);
+void add_to_cpu_runqueue(struct sched_object *obj, uint16_t pcpu_id);
+void remove_from_cpu_runqueue(struct sched_object *obj, uint16_t pcpu_id);
 
 void default_idle(void);
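
The core of this refactor is the intrusive-node / container_of idiom: common/schedule.c now queues and dequeues generic struct sched_object nodes without knowing anything about vCPUs, and select_next_vcpu() recovers the enclosing struct acrn_vcpu from the embedded member via list_entry(). Below is a minimal, self-contained sketch of that idiom; the list_entry macro and the struct layouts are simplified stand-ins written for illustration, not ACRN's actual definitions.

    /* sched_obj_sketch.c — minimal sketch of the embedding/list_entry idiom
     * this patch introduces. list_head, list_entry and the struct layouts
     * here are simplified stand-ins, not ACRN's real headers. */
    #include <stddef.h>
    #include <stdio.h>

    struct list_head {
    	struct list_head *prev, *next;
    };

    /* Recover the enclosing structure from a pointer to one of its members. */
    #define list_entry(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))

    /* The generic node the runqueue links together. */
    struct sched_object {
    	struct list_head run_list;
    };

    /* The vCPU embeds a sched_object instead of a bare list_head. */
    struct acrn_vcpu {
    	unsigned short vcpu_id;
    	struct sched_object sched_obj;
    };

    int main(void)
    {
    	struct acrn_vcpu vcpu = { .vcpu_id = 3U };

    	/* The scheduler side only ever handles this generic pointer... */
    	struct sched_object *obj = &vcpu.sched_obj;

    	/* ...and the vCPU side maps it back, the same way the new
    	 * select_next_vcpu() applies list_entry() to the node returned
    	 * by get_next_sched_obj(). */
    	struct acrn_vcpu *back = list_entry(obj, struct acrn_vcpu, sched_obj);

    	printf("recovered vcpu_id = %hu\n", back->vcpu_id); /* prints 3 */
    	return 0;
    }

Because the runqueue side touches only sched_object, anything that embeds one can be queued the same way, which is what makes the new add_to_cpu_runqueue()/remove_from_cpu_runqueue() signatures vCPU-agnostic.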