hv: sched_iorr: Add IO sensitive Round-robin scheduler

The IO sensitive Round-robin scheduler aims to schedule threads with a
round-robin policy. Meanwhile, we also enhance it with some fairness
configuration: for example, a thread will be scheduled out once it has
used up its timeslice, and IO requests on a thread will be handled with
high priority.

This patch only adds a skeleton for the sched_iorr scheduler.

Tracked-On: #4178
Signed-off-by: Jason Chen CJ <jason.cj.chen@intel.com>
Signed-off-by: Yu Wang <yu1.wang@intel.com>
Signed-off-by: Shuo A Liu <shuo.a.liu@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
This commit is contained in:
Shuo A Liu 2019-06-20 11:10:53 +08:00 committed by wenlingz
parent 3c8d465a11
commit ed4008630d
4 changed files with 64 additions and 0 deletions

View File

@ -212,6 +212,7 @@ HW_C_SRCS += arch/x86/sgx.c
HW_C_SRCS += common/softirq.c
HW_C_SRCS += common/schedule.c
HW_C_SRCS += common/sched_noop.c
HW_C_SRCS += common/sched_iorr.c
HW_C_SRCS += hw/pci.c
HW_C_SRCS += arch/x86/configs/vm_config.c
HW_C_SRCS += arch/x86/configs/$(CONFIG_BOARD)/board.c

View File

@ -0,0 +1,54 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <list.h>
#include <per_cpu.h>
#include <schedule.h>
/*
 * Per-thread private data for the IO sensitive Round-robin scheduler.
 * Fields are placeholders in this skeleton; none are used yet.
 */
struct sched_iorr_data {
	/* keep list as the first item */
	struct list_head list;		/* runqueue linkage — presumably links into sched_iorr_control.runqueue; confirm when runqueue handling lands */
	uint64_t slice_cycles;		/* full timeslice budget (cycle units — TODO confirm TSC) */
	uint64_t last_cycles;		/* timestamp of the last budget accounting update */
	int64_t left_cycles;		/* remaining budget; signed so overrun can go negative */
};
/*
 * Initialize the iorr scheduler instance for one scheduler control block.
 *
 * Skeleton: no per-pcpu state exists yet, so this always succeeds.
 *
 * Made static: this callback is reachable only through the sched_iorr ops
 * table below (schedule.h exports just the struct), matching the linkage
 * of the sibling pick_next/sleep/wake callbacks and keeping the global
 * namespace clean.
 *
 * @param ctl scheduler control block of the pcpu (unused for now)
 * @return 0 on success
 */
static int sched_iorr_init(__unused struct sched_control *ctl)
{
	return 0;
}
/*
 * Tear down the iorr scheduler instance for one scheduler control block.
 *
 * Skeleton: nothing was allocated in init, so there is nothing to release.
 *
 * Made static for consistency with the other sched_iorr callbacks; it is
 * referenced only via the sched_iorr ops table in this file.
 *
 * @param ctl scheduler control block of the pcpu (unused for now)
 */
static void sched_iorr_deinit(__unused struct sched_control *ctl)
{
}
/*
 * Initialize the per-thread scheduler data (struct sched_iorr_data) of a
 * thread_object.
 *
 * Skeleton: timeslice/runqueue fields are not wired up yet.
 *
 * Made static for consistency with the other sched_iorr callbacks; it is
 * referenced only via the sched_iorr ops table in this file.
 *
 * @param obj the thread whose scheduler data is being initialized (unused for now)
 */
static void sched_iorr_init_data(__unused struct thread_object *obj)
{
}
/*
 * Pick the next thread to run on this pcpu.
 * Skeleton: no runqueue handling yet, so no candidate is ever produced.
 * NOTE(review): returning NULL presumably makes the framework idle the
 * pcpu — confirm against schedule.c once the runqueue is implemented.
 */
static struct thread_object *sched_iorr_pick_next(__unused struct sched_control *ctl)
{
	return NULL;
}
/*
 * Callback invoked when a thread blocks/sleeps.
 * Skeleton: will eventually remove the thread from the runqueue.
 */
static void sched_iorr_sleep(__unused struct thread_object *obj)
{
}
/*
 * Callback invoked when a thread becomes runnable again.
 * Skeleton: will eventually re-queue the thread on the runqueue.
 */
static void sched_iorr_wake(__unused struct thread_object *obj)
{
}
/*
 * Ops table of the IO sensitive Round-robin scheduler, exported via the
 * extern declaration in schedule.h. All callbacks are skeletons at this
 * stage; the framework dispatches through these pointers only.
 */
struct acrn_scheduler sched_iorr = {
	.name = "sched_iorr",
	.init = sched_iorr_init,
	.init_data = sched_iorr_init_data,
	.pick_next = sched_iorr_pick_next,
	.sleep = sched_iorr_sleep,
	.wake = sched_iorr_wake,
	.deinit = sched_iorr_deinit,
};

View File

@ -38,6 +38,7 @@ struct per_cpu_region {
struct per_cpu_timers cpu_timers;
struct sched_control sched_ctl;
struct sched_noop_control sched_noop_ctl;
struct sched_iorr_control sched_iorr_ctl;
struct thread_object idle;
struct host_gdt gdt;
struct tss_64 tss;

View File

@ -7,6 +7,8 @@
#ifndef SCHEDULE_H
#define SCHEDULE_H
#include <spinlock.h>
#include <list.h>
#include <timer.h>
#define NEED_RESCHEDULE (1U)
@ -77,11 +79,17 @@ struct acrn_scheduler {
void (*deinit)(struct sched_control *ctl);
};
extern struct acrn_scheduler sched_noop;
extern struct acrn_scheduler sched_iorr;
struct sched_noop_control {
struct thread_object *noop_thread_obj;
};
/*
 * Per-pcpu control data of the IO sensitive Round-robin scheduler.
 * Allocated inside per_cpu_region (see per_cpu.h in this patch).
 */
struct sched_iorr_control {
	struct list_head runqueue;	/* list of runnable threads — presumably sched_iorr_data.list nodes; confirm when populated */
	struct hv_timer tick_timer;	/* periodic tick driving timeslice accounting — TODO confirm period once armed */
};
bool is_idle_thread(const struct thread_object *obj);
uint16_t sched_get_pcpuid(const struct thread_object *obj);
struct thread_object *sched_get_current(uint16_t pcpu_id);