Merge pull request #373 from xiulipan/merge12_2

Merge stable 1.2 task and scheduler fix into master
commit 40f9749660
Liam Girdwood, 2018-09-17 19:49:47 +01:00 (committed via GitHub)
8 changed files with 82 additions and 39 deletions

View File

@@ -101,20 +101,28 @@ static inline uint32_t task_get_irq(struct task *task)
static inline void task_set_data(struct task *task)
{
struct list_item *dst = NULL;
struct irq_task *irq_task;
uint32_t flags;
switch (task->priority) {
case TASK_PRI_MED + 1 ... TASK_PRI_LOW:
dst = &((*task_irq_low_get())->list);
irq_task = *task_irq_low_get();
dst = &irq_task->list;
break;
case TASK_PRI_HIGH ... TASK_PRI_MED - 1:
dst = &((*task_irq_high_get())->list);
irq_task = *task_irq_high_get();
dst = &irq_task->list;
break;
case TASK_PRI_MED:
default:
dst = &((*task_irq_med_get())->list);
irq_task = *task_irq_med_get();
dst = &irq_task->list;
break;
}
spin_lock_irq(&irq_task->lock, flags);
list_item_append(&task->irq_list, dst);
spin_unlock_irq(&irq_task->lock, flags);
}
/**
@@ -129,21 +137,19 @@ static void _irq_task(void *arg)
struct task *task;
uint32_t flags;
/* intentionally don't lock list to have task added from schedule irq */
list_for_item(tlist, &irq_task->list) {
task = container_of(tlist, struct task, irq_list);
spin_lock_irq(&irq_task->lock, flags);
list_for_item_safe(clist, tlist, &irq_task->list) {
if (task->func)
task = container_of(clist, struct task, irq_list);
list_item_del(clist);
spin_unlock_irq(&irq_task->lock, flags);
if (task->func && task->state == TASK_STATE_RUNNING)
task->func(task->data);
schedule_task_complete(task);
}
spin_lock_irq(&irq_task->lock, flags);
list_for_item_safe(clist, tlist, &irq_task->list) {
task = container_of(clist, struct task, irq_list);
list_item_del(&task->irq_list);
}
interrupt_clear(irq_task->irq);
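
A minimal, self-contained sketch of the locking pattern this file moves to: producers append to the pending list under its spinlock (as task_set_data() now does), while the IRQ handler pops each entry under the lock and only runs the callback once the lock has been released. The list and lock helpers below are simplified stand-ins written for this sketch, not the SOF implementations.

#include <stddef.h>
#include <stdio.h>

struct list_item {
	struct list_item *prev, *next;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct task {
	struct list_item irq_list;
	void (*func)(void *data);
	void *data;
};

static void list_init(struct list_item *head)
{
	head->prev = head;
	head->next = head;
}

static void list_append(struct list_item *item, struct list_item *head)
{
	item->next = head;
	item->prev = head->prev;
	head->prev->next = item;
	head->prev = item;
}

static void list_del(struct list_item *item)
{
	item->prev->next = item->next;
	item->next->prev = item->prev;
	list_init(item);
}

/* stand-ins for spin_lock_irq()/spin_unlock_irq() */
static void lock(void) {}
static void unlock(void) {}

/* producer: append under the lock, as task_set_data() now does */
static void queue_task(struct task *task, struct list_item *pending)
{
	lock();
	list_append(&task->irq_list, pending);
	unlock();
}

/* consumer: pop each task under the lock, run it with the lock dropped */
static void drain_pending(struct list_item *pending)
{
	struct task *task;

	lock();
	while (pending->next != pending) {
		task = container_of(pending->next, struct task, irq_list);
		list_del(&task->irq_list);	/* remove before running */
		unlock();
		if (task->func)
			task->func(task->data);
		lock();
	}
	unlock();
}

static void hello(void *data)
{
	printf("%s\n", (const char *)data);
}

int main(void)
{
	struct list_item pending;
	struct task t = { .func = hello, .data = (void *)"task ran" };

	list_init(&pending);
	queue_task(&t, &pending);
	drain_pending(&pending);
	return 0;
}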

View File

@@ -1275,7 +1275,12 @@ void pipeline_schedule_copy_idle(struct pipeline *p)
void pipeline_schedule_cancel(struct pipeline *p)
{
schedule_task_complete(&p->pipe_task);
int err;
/* cancel and wait for pipeline to complete */
err = schedule_task_cancel(&p->pipe_task);
if (err < 0)
trace_pipe_error("pC0");
}
static void pipeline_task(void *arg)

View File

@@ -38,6 +38,7 @@
#include <sof/lock.h>
#include <sof/list.h>
#include <sof/work.h>
#include <sof/wait.h>
struct schedule_data;
struct sof;
@@ -56,6 +57,10 @@ struct sof;
#define TASK_PRI_MED 0
#define TASK_PRI_HIGH -20
#define TASK_PRI_IPC 1
/* maximum task time slice in microseconds */
#define SCHEDULE_TASK_MAX_TIME_SLICE 5000
/* task descriptor */
struct task {
@@ -73,6 +78,7 @@ struct task {
/* runtime duration in scheduling clock base */
uint64_t max_rtime; /* max time taken to run */
completion_t complete;
};
struct schedule_data **arch_schedule_get(void);
@@ -83,6 +89,8 @@ void schedule_task(struct task *task, uint64_t start, uint64_t deadline);
void schedule_task_idle(struct task *task, uint64_t deadline);
int schedule_task_cancel(struct task *task);
void schedule_task_complete(struct task *task);
static inline void schedule_task_init(struct task *task, void (*func)(void *),
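
A hedged usage sketch of the API surface this header now exposes, tying the new TASK_PRI_IPC priority and the schedule_task_cancel() declaration to the calling pattern used by the IPC platform hunks further down. Everything below is a deliberately trivial stand-in so the sketch compiles on its own: the real scheduler defers the task rather than running it inline, the meaning of schedule_task_config()'s third argument is not shown in this diff (it is simply kept at 0, as in the platform code), and the start/deadline values are illustrative.

#include <stdint.h>
#include <stdio.h>

#define TASK_PRI_IPC 1
#define SCHEDULE_TASK_MAX_TIME_SLICE 5000

struct task {
	int priority;
	void (*func)(void *data);
	void *data;
};

/* stand-ins for the scheduler entry points declared in the header above */
static void schedule_task_init(struct task *task, void (*func)(void *), void *data)
{
	task->func = func;
	task->data = data;
}

static void schedule_task_config(struct task *task, int priority, int arg)
{
	(void)arg;		/* meaning not shown in this diff */
	task->priority = priority;
}

static void schedule_task(struct task *task, uint64_t start, uint64_t deadline)
{
	(void)start;
	(void)deadline;
	if (task->func)		/* the stand-in runs the task inline */
		task->func(task->data);
}

static int schedule_task_cancel(struct task *task)
{
	(void)task;
	return 0;		/* a negative value would signal failure */
}

static void ipc_process_task(void *data)
{
	printf("%s\n", (const char *)data);
}

int main(void)
{
	struct task ipc_task;

	schedule_task_init(&ipc_task, ipc_process_task, (void *)"ipc work");
	schedule_task_config(&ipc_task, TASK_PRI_IPC, 0);
	schedule_task(&ipc_task, 0, SCHEDULE_TASK_MAX_TIME_SLICE);

	if (schedule_task_cancel(&ipc_task) < 0)
		printf("cancel failed\n");
	return 0;
}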

View File

@@ -195,7 +195,7 @@ int platform_ipc_init(struct ipc *ipc)
/* schedule */
schedule_task_init(&_ipc->ipc_task, ipc_process_task, _ipc);
schedule_task_config(&_ipc->ipc_task, 0, 0);
schedule_task_config(&_ipc->ipc_task, TASK_PRI_IPC, 0);
#ifdef CONFIG_HOST_PTABLE
/* allocate page table buffer */

View File

@@ -217,7 +217,7 @@ int platform_ipc_init(struct ipc *ipc)
/* schedule */
schedule_task_init(&_ipc->ipc_task, ipc_process_task, _ipc);
schedule_task_config(&_ipc->ipc_task, 0, 0);
schedule_task_config(&_ipc->ipc_task, TASK_PRI_IPC, 0);
#ifdef CONFIG_HOST_PTABLE
/* allocate page table buffer */

View File

@@ -196,7 +196,7 @@ int platform_ipc_init(struct ipc *ipc)
/* schedule */
schedule_task_init(&_ipc->ipc_task, ipc_process_task, _ipc);
schedule_task_config(&_ipc->ipc_task, 0, 0);
schedule_task_config(&_ipc->ipc_task, TASK_PRI_IPC, 0);
#ifdef CONFIG_HOST_PTABLE
/* allocate page table buffer */

View File

@@ -212,7 +212,7 @@ int platform_ipc_init(struct ipc *ipc)
/* schedule */
schedule_task_init(&_ipc->ipc_task, ipc_process_task, _ipc);
schedule_task_config(&_ipc->ipc_task, 0, 0);
schedule_task_config(&_ipc->ipc_task, TASK_PRI_IPC, 0);
#ifdef CONFIG_HOST_PTABLE
/* allocate page table buffer */

View File

@@ -98,6 +98,7 @@ static inline struct task *edf_get_next(uint64_t current,
struct list_item *clist;
struct list_item *tlist;
uint64_t next_delta = UINT64_MAX;
int next_priority = TASK_PRI_LOW;
uint64_t delta;
uint64_t deadline;
int reschedule = 0;
@@ -126,23 +127,31 @@
/* include the length of task in deadline calc */
deadline = task->deadline - task->max_rtime;
/* get earliest deadline */
if (current < deadline) {
delta = deadline - current;
/* get highest priority */
if (task->priority < next_priority) {
next_priority = task->priority;
next_delta = delta;
next_task = task;
} else if (task->priority == next_priority) {
/* get earliest deadline */
if (delta < next_delta) {
next_delta = delta;
next_task = task;
}
}
} else {
/* missed scheduling - will be rescheduled */
trace_pipe("ed!");
/* have we already tried to reschedule? */
if (reschedule++)
if (!reschedule) {
reschedule++;
trace_pipe("edr");
edf_reschedule(task, current);
else {
} else {
/* reschedule failed */
list_item_del(&task->list);
task->state = TASK_STATE_CANCEL;
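
A self-contained sketch of the selection rule this hunk introduces in edf_get_next(): a numerically lower priority value wins outright (TASK_PRI_HIGH is -20, TASK_PRI_MED is 0), and the earliest deadline only breaks ties between candidates of equal priority. The candidate values in main() are illustrative, not SOF data.

#include <stdint.h>
#include <stdio.h>

struct cand {
	int priority;
	uint64_t delta;		/* deadline - current time */
};

/* return 1 if c should replace best as the next task to run */
static int better_candidate(const struct cand *c, const struct cand *best)
{
	if (c->priority < best->priority)
		return 1;	/* strictly higher priority */
	if (c->priority == best->priority && c->delta < best->delta)
		return 1;	/* same priority, earlier deadline */
	return 0;
}

int main(void)
{
	struct cand a = { .priority = 1, .delta = 100 };
	struct cand b = { .priority = -20, .delta = 5000 };

	/* b wins despite its later deadline because its priority is higher */
	printf("%s\n", better_candidate(&b, &a) ? "b runs next" : "a runs next");
	return 0;
}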
@@ -170,9 +179,11 @@ static uint64_t sch_work(void *data, uint64_t delay)
*/
static struct task *schedule_edf(void)
{
struct schedule_data *sch = *arch_schedule_get();
struct task *task;
struct task *future_task = NULL;
uint64_t current;
uint32_t flags;
tracev_pipe("edf");
@@ -195,7 +206,15 @@ static struct task *schedule_edf(void)
} else {
/* yes, run current task */
task->start = current;
/* init task for running */
wait_init(&task->complete);
spin_lock_irq(&sch->lock, flags);
task->state = TASK_STATE_RUNNING;
list_item_del(&task->list);
spin_unlock_irq(&sch->lock, flags);
/* now run task at correct run level */
run_task(task);
}
@@ -203,9 +222,8 @@ static struct task *schedule_edf(void)
return future_task;
}
#if 0 /* FIXME: is this needed ? */
/* delete task from scheduler */
static int schedule_task_del(struct task *task)
/* cancel and delete task from scheduler - won't stop it if already running */
int schedule_task_cancel(struct task *task)
{
struct schedule_data *sch = *arch_schedule_get();
uint32_t flags;
@@ -213,24 +231,21 @@ static int schedule_task_del(struct task *task)
tracev_pipe("del");
/* add task to list */
spin_lock_irq(&sch->lock, flags);
/* is task already running ? */
if (task->state == TASK_STATE_RUNNING) {
ret = -EAGAIN;
goto out;
/* check current task state, delete it if it is queued
* if it is already running, nothing we can do about it atm
*/
if (task->state == TASK_STATE_QUEUED) {
/* delete task */
task->state = TASK_STATE_CANCEL;
list_item_del(&task->list);
}
list_item_del(&task->list);
task->state = TASK_STATE_COMPLETED;
out:
spin_unlock_irq(&sch->lock, flags);
return ret;
}
#endif
static int _schedule_task(struct task *task, uint64_t start, uint64_t deadline)
{
@@ -249,6 +264,13 @@ static int _schedule_task(struct task *task, uint64_t start, uint64_t deadline)
return 0;
}
/* is task already queued ? - not enough MIPS to complete ? */
if (task->state == TASK_STATE_QUEUED) {
trace_pipe("tsq");
spin_unlock_irq(&sch->lock, flags);
return 0;
}
/* get the current time */
current = platform_timer_get(platform_timer);
@@ -312,9 +334,11 @@ void schedule_task_complete(struct task *task)
tracev_pipe("com");
spin_lock_irq(&sch->lock, flags);
list_item_del(&task->list);
task->state = TASK_STATE_COMPLETED;
spin_unlock_irq(&sch->lock, flags);
/* tell any waiter that task has completed */
wait_completed(&task->complete);
}
static void scheduler_run(void *unused)
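
A minimal sketch of the completion handshake the schedule_edf() and schedule_task_complete() hunks set up: the scheduler re-initialises the task's completion just before running it, and schedule_task_complete() signals it so that anything waiting on the task can proceed. completion_t and the wait_* helpers below are trivial flag-based stand-ins for the primitives pulled in via sof/wait.h, written only for this sketch.

#include <stdio.h>

typedef struct {
	volatile int done;
} completion_t;

static void wait_init(completion_t *c)
{
	c->done = 0;
}

static void wait_completed(completion_t *c)
{
	c->done = 1;
}

static int wait_is_completed(const completion_t *c)
{
	return c->done;
}

struct task {
	completion_t complete;
};

static void run_and_complete(struct task *task)
{
	wait_init(&task->complete);		/* init task for running */
	/* ... task->func(task->data) would run here ... */
	wait_completed(&task->complete);	/* tell any waiter we are done */
}

int main(void)
{
	struct task t;

	run_and_complete(&t);
	printf("completed: %d\n", wait_is_completed(&t.complete));
	return 0;
}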