2020-06-23 23:43:56 +08:00
|
|
|
// SPDX-License-Identifier: BSD-3-Clause
|
|
|
|
//
|
|
|
|
// Copyright(c) 2019 Intel Corporation. All rights reserved.
|
|
|
|
//
|
|
|
|
// Author: Bartosz Kokoszko <bartoszx.kokoszko@linux.intel.com>
|
|
|
|
|
|
|
|
#include <sof/audio/component.h>
|
2023-02-22 04:01:47 +08:00
|
|
|
#include <rtos/task.h>
|
2020-06-23 23:43:56 +08:00
|
|
|
#include <stdint.h>
|
|
|
|
#include <sof/schedule/edf_schedule.h>
|
2022-09-01 05:19:35 +08:00
|
|
|
#include <rtos/wait.h>
|
2020-06-23 23:43:56 +08:00
|
|
|
|
2022-07-13 10:19:27 +08:00
|
|
|
#include <zephyr/kernel.h>
|
|
|
|
#include <zephyr/sys_clock.h>
|
2020-06-23 23:43:56 +08:00
|
|
|
|
2022-09-28 21:42:54 +08:00
|
|
|
static struct k_work_q edf_workq;
|
|
|
|
static K_THREAD_STACK_DEFINE(edf_workq_stack, 8192);
|
2020-06-23 23:43:56 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* since only IPC is using the EDF scheduler - we schedule the work in the
|
|
|
|
* next timer_domain time slice
|
|
|
|
*/
|
|
|
|
#define EDF_SCHEDULE_DELAY 0
|
|
|
|
|
|
|
|
/*
 * Work queue handler for an EDF task.
 *
 * Runs the task once and interprets the state returned by task_run():
 * SOF_TASK_STATE_RESCHEDULE re-arms the task's delayable work item for
 * its deadline; any other state completes the task.
 *
 * @param work Embedded k_work inside struct task (z_delayed_work member).
 */
static void edf_work_handler(struct k_work *work)
{
	/* recover the owning task from its embedded delayable work item */
	struct task *task = CONTAINER_OF(work, struct task, z_delayed_work);

	task->state = SOF_TASK_STATE_RUNNING;

	/* the task's return value is its next scheduling state */
	task->state = task_run(task);

	if (task->state == SOF_TASK_STATE_RESCHEDULE) {
		uint64_t deadline = task_get_deadline(task);
		uint64_t now = k_uptime_ticks();
		/* deadline already passed: re-run as soon as possible */
		k_timeout_t timeout = K_MSEC(0);

		if (deadline > now)
			timeout = K_TICKS(deadline - now);

		/* re-arm on the EDF queue; handler fires again at timeout */
		k_work_reschedule_for_queue(&edf_workq,
					    &task->z_delayed_work,
					    timeout);
		task->state = SOF_TASK_STATE_QUEUED;
	} else {
		task_complete(task);
		task->state = SOF_TASK_STATE_COMPLETED;
	}
}
|
|
|
|
|
|
|
|
/* schedule task */
|
|
|
|
static int schedule_edf_task(void *data, struct task *task, uint64_t start,
|
|
|
|
uint64_t period)
|
|
|
|
{
|
|
|
|
/* start time is microseconds from now */
|
|
|
|
k_timeout_t start_time = K_USEC(start + EDF_SCHEDULE_DELAY);
|
|
|
|
|
2021-05-08 04:26:52 +08:00
|
|
|
k_work_reschedule_for_queue(&edf_workq,
|
2021-09-01 19:07:19 +08:00
|
|
|
&task->z_delayed_work,
|
|
|
|
start_time);
|
2020-06-23 23:43:56 +08:00
|
|
|
|
|
|
|
task->state = SOF_TASK_STATE_QUEUED;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int schedule_edf_task_cancel(void *data, struct task *task)
|
|
|
|
{
|
|
|
|
if (task->state == SOF_TASK_STATE_QUEUED) {
|
2021-05-10 16:28:07 +08:00
|
|
|
k_work_cancel_delayable(&task->z_delayed_work);
|
2020-06-23 23:43:56 +08:00
|
|
|
|
|
|
|
/* delete task */
|
|
|
|
task->state = SOF_TASK_STATE_CANCEL;
|
|
|
|
}
|
|
|
|
|
2021-05-10 16:28:07 +08:00
|
|
|
return 0;
|
2020-06-23 23:43:56 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int schedule_edf_task_running(void *data, struct task *task)
|
|
|
|
{
|
|
|
|
task->state = SOF_TASK_STATE_RUNNING;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int schedule_edf_task_free(void *data, struct task *task)
|
|
|
|
{
|
|
|
|
task->state = SOF_TASK_STATE_FREE;
|
|
|
|
task->ops.run = NULL;
|
|
|
|
task->data = NULL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2021-04-30 19:50:18 +08:00
|
|
|
/* EDF backend operations registered with the SOF scheduler core */
static struct scheduler_ops schedule_edf_ops = {
	.schedule_task = schedule_edf_task,
	.schedule_task_running = schedule_edf_task_running,
	.schedule_task_cancel = schedule_edf_task_cancel,
	.schedule_task_free = schedule_edf_task_free,
};
|
|
|
|
|
|
|
|
int scheduler_init_edf(void)
|
|
|
|
{
|
2021-04-30 19:50:18 +08:00
|
|
|
struct k_thread *thread = &edf_workq.thread;
|
|
|
|
|
2020-06-23 23:43:56 +08:00
|
|
|
scheduler_init(SOF_SCHEDULE_EDF, &schedule_edf_ops, NULL);
|
|
|
|
|
2021-05-08 04:26:52 +08:00
|
|
|
k_work_queue_start(&edf_workq,
|
2020-06-23 23:43:56 +08:00
|
|
|
edf_workq_stack,
|
|
|
|
K_THREAD_STACK_SIZEOF(edf_workq_stack),
|
2023-02-22 17:59:43 +08:00
|
|
|
EDF_ZEPHYR_PRIORITY, NULL);
|
2021-04-30 19:50:18 +08:00
|
|
|
|
|
|
|
k_thread_suspend(thread);
|
|
|
|
|
|
|
|
k_thread_cpu_mask_clear(thread);
|
|
|
|
k_thread_cpu_mask_enable(thread, PLATFORM_PRIMARY_CORE_ID);
|
|
|
|
k_thread_name_set(thread, "edf_workq");
|
|
|
|
|
|
|
|
k_thread_resume(thread);
|
2020-06-23 23:43:56 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-09-03 20:07:28 +08:00
|
|
|
int schedule_task_init_edf(struct task *task, const struct sof_uuid_entry *uid,
|
2020-06-23 23:43:56 +08:00
|
|
|
const struct task_ops *ops,
|
|
|
|
void *data, uint16_t core, uint32_t flags)
|
|
|
|
{
|
2021-05-19 16:38:46 +08:00
|
|
|
int ret;
|
2020-06-23 23:43:56 +08:00
|
|
|
|
|
|
|
ret = schedule_task_init(task, uid, SOF_SCHEDULE_EDF, 0, ops->run, data,
|
|
|
|
core, flags);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
task->ops = *ops;
|
|
|
|
|
2021-05-08 04:26:52 +08:00
|
|
|
k_work_init_delayable(&task->z_delayed_work, edf_work_handler);
|
2020-06-23 23:43:56 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-09-03 20:07:28 +08:00
|
|
|
/*
 * Initialize an EDF task with a cycle budget.
 *
 * NOTE: this backend does not implement budgets — cycles_budget is
 * ignored and the call is forwarded to schedule_task_init_edf().
 */
int schedule_task_init_edf_with_budget(struct task *task,
				       const struct sof_uuid_entry *uid,
				       const struct task_ops *ops,
				       void *data, uint16_t core,
				       uint32_t flags, uint32_t cycles_budget)
{
	return schedule_task_init_edf(task, uid, ops, data, core, flags);
}
|