dp: introduce per-core DP scheduler

This commit changes the implementation of the DP scheduler.
Initially, an incorrect assumption was made that it is
enough to have one single instance of the DP scheduler,
located on the primary core.
This commit introduces one DP scheduler instance per core.

Signed-off-by: Marcin Szkudlinski <marcin.szkudlinski@intel.com>
This commit is contained in:
Marcin Szkudlinski 2023-06-25 12:07:14 +02:00 committed by Michal Wasko
parent a1d321734f
commit e651c70531
3 changed files with 49 additions and 56 deletions

View File

@ -16,20 +16,10 @@
/** /**
* *
* DP scheduler is a scheduler that creates a separate preemptible Zephyr thread for each SOF task * DP scheduler is a scheduler that creates a separate preemptible Zephyr thread for each SOF task
* There's only one instance of DP in the system, however, threads can be assigned and pinned
* to any core in the system for its execution, there's no SMP processing.
* *
* The task execution may be delayed and task may be re-scheduled periodically * The task execution may be delayed and task may be re-scheduled periodically
* NOTE: delayed start and rescheduling takes place in sync with LL scheduler, meaning the * NOTE: delayed start and rescheduling takes place in sync with LL scheduler, meaning the
 * DP scheduler is triggered as the last task of LL running on a primary core. * DP scheduler is triggered on each core after all LL tasks have been completed
* That implies a limitation: LL scheduler MUST be running on primary core in order to have
* this feature working.
* It is fine, because rescheduling is a feature used for data processing when a pipeline is
* running.
*
* Other possible usage of DP scheduler is to schedule task with DP_SCHEDULER_RUN_TASK_IMMEDIATELY
* as start parameter. It will force the task to work without any delays and async to LL.
* This kind of scheduling may be used for staring regular zephyr tasks using SOF api
* *
* Task run() may return: * Task run() may return:
* SOF_TASK_STATE_RESCHEDULE - the task will be rescheduled as specified in scheduler period * SOF_TASK_STATE_RESCHEDULE - the task will be rescheduled as specified in scheduler period
@ -67,13 +57,9 @@
*/ */
int scheduler_dp_init(void); int scheduler_dp_init(void);
/**
* \brief Set the Data Processing scheduler to be accessible at secondary cores
*/
int scheduler_dp_init_secondary_core(void);
/** /**
* \brief initialize a DP task and add it to scheduling * \brief initialize a DP task and add it to scheduling
* It must be called on core the task is declared to run on
* *
 * \param[out] task pointer, pointer to allocated task structure will be returned * \param[out] task pointer, pointer to allocated task structure will be returned
* \param[in] uid pointer to UUID of the task * \param[in] uid pointer to UUID of the task

View File

@ -186,7 +186,7 @@ int secondary_core_init(struct sof *sof)
scheduler_init_ll(dma_domain); scheduler_init_ll(dma_domain);
#if CONFIG_ZEPHYR_DP_SCHEDULER #if CONFIG_ZEPHYR_DP_SCHEDULER
err = scheduler_dp_init_secondary_core(); err = scheduler_dp_init();
if (err < 0) if (err < 0)
return err; return err;
#endif /* CONFIG_ZEPHYR_DP_SCHEDULER */ #endif /* CONFIG_ZEPHYR_DP_SCHEDULER */

View File

@ -9,6 +9,7 @@
#include <rtos/task.h> #include <rtos/task.h>
#include <stdint.h> #include <stdint.h>
#include <sof/schedule/dp_schedule.h> #include <sof/schedule/dp_schedule.h>
#include <sof/schedule/ll_schedule.h>
#include <sof/schedule/ll_schedule_domain.h> #include <sof/schedule/ll_schedule_domain.h>
#include <sof/trace/trace.h> #include <sof/trace/trace.h>
#include <rtos/wait.h> #include <rtos/wait.h>
@ -28,7 +29,7 @@ DECLARE_TR_CTX(dp_tr, SOF_UUID(dp_sched_uuid), LOG_LEVEL_INFO);
struct scheduler_dp_data { struct scheduler_dp_data {
struct list_item tasks; /* list of active dp tasks */ struct list_item tasks; /* list of active dp tasks */
struct k_spinlock lock; /* synchronization between cores */ struct task task; /* LL task - source of DP tick */
}; };
struct task_dp_pdata { struct task_dp_pdata {
@ -39,20 +40,24 @@ struct task_dp_pdata {
struct k_sem sem; /* semaphore for task scheduling */ struct k_sem sem; /* semaphore for task scheduling */
}; };
/* /* Single CPU-wide lock
 * there's only one instance of DP scheduler for all cores * as each per-core instance of dp-scheduler has separate structures, it is enough to
* Keep pointer to it here * use irq_lock instead of cross-core spinlocks
*/ */
static struct scheduler_dp_data *dp_sch; static inline unsigned int scheduler_dp_lock(void)
static inline k_spinlock_key_t scheduler_dp_lock(void)
{ {
return k_spin_lock(&dp_sch->lock); return irq_lock();
} }
static inline void scheduler_dp_unlock(k_spinlock_key_t key) static inline void scheduler_dp_unlock(unsigned int key)
{ {
k_spin_unlock(&dp_sch->lock, key); irq_unlock(key);
}
/* dummy LL task - to start LL on secondary cores */
static enum task_state scheduler_dp_ll_tick_dummy(void *data)
{
return SOF_TASK_STATE_RESCHEDULE;
} }
/* /*
@ -70,13 +75,8 @@ void scheduler_dp_ll_tick(void *receiver_data, enum notify_id event_type, void *
struct list_item *tlist; struct list_item *tlist;
struct task *curr_task; struct task *curr_task;
struct task_dp_pdata *pdata; struct task_dp_pdata *pdata;
k_spinlock_key_t lock_key; unsigned int lock_key;
struct scheduler_dp_data *dp_sch = scheduler_get_data(SOF_SCHEDULE_DP);
if (cpu_get_id() != PLATFORM_PRIMARY_CORE_ID)
return;
if (!dp_sch)
return;
lock_key = scheduler_dp_lock(); lock_key = scheduler_dp_lock();
list_for_item(tlist, &dp_sch->tasks) { list_for_item(tlist, &dp_sch->tasks) {
@ -102,8 +102,7 @@ void scheduler_dp_ll_tick(void *receiver_data, enum notify_id event_type, void *
static int scheduler_dp_task_cancel(void *data, struct task *task) static int scheduler_dp_task_cancel(void *data, struct task *task)
{ {
(void)(data); unsigned int lock_key;
k_spinlock_key_t lock_key;
 /* this is an async cancel - mark the task as canceled and remove it from scheduling */ /* this is an async cancel - mark the task as canceled and remove it from scheduling */
lock_key = scheduler_dp_lock(); lock_key = scheduler_dp_lock();
@ -118,7 +117,7 @@ static int scheduler_dp_task_cancel(void *data, struct task *task)
static int scheduler_dp_task_free(void *data, struct task *task) static int scheduler_dp_task_free(void *data, struct task *task)
{ {
k_spinlock_key_t lock_key; unsigned int lock_key;
struct task_dp_pdata *pdata = task->priv_data; struct task_dp_pdata *pdata = task->priv_data;
/* abort the execution of the thread */ /* abort the execution of the thread */
@ -144,7 +143,7 @@ static void dp_thread_fn(void *p1, void *p2, void *p3)
(void)p2; (void)p2;
(void)p3; (void)p3;
struct task_dp_pdata *task_pdata = task->priv_data; struct task_dp_pdata *task_pdata = task->priv_data;
k_spinlock_key_t lock_key; unsigned int lock_key;
enum task_state state; enum task_state state;
while (1) { while (1) {
@ -192,7 +191,6 @@ static void dp_thread_fn(void *p1, void *p2, void *p3)
} else { } else {
scheduler_dp_unlock(lock_key); scheduler_dp_unlock(lock_key);
} }
}; };
/* never be here */ /* never be here */
@ -201,9 +199,9 @@ static void dp_thread_fn(void *p1, void *p2, void *p3)
static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t start, static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t start,
uint64_t period) uint64_t period)
{ {
struct scheduler_dp_data *sch = data; struct scheduler_dp_data *dp_sch = (struct scheduler_dp_data *)data;
struct task_dp_pdata *pdata = task->priv_data; struct task_dp_pdata *pdata = task->priv_data;
k_spinlock_key_t lock_key; unsigned int lock_key;
lock_key = scheduler_dp_lock(); lock_key = scheduler_dp_lock();
@ -218,7 +216,7 @@ static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t sta
pdata->ticks_period = period / LL_TIMER_PERIOD_US; pdata->ticks_period = period / LL_TIMER_PERIOD_US;
/* add a task to DP scheduler list */ /* add a task to DP scheduler list */
list_item_prepend(&task->list, &sch->tasks); list_item_prepend(&task->list, &dp_sch->tasks);
if (start == SCHEDULER_DP_RUN_TASK_IMMEDIATELY) { if (start == SCHEDULER_DP_RUN_TASK_IMMEDIATELY) {
/* trigger the task immediately, don't wait for LL tick */ /* trigger the task immediately, don't wait for LL tick */
@ -233,6 +231,8 @@ static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t sta
scheduler_dp_unlock(lock_key); scheduler_dp_unlock(lock_key);
/* start LL task - run DP tick start and period are irrelevant for LL (that's bad)*/
schedule_task(&dp_sch->task, 0, 0);
return 0; return 0;
} }
@ -242,20 +242,11 @@ static struct scheduler_ops schedule_dp_ops = {
.schedule_task_free = scheduler_dp_task_free, .schedule_task_free = scheduler_dp_task_free,
}; };
int scheduler_dp_init_secondary_core(void)
{
if (!dp_sch)
return -ENOMEM;
/* register the scheduler instance for secondary core */
scheduler_init(SOF_SCHEDULE_DP, &schedule_dp_ops, dp_sch);
return 0;
}
int scheduler_dp_init(void) int scheduler_dp_init(void)
{ {
dp_sch = rzalloc(SOF_MEM_ZONE_SYS_SHARED, 0, SOF_MEM_CAPS_RAM, sizeof(*dp_sch)); int ret;
struct scheduler_dp_data *dp_sch = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_RAM,
sizeof(struct scheduler_dp_data));
if (!dp_sch) if (!dp_sch)
return -ENOMEM; return -ENOMEM;
@ -263,6 +254,16 @@ int scheduler_dp_init(void)
scheduler_init(SOF_SCHEDULE_DP, &schedule_dp_ops, dp_sch); scheduler_init(SOF_SCHEDULE_DP, &schedule_dp_ops, dp_sch);
/* init src of DP tick */
ret = schedule_task_init_ll(&dp_sch->task,
SOF_UUID(dp_sched_uuid),
SOF_SCHEDULE_LL_TIMER,
0, scheduler_dp_ll_tick_dummy, dp_sch,
cpu_get_id(), 0);
if (ret)
return ret;
notifier_register(NULL, NULL, NOTIFIER_ID_LL_POST_RUN, scheduler_dp_ll_tick, 0); notifier_register(NULL, NULL, NOTIFIER_ID_LL_POST_RUN, scheduler_dp_ll_tick, 0);
return 0; return 0;
@ -288,18 +289,24 @@ int scheduler_dp_task_init(struct task **task,
k_tid_t thread_id = NULL; k_tid_t thread_id = NULL;
int ret; int ret;
/* must be called on the same core the task will be binded to */
assert(cpu_get_id() == core);
/* /*
* allocate memory * allocate memory
* to avoid multiple malloc operations allocate all required memory as a single structure * to avoid multiple malloc operations allocate all required memory as a single structure
* and return pointer to task_memory->task * and return pointer to task_memory->task
* As the structure contains zephyr kernel specific data, it must be located in
* shared, non cached memory
*/ */
task_memory = rzalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_RAM, sizeof(*task_memory)); task_memory = rzalloc(SOF_MEM_ZONE_RUNTIME_SHARED, 0, SOF_MEM_CAPS_RAM,
sizeof(*task_memory));
if (!task_memory) { if (!task_memory) {
tr_err(&dp_tr, "zephyr_dp_task_init(): memory alloc failed"); tr_err(&dp_tr, "zephyr_dp_task_init(): memory alloc failed");
return -ENOMEM; return -ENOMEM;
} }
/* allocate stack - must be aligned so a separate alloc */ /* allocate stack - must be aligned and cached so a separate alloc */
stack_size = Z_KERNEL_STACK_SIZE_ADJUST(stack_size); stack_size = Z_KERNEL_STACK_SIZE_ADJUST(stack_size);
p_stack = (__sparse_force void __sparse_cache *) p_stack = (__sparse_force void __sparse_cache *)
rballoc_align(0, SOF_MEM_CAPS_RAM, stack_size, Z_KERNEL_STACK_OBJ_ALIGN); rballoc_align(0, SOF_MEM_CAPS_RAM, stack_size, Z_KERNEL_STACK_OBJ_ALIGN);