diff --git a/src/include/sof/schedule/ll_schedule_domain.h b/src/include/sof/schedule/ll_schedule_domain.h index c6f2b7026..f555c2563 100644 --- a/src/include/sof/schedule/ll_schedule_domain.h +++ b/src/include/sof/schedule/ll_schedule_domain.h @@ -48,7 +48,7 @@ struct ll_schedule_domain_ops { void (*domain_clear)(struct ll_schedule_domain *domain); bool (*domain_is_pending)(struct ll_schedule_domain *domain, struct task *task, struct comp_dev **comp); - void (*domain_task_cancel)(struct ll_schedule_domain *domain, uint32_t num_tasks); + void (*domain_task_cancel)(struct ll_schedule_domain *domain, struct task *task); }; struct ll_schedule_domain { @@ -130,10 +130,10 @@ static inline void domain_clear(struct ll_schedule_domain *domain) /* let the domain know that a task has been cancelled */ static inline void domain_task_cancel(struct ll_schedule_domain *domain, - uint32_t num_tasks) + struct task *task) { if (domain->ops->domain_task_cancel) - domain->ops->domain_task_cancel(domain, num_tasks); + domain->ops->domain_task_cancel(domain, task); } static inline int domain_register(struct ll_schedule_domain *domain, diff --git a/src/schedule/zephyr_dma_domain.c b/src/schedule/zephyr_dma_domain.c index edc3d4df8..edd9e2781 100644 --- a/src/schedule/zephyr_dma_domain.c +++ b/src/schedule/zephyr_dma_domain.c @@ -36,7 +36,6 @@ LOG_MODULE_DECLARE(ll_schedule, CONFIG_SOF_LOG_LEVEL); #define interrupt_clear_mask(irq, bit) #endif /* CONFIG_ARM64 */ -#define SEM_LIMIT 1 #define ZEPHYR_PDOMAIN_STACK_SIZE 8192 #if CONFIG_LOG_PROCESS_THREAD_CUSTOM_PRIORITY @@ -67,6 +66,11 @@ LOG_MODULE_DECLARE(ll_schedule, CONFIG_SOF_LOG_LEVEL); BUILD_ASSERT(ZEPHYR_DMA_DOMAIN_THREAD_PRIO >= 0, "Invalid DMA domain thread priority. Please make sure that logging threads priority is >= 1 or, preferably, >= 3"); +/* sanity check - make sure CONFIG_DMA_DOMAIN_SEM_LIMIT is not some + * garbage value. 
+ */ +BUILD_ASSERT(CONFIG_DMA_DOMAIN_SEM_LIMIT > 0, "Invalid DMA domain SEM_LIMIT"); + K_KERNEL_STACK_ARRAY_DEFINE(zephyr_dma_domain_stack, CONFIG_CORE_COUNT, ZEPHYR_PDOMAIN_STACK_SIZE); @@ -103,7 +107,7 @@ static int zephyr_dma_domain_unregister(struct ll_schedule_domain *domain, struct task *task, uint32_t num_tasks); static void zephyr_dma_domain_task_cancel(struct ll_schedule_domain *domain, - uint32_t num_tasks); + struct task *task); static const struct ll_schedule_domain_ops zephyr_dma_domain_ops = { .domain_register = zephyr_dma_domain_register, @@ -335,7 +339,7 @@ static int zephyr_dma_domain_register(struct ll_schedule_domain *domain, dt->arg = arg; /* prepare work semaphore */ - k_sem_init(&dt->sem, 0, SEM_LIMIT); + k_sem_init(&dt->sem, 0, CONFIG_DMA_DOMAIN_SEM_LIMIT); thread_name[sizeof(thread_name) - 2] = '0' + core; @@ -482,20 +486,29 @@ static int zephyr_dma_domain_unregister(struct ll_schedule_domain *domain, } static void zephyr_dma_domain_task_cancel(struct ll_schedule_domain *domain, - uint32_t num_tasks) + struct task *task) { struct zephyr_dma_domain *zephyr_dma_domain; struct zephyr_dma_domain_thread *dt; + struct pipeline_task *pipe_task; int core; zephyr_dma_domain = ll_sch_get_pdata(domain); core = cpu_get_id(); dt = zephyr_dma_domain->domain_thread + core; + pipe_task = pipeline_task_get(task); - if (!num_tasks) { - /* DMA IRQs got cut off, we need to let the Zephyr - * thread execute the handler one more time so as to be - * able to remove the task from the task queue + if (pipe_task->sched_comp->state != COMP_STATE_ACTIVE) { + /* If the state of the scheduling component + * corresponding to a pipeline task is not + * COMP_STATE_ACTIVE, the DMA IRQs are disabled. When a + * task is cancelled in this state we need to give + * resources to the semaphore to make sure that + * zephyr_ll_run() still executes and the tasks can be + * safely cancelled.
+ * + * This works because the state of the scheduling + * component is updated before the trigger operation. */ k_sem_give(&dt->sem); } diff --git a/src/schedule/zephyr_ll.c b/src/schedule/zephyr_ll.c index a2e471063..7405dc0b2 100644 --- a/src/schedule/zephyr_ll.c +++ b/src/schedule/zephyr_ll.c @@ -439,7 +439,7 @@ static int zephyr_ll_task_cancel(void *data, struct task *task) if (task->state != SOF_TASK_STATE_FREE) { task->state = SOF_TASK_STATE_CANCEL; /* let domain know that a task has been cancelled */ - domain_task_cancel(sch->ll_domain, sch->n_tasks - 1); + domain_task_cancel(sch->ll_domain, task); } zephyr_ll_unlock(sch, &flags); diff --git a/zephyr/Kconfig b/zephyr/Kconfig index 57ab1fd44..86c94a1de 100644 --- a/zephyr/Kconfig +++ b/zephyr/Kconfig @@ -34,6 +34,15 @@ config DMA_DOMAIN help This enables the usage of the DMA domain in scheduling. +config DMA_DOMAIN_SEM_LIMIT + int "Number of resources the Zephyr DMA domain can accumulate" + depends on DMA_DOMAIN + default 10 + help + Set this value according to the expected system load. Make sure that + SEM_LIMIT covers the maximum number of tasks your system will need to + execute at any given time (worst case). + config ZEPHYR_DP_SCHEDULER bool "use Zephyr thread based DP scheduler" default y if ACE