zephyr_dma_domain: Give semaphore resources based on sched_comp's state upon cancel

This implies the following changes:
	1) domain_task_cancel() shall no longer receive the number
	of tasks, but, instead, will receive the task to be cancelled.

	2) zephyr_dma_domain_task_cancel() will do k_sem_give() if the
	state of the sched_comp associated with the given task is
	!= COMP_STATE_ACTIVE.

	3) SEM_LIMIT is changed to CONFIG_DMA_DOMAIN_SEM_LIMIT and can
	be configured.

The reasoning for the changes is the following:
	1) and 2): In the case of mixers, domain_task_cancel()'s
	num_tasks is not a reliable way to determine if the DMA
	IRQs got cut off. Let's consider the following scenario:

	We have a mixer with 1 non-registrable pipeline task and
	1 registrable pipeline task. Upon TRIGGER_STOP we'd have
	the following flow (i.MX boards):
		a) SAI_STOP => DMA IRQs get cut off.
		b) Cancel non-registrable pipeline task.
		c) Cancel registrable pipeline task.

	During b) and c), domain_task_cancel() would get the following
	arguments:
		b) domain_task_cancel(sch, 1)
		c) domain_task_cancel(sch, 1)

	This is because the non-registrable pipeline task wasn't
	dequeued before c) so, even though the DMA IRQs got cut
	off during a), zephyr_dma_domain_task_cancel() does not give
	resources to the semaphore. As a consequence, zephyr_ll_run()
	will no longer execute and the pipeline tasks remain queued.

	3) Since the semaphore can accumulate more than 1 resource
	at a given time (and since it's safe to make SEM_LIMIT depend
	on the load of the system), SEM_LIMIT was changed into a config.
	This allows the user to change SEM_LIMIT based on the system
	load. For example, if there are 2 non-registrable pipeline tasks
	and 1 registrable pipeline task (same scheduling component),
	an appropriate value for SEM_LIMIT should be 3 (since the
	semaphore can be given at most 3 resources during the task
	cancellation process). Of course, making SEM_LIMIT depend on
	the system load is the worst case but this way we can make sure
	that the cancelled tasks get dequeued properly.

Signed-off-by: Laurentiu Mihalcea <laurentiu.mihalcea@nxp.com>
This commit is contained in:
Laurentiu Mihalcea 2023-05-09 12:10:20 +03:00 committed by Daniel Baluta
parent 465605f0ef
commit b185ffa6b8
4 changed files with 34 additions and 12 deletions

View File

@ -48,7 +48,7 @@ struct ll_schedule_domain_ops {
void (*domain_clear)(struct ll_schedule_domain *domain);
bool (*domain_is_pending)(struct ll_schedule_domain *domain,
struct task *task, struct comp_dev **comp);
void (*domain_task_cancel)(struct ll_schedule_domain *domain, uint32_t num_tasks);
void (*domain_task_cancel)(struct ll_schedule_domain *domain, struct task *task);
};
struct ll_schedule_domain {
@ -130,10 +130,10 @@ static inline void domain_clear(struct ll_schedule_domain *domain)
/* let the domain know that a task has been cancelled */
static inline void domain_task_cancel(struct ll_schedule_domain *domain,
uint32_t num_tasks)
struct task *task)
{
if (domain->ops->domain_task_cancel)
domain->ops->domain_task_cancel(domain, num_tasks);
domain->ops->domain_task_cancel(domain, task);
}
static inline int domain_register(struct ll_schedule_domain *domain,

View File

@ -36,7 +36,6 @@ LOG_MODULE_DECLARE(ll_schedule, CONFIG_SOF_LOG_LEVEL);
#define interrupt_clear_mask(irq, bit)
#endif /* CONFIG_ARM64 */
#define SEM_LIMIT 1
#define ZEPHYR_PDOMAIN_STACK_SIZE 8192
#if CONFIG_LOG_PROCESS_THREAD_CUSTOM_PRIORITY
@ -67,6 +66,11 @@ LOG_MODULE_DECLARE(ll_schedule, CONFIG_SOF_LOG_LEVEL);
BUILD_ASSERT(ZEPHYR_DMA_DOMAIN_THREAD_PRIO >= 0,
"Invalid DMA domain thread priority. Please make sure that logging threads priority is >= 1 or, preferably, >= 3");
/* sanity check - make sure CONFIG_DMA_DOMAIN_SEM_LIMIT is not some
* garbage value.
*/
BUILD_ASSERT(CONFIG_DMA_DOMAIN_SEM_LIMIT > 0, "Invalid DMA domain SEM_LIMIT");
K_KERNEL_STACK_ARRAY_DEFINE(zephyr_dma_domain_stack,
CONFIG_CORE_COUNT,
ZEPHYR_PDOMAIN_STACK_SIZE);
@ -103,7 +107,7 @@ static int zephyr_dma_domain_unregister(struct ll_schedule_domain *domain,
struct task *task,
uint32_t num_tasks);
static void zephyr_dma_domain_task_cancel(struct ll_schedule_domain *domain,
uint32_t num_tasks);
struct task *task);
static const struct ll_schedule_domain_ops zephyr_dma_domain_ops = {
.domain_register = zephyr_dma_domain_register,
@ -335,7 +339,7 @@ static int zephyr_dma_domain_register(struct ll_schedule_domain *domain,
dt->arg = arg;
/* prepare work semaphore */
k_sem_init(&dt->sem, 0, SEM_LIMIT);
k_sem_init(&dt->sem, 0, CONFIG_DMA_DOMAIN_SEM_LIMIT);
thread_name[sizeof(thread_name) - 2] = '0' + core;
@ -482,20 +486,29 @@ static int zephyr_dma_domain_unregister(struct ll_schedule_domain *domain,
}
static void zephyr_dma_domain_task_cancel(struct ll_schedule_domain *domain,
uint32_t num_tasks)
struct task *task)
{
struct zephyr_dma_domain *zephyr_dma_domain;
struct zephyr_dma_domain_thread *dt;
struct pipeline_task *pipe_task;
int core;
zephyr_dma_domain = ll_sch_get_pdata(domain);
core = cpu_get_id();
dt = zephyr_dma_domain->domain_thread + core;
pipe_task = pipeline_task_get(task);
if (!num_tasks) {
/* DMA IRQs got cut off, we need to let the Zephyr
* thread execute the handler one more time so as to be
* able to remove the task from the task queue
if (pipe_task->sched_comp->state != COMP_STATE_ACTIVE) {
/* If the state of the scheduling component
* corresponding to a pipeline task is !=
* COMP_STATE_ACTIVE then that means the DMA IRQs are
* disabled. Because of this, when a task is cancelled
* we need to give resources to the semaphore to make
* sure that zephyr_ll_run() is still executed and the
* tasks can be safely cancelled.
*
* This works because the state of the scheduling
* component is updated before the trigger operation.
*/
k_sem_give(&dt->sem);
}

View File

@ -439,7 +439,7 @@ static int zephyr_ll_task_cancel(void *data, struct task *task)
if (task->state != SOF_TASK_STATE_FREE) {
task->state = SOF_TASK_STATE_CANCEL;
/* let domain know that a task has been cancelled */
domain_task_cancel(sch->ll_domain, sch->n_tasks - 1);
domain_task_cancel(sch->ll_domain, task);
}
zephyr_ll_unlock(sch, &flags);

View File

@ -34,6 +34,15 @@ config DMA_DOMAIN
help
This enables the usage of the DMA domain in scheduling.
config DMA_DOMAIN_SEM_LIMIT
int "Number of resources the Zephyr's DMA domain can accumulate"
depends on DMA_DOMAIN
default 10
help
Set this value according to the load of the system. Please make sure
that SEM_LIMIT covers the maximum number of tasks your system will be
executing at some point (worst case).
config ZEPHYR_DP_SCHEDULER
bool "use Zephyr thread based DP scheduler"
default y if ACE