ipc: fix an IPC completion race

A race currently exists in the IPC completion code. When the
pipeline TRIG_START command is processed on a timer-domain
pipeline, its handling is postponed to the pipeline task. Usually
the IPC task continues and completes before the pipeline task: the
IPC task notes that the command is being handled in the pipeline
context and doesn't send a reply to the host, and the pipeline
task later executes the command and notifies the host.

However, occasionally the timer interrupt fires while the IPC task
is still active. The command is then processed in the pipeline
context and a reply is sent to the host, which can therefore send
the next IPC command before the IPC task in the firmware has
completed. This produces an error trace:

ERROR schedule_edf_task(), task already queued or running 3

and the IPC is dropped.

This commit fixes the race by making sure that in asynchronous
cases the last of the two contexts to finish notifies the host.

BugLink: https://github.com/thesofproject/sof/issues/4706
Signed-off-by: Guennadi Liakhovetski <guennadi.liakhovetski@linux.intel.com>
Authored by Guennadi Liakhovetski on 2021-09-03 15:01:20 +02:00, committed by Liam Girdwood
parent e963aa0e4a
commit 82857a78ba
7 changed files with 46 additions and 16 deletions
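
For illustration only, here is a minimal, self-contained sketch of the completion
pattern this change introduces, with POSIX threads standing in for the two
firmware contexts; every name below (fake_ipc, complete_cmd, notify_host) is
hypothetical and not part of the SOF sources. Whichever context reaches
complete_cmd() second performs the host notification, so exactly one reply is
sent regardless of the order in which the contexts finish.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the firmware IPC state. */
struct fake_ipc {
	pthread_mutex_t lock;		/* models spin_lock_irq(&ipc->lock, flags) */
	bool delayed_response;		/* a second context will finish this command */
};

static struct fake_ipc ipc = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.delayed_response = true,	/* the command was deferred to another context */
};

static void notify_host(const char *who)
{
	printf("%s: reply sent to host\n", who);
}

/*
 * Called once from each of the two contexts. The first caller only
 * clears the flag; the second caller finds it already cleared and
 * performs the notification, so the host hears back only when both
 * contexts are done with the command.
 */
static void complete_cmd(const char *who)
{
	bool skip_first_entry;

	pthread_mutex_lock(&ipc.lock);
	skip_first_entry = ipc.delayed_response;
	ipc.delayed_response = false;
	pthread_mutex_unlock(&ipc.lock);

	if (!skip_first_entry)
		notify_host(who);
	else
		printf("%s: completion deferred to the other context\n", who);
}

static void *ipc_task(void *arg)
{
	(void)arg;
	complete_cmd("IPC task");
	return NULL;
}

static void *pipeline_task(void *arg)
{
	(void)arg;
	complete_cmd("pipeline task");
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, ipc_task, NULL);
	pthread_create(&b, NULL, pipeline_task, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}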


@@ -115,11 +115,14 @@ void ipc_platform_complete_cmd(void *data)
// TODO: signal audio work to enter D3 in normal context
/* are we about to enter D3 ? */
if (ipc->pm_prepare_D3) {
while (1)
/*
* Note, that this function is now called with
* interrupts disabled, so this wait will never even
* return anyway
*/
wait_for_interrupt(0);
}
}
int ipc_platform_send_msg(struct ipc_msg *msg)


@@ -201,12 +201,10 @@ enum task_state ipc_platform_do_cmd(void *data)
void ipc_platform_complete_cmd(void *data)
{
struct ipc *ipc = data;
uint32_t flags;
if (!cpu_is_me(ipc->core) || ipc->delayed_response)
if (!cpu_is_me(ipc->core))
return;
spin_lock_irq(&ipc->lock, flags);
/* write 1 to clear busy, and trigger interrupt to host*/
#if CAVS_VERSION == CAVS_VERSION_1_5
@@ -225,15 +223,11 @@ void ipc_platform_complete_cmd(void *data)
#if CONFIG_SUECREEK
if (ipc->pm_prepare_D3) {
//TODO: add support for Icelake
while (1)
wait_for_interrupt(0);
}
#endif
spin_unlock_irq(&ipc->lock, flags);
}
int ipc_platform_send_msg(struct ipc_msg *msg)


@@ -104,8 +104,12 @@ void ipc_platform_complete_cmd(void *data)
// TODO: signal audio work to enter D3 in normal context
/* are we about to enter D3 ? */
if (ipc->pm_prepare_D3) {
while (1)
/*
* Note, that this function is now called with
* interrupts disabled, so this wait will never even
* return anyway
*/
wait_for_interrupt(0);
}


@@ -125,7 +125,7 @@ static void idc_ipc(void)
ipc_cmd(ipc->comp_data);
/* Signal the host */
ipc_platform_complete_cmd(ipc);
ipc_complete_cmd(ipc);
}
/**


@@ -203,4 +203,9 @@ int ipc_process_on_core(uint32_t core, bool blocking);
*/
void ipc_msg_reply(struct sof_ipc_reply *reply);
/**
* \brief Call platform-specific IPC completion function.
*/
void ipc_complete_cmd(void *data);
#endif /* __SOF_DRIVERS_IPC_H__ */


@@ -224,9 +224,8 @@ void ipc_msg_reply(struct sof_ipc_reply *reply)
{
struct ipc *ipc = ipc_get();
ipc->delayed_response = false;
mailbox_hostbox_write(0, reply, reply->hdr.size);
ipc_platform_complete_cmd(ipc);
ipc_complete_cmd(ipc);
}
void ipc_schedule_process(struct ipc *ipc)
@@ -250,8 +249,32 @@ int ipc_init(struct sof *sof)
return platform_ipc_init(sof->ipc);
}
void ipc_complete_cmd(void *data)
{
struct ipc *ipc = data;
uint32_t flags;
bool skip_first_entry;
spin_lock_irq(&ipc->lock, flags);
/*
* IPC commands can be completed synchronously from the IPC task
* completion method, or asynchronously: either from the pipeline task
* thread or from another core. In the asynchronous case the order of
* the two events is unknown. It is important that the latter of them
* completes the IPC to avoid the host sending the next IPC too early.
* .delayed_response is used for this in such asynchronous cases.
*/
skip_first_entry = ipc->delayed_response;
ipc->delayed_response = false;
if (!skip_first_entry)
ipc_platform_complete_cmd(data);
spin_unlock_irq(&ipc->lock, flags);
}
struct task_ops ipc_task_ops = {
.run = ipc_platform_do_cmd,
.complete = ipc_platform_complete_cmd,
.complete = ipc_complete_cmd,
.get_deadline = ipc_task_deadline,
};


@@ -444,9 +444,10 @@ static int ipc_stream_trigger(uint32_t header)
* synchronously.
*/
if (pipeline_is_timer_driven(pcm_dev->cd->pipeline)) {
ret = pipeline_trigger(pcm_dev->cd->pipeline, pcm_dev->cd, cmd);
if (ret > 0)
ipc->delayed_response = true;
ret = pipeline_trigger(pcm_dev->cd->pipeline, pcm_dev->cd, cmd);
if (ret <= 0)
ipc->delayed_response = false;
} else {
ret = pipeline_trigger_run(pcm_dev->cd->pipeline, pcm_dev->cd, cmd);
}
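
One note on the hunk above, as I read it: the new code only clears the
delayed-response flag when pipeline_trigger() reports synchronous completion
(ret <= 0), which implies the flag is now raised before the trigger runs rather
than after a successful return as before. Raising it early matters because the
timer interrupt can hand the command to the pipeline task as soon as the trigger
starts; if the pipeline task completed the command before the flag was set, it
would reply to the host early and re-open the race. A hypothetical, stripped-down
model of that ordering (struct and function names are illustrative only, not the
actual SOF code):

#include <stdbool.h>

/* Minimal, hypothetical model of the state touched by the hunk above. */
struct fake_ipc {
	bool delayed_response;
};

/*
 * Raise the "another context will finish this command" flag before
 * starting work that may be handed over to the pipeline task, and
 * lower it again only if the trigger reports synchronous completion
 * or failure (ret <= 0).
 */
int trigger_timer_pipeline(struct fake_ipc *ipc, int (*do_trigger)(void))
{
	int ret;

	ipc->delayed_response = true;		/* assume asynchronous completion */

	ret = do_trigger();			/* may defer to the pipeline task */
	if (ret <= 0)
		ipc->delayed_response = false;	/* completed (or failed) synchronously */

	return ret;
}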