pm_runtime: add core synchronization for Host DMA L1 Exit

Implements a multicore synchronization mechanism for Host DMA L1 exit.
A simple reference counter is added to avoid a situation where shorter
processing on one core forces the Host DMA bus to exit L1 while a
transfer is still in progress on another core. pm_runtime_get() with
PM_RUNTIME_HOST_DMA_L1 is called from the new NOTIFIER_ID_LL_PRE_RUN
notification event.

Signed-off-by: Tomasz Lauda <tomasz.lauda@linux.intel.com>
This commit is contained in:
Tomasz Lauda 2020-04-06 13:45:13 +02:00 committed by Janusz Jankowski
parent a70326a8de
commit 3cba349741
3 changed files with 51 additions and 1 deletions

View File

@ -221,6 +221,12 @@ static void hda_dma_get_dbg_vals(struct dma_chan_data *chan,
#define hda_dma_ptr_trace(...)
#endif
/*
 * NOTIFIER_ID_LL_PRE_RUN callback: registers Host DMA usage for this core by
 * taking a PM_RUNTIME_HOST_DMA_L1 reference before the LL scheduler runs, so
 * that a faster core cannot force the Host DMA bus out of L1 while this core
 * still has a transfer pending. The arg/type/data parameters are required by
 * the notifier callback signature but are unused here.
 */
static void hda_dma_l1_entry_notify(void *arg, enum notify_id type, void *data)
{
/* Notify about Host DMA usage */
pm_runtime_get(PM_RUNTIME_HOST_DMA_L1, 0);
}
static void hda_dma_l1_exit_notify(void *arg, enum notify_id type, void *data)
{
/* Force Host DMA to exit L1 */
@ -344,6 +350,15 @@ static int hda_dma_host_start(struct dma_chan_data *channel)
if (!hda_chan->irq_disabled)
return ret;
/* Inform about Host DMA usage */
ret = notifier_register(NULL, scheduler_get_data(SOF_SCHEDULE_LL_TIMER),
NOTIFIER_ID_LL_PRE_RUN, hda_dma_l1_entry_notify,
NOTIFIER_FLAG_AGGREGATE);
if (ret < 0)
trace_hddma_error("hda-dmac: %d channel %d, cannot register notification %d",
channel->dma->plat_data.id, channel->index,
ret);
/* Register common L1 exit for all channels */
ret = notifier_register(NULL, scheduler_get_data(SOF_SCHEDULE_LL_TIMER),
NOTIFIER_ID_LL_POST_RUN, hda_dma_l1_exit_notify,
@ -363,6 +378,10 @@ static void hda_dma_host_stop(struct dma_chan_data *channel)
if (!hda_chan->irq_disabled)
return;
/* Unregister L1 entry */
notifier_unregister(NULL, scheduler_get_data(SOF_SCHEDULE_LL_TIMER),
NOTIFIER_ID_LL_PRE_RUN);
/* Unregister L1 exit */
notifier_unregister(NULL, scheduler_get_data(SOF_SCHEDULE_LL_TIMER),
NOTIFIER_ID_LL_POST_RUN);
@ -389,6 +408,7 @@ static int hda_dma_enable_unlock(struct dma_chan_data *channel)
if (channel->direction == DMA_DIR_HMEM_TO_LMEM ||
channel->direction == DMA_DIR_LMEM_TO_HMEM) {
pm_runtime_get(PM_RUNTIME_HOST_DMA_L1, 0);
ret = hda_dma_host_start(channel);
if (ret < 0)
return ret;
@ -418,6 +438,7 @@ static int hda_dma_link_copy(struct dma_chan_data *channel, int bytes,
static int hda_dma_host_copy(struct dma_chan_data *channel, int bytes,
uint32_t flags)
{
struct hda_chan_data *hda_chan = dma_chan_get_data(channel);
int ret;
tracev_hddma("hda-dmac: %d channel %d -> copy 0x%x bytes",
@ -425,6 +446,10 @@ static int hda_dma_host_copy(struct dma_chan_data *channel, int bytes,
hda_dma_get_dbg_vals(channel, HDA_DBG_PRE, HDA_DBG_HOST);
/* Register Host DMA usage */
if (!hda_chan->irq_disabled)
pm_runtime_get(PM_RUNTIME_HOST_DMA_L1, 0);
/* blocking mode copy */
if (flags & DMA_COPY_BLOCKING) {
ret = channel->direction == DMA_DIR_HMEM_TO_LMEM ?

View File

@ -21,6 +21,7 @@ struct pm_runtime_data;
/** \brief cAVS specific runtime power management data. */
struct cavs_pm_runtime_data {
int dsp_d0_sref; /**< simple ref counter, accessed by core 0 only */
int host_dma_l1_sref; /**< ref counter for Host DMA accesses; incremented per
			   get and decremented on forced L1 exit, under the
			   pm_runtime_data spinlock (shared across cores) */
};
#endif

View File

@ -40,17 +40,35 @@
#include <cavs/lib/power_down.h>
#endif
/**
 * \brief Registers Host DMA access by incrementing ref counter.
 *
 * Called for PM_RUNTIME_HOST_DMA_L1 from platform_pm_runtime_get(). The
 * counter is later decremented by the forced-L1-exit path, which only asserts
 * SHIM_SVCFG_FORCE_L1_EXIT once the count drops to zero — i.e. once no core
 * has a Host DMA transfer in flight.
 */
static void cavs_pm_runtime_host_dma_l1_entry(void)
{
struct pm_runtime_data *prd = pm_runtime_data_get();
struct cavs_pm_runtime_data *pprd = prd->platform_data;
uint32_t flags;
/* IRQ-disabling spinlock: the counter is shared between cores */
spin_lock_irq(&prd->lock, flags);
pprd->host_dma_l1_sref++;
/* NOTE(review): presumably writes the updated structures back to shared
 * memory so other cores observe the new count — confirm
 * platform_shared_commit() semantics for this platform.
 */
platform_shared_commit(prd, sizeof(*prd));
platform_shared_commit(pprd, sizeof(*pprd));
spin_unlock_irq(&prd->lock, flags);
}
/**
* \brief Forces Host DMAs to exit L1.
*/
static inline void cavs_pm_runtime_force_host_dma_l1_exit(void)
{
struct pm_runtime_data *prd = pm_runtime_data_get();
struct cavs_pm_runtime_data *pprd = prd->platform_data;
uint32_t flags;
spin_lock_irq(&prd->lock, flags);
if (!(shim_read(SHIM_SVCFG) & SHIM_SVCFG_FORCE_L1_EXIT)) {
if (!--pprd->host_dma_l1_sref) {
shim_write(SHIM_SVCFG,
shim_read(SHIM_SVCFG) | SHIM_SVCFG_FORCE_L1_EXIT);
@ -60,6 +78,9 @@ static inline void cavs_pm_runtime_force_host_dma_l1_exit(void)
shim_read(SHIM_SVCFG) & ~(SHIM_SVCFG_FORCE_L1_EXIT));
}
platform_shared_commit(prd, sizeof(*prd));
platform_shared_commit(pprd, sizeof(*pprd));
spin_unlock_irq(&prd->lock, flags);
}
@ -382,6 +403,9 @@ void platform_pm_runtime_get(enum pm_runtime_context context, uint32_t index,
{
/* Action based on context */
switch (context) {
case PM_RUNTIME_HOST_DMA_L1:
cavs_pm_runtime_host_dma_l1_entry();
break;
#if CONFIG_CAVS_SSP
case SSP_CLK:
cavs_pm_runtime_dis_ssp_clk_gating(index);