dw-dma: allocate uncached memory for LLI always

Multi-core configurations already use uncached memory for LLI. Using
it unconditionally also makes sense, to avoid having to synchronise
the cache manually. Force LLI objects to be uncached.

Signed-off-by: Guennadi Liakhovetski <guennadi.liakhovetski@linux.intel.com>
This commit is contained in:
Guennadi Liakhovetski 2022-02-25 10:06:39 +01:00 committed by Liam Girdwood
parent cd0c1f99c2
commit e1ca6f6252
2 changed files with 6 additions and 8 deletions

View File

@@ -442,8 +442,10 @@ static int dw_dma_stop(struct dma_chan_data *channel)
 		lli++;
 	}
+#ifndef __ZEPHYR__
 	dcache_writeback_region(dw_chan->lli,
 				sizeof(struct dw_lli) * channel->desc_count);
+#endif
 #endif
 	channel->status = COMP_STATE_PREPARE;
@@ -564,7 +566,7 @@ static int dw_dma_set_config(struct dma_chan_data *channel,
 	if (dw_chan->lli)
 		rfree(dw_chan->lli);
-	dw_chan->lli = rballoc(SOF_MEM_FLAG_COHERENT,
+	dw_chan->lli = rmalloc(SOF_MEM_ZONE_RUNTIME, SOF_MEM_FLAG_COHERENT,
 			       SOF_MEM_CAPS_RAM | SOF_MEM_CAPS_DMA,
 			       sizeof(struct dw_lli) * channel->desc_count);
 	if (!dw_chan->lli) {
@@ -775,8 +777,10 @@ static int dw_dma_set_config(struct dma_chan_data *channel,
 	}
 	/* write back descriptors so DMA engine can read them directly */
+#ifndef __ZEPHYR__
 	dcache_writeback_region(dw_chan->lli,
 				sizeof(struct dw_lli) * channel->desc_count);
+#endif
 	channel->status = COMP_STATE_PREPARE;
 	dw_chan->lli_current = dw_chan->lli;
@@ -816,14 +820,8 @@ static void dw_dma_verify_transfer(struct dma_chan_data *channel,
 	 * sure the cache is coherent between DSP and DMAC.
 	 */
 #if defined __ZEPHYR__
-	dcache_invalidate_region(dw_chan->lli,
-				 sizeof(struct dw_lli) * channel->desc_count);
 	for (i = 0; i < channel->desc_count; i++)
 		dw_chan->lli[i].ctrl_hi &= ~DW_CTLH_DONE(1);
-	dcache_writeback_region(dw_chan->lli,
-				sizeof(struct dw_lli) * channel->desc_count);
 #else
 	while (lli->ctrl_hi & DW_CTLH_DONE(1)) {
 		lli->ctrl_hi &= ~DW_CTLH_DONE(1);

View File

@@ -167,7 +167,7 @@ void *rmalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes)
 {
 	void *ptr;
-	if (zone_is_cached(zone)) {
+	if (zone_is_cached(zone) && !(flags & SOF_MEM_FLAG_COHERENT)) {
 		ptr = heap_alloc_aligned_cached(&sof_heap, 0, bytes);
 	} else {
 		/*