pipeline: cache fixes for multicore processing

Fixes cache issues with multicore processing of multiple pipelines:
- Changes the pipeline_cache method, because the previous
  implementation could not work correctly, especially for
  invalidation.
- Allocates dma and dai private data in uncached memory, because
  those resources are shared between different pipelines and thus
  potentially between different cores (see the sketch below).
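
A minimal sketch of the allocation change, mirroring the probe hunks
below (zone and flag names are taken from this diff; the error path
follows the dmic hunk and is assumed for the other drivers):

	static int ssp_probe(struct dai *dai)
	{
		struct ssp_pdata *ssp;

		/* shared between pipelines and so potentially between
		 * cores: allocate from the uncached mapping so no
		 * per-core writeback/invalidate is ever needed for it
		 */
		ssp = rzalloc(RZONE_SYS | RZONE_FLAG_UNCACHED,
			      SOF_MEM_CAPS_RAM, sizeof(*ssp));
		if (!ssp)
			return -ENOMEM;

		dai_set_drvdata(dai, ssp);
		spinlock_init(&ssp->lock);
		return 0;
	}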

Signed-off-by: Tomasz Lauda <tomasz.lauda@linux.intel.com>
Tomasz Lauda 2018-10-02 12:27:56 +02:00
parent 7aa627a10a
commit f35c9d9c65
11 changed files with 52 additions and 78 deletions

View File

@@ -765,11 +765,7 @@ static void dai_cache(struct comp_dev *dev, int cmd)
}
dcache_writeback_invalidate_region(dd->dai, sizeof(*dd->dai));
dcache_writeback_invalidate_region(dd->dai->private,
dd->dai->private_size);
dcache_writeback_invalidate_region(dd->dma, sizeof(*dd->dma));
dcache_writeback_invalidate_region(dd->dma->private,
dd->dma->private_size);
dcache_writeback_invalidate_region(dd, sizeof(*dd));
dcache_writeback_invalidate_region(dev, sizeof(*dev));
break;
@@ -782,11 +778,7 @@ static void dai_cache(struct comp_dev *dev, int cmd)
dd = comp_get_drvdata(dev);
dcache_invalidate_region(dd, sizeof(*dd));
dcache_invalidate_region(dd->dma, sizeof(*dd->dma));
dcache_invalidate_region(dd->dma->private,
dd->dma->private_size);
dcache_invalidate_region(dd->dai, sizeof(*dd->dai));
dcache_invalidate_region(dd->dai->private,
dd->dai->private_size);
list_for_item(item, &dd->config.elem_list) {
dcache_invalidate_region(item, sizeof(*item));

View File

@@ -786,8 +786,6 @@ static void host_cache(struct comp_dev *dev, int cmd)
#endif
dcache_writeback_invalidate_region(hd->dma, sizeof(*hd->dma));
dcache_writeback_invalidate_region(hd->dma->private,
hd->dma->private_size);
dcache_writeback_invalidate_region(hd, sizeof(*hd));
dcache_writeback_invalidate_region(dev, sizeof(*dev));
break;
@@ -800,8 +798,6 @@ static void host_cache(struct comp_dev *dev, int cmd)
hd = comp_get_drvdata(dev);
dcache_invalidate_region(hd, sizeof(*hd));
dcache_invalidate_region(hd->dma, sizeof(*hd->dma));
dcache_invalidate_region(hd->dma->private,
hd->dma->private_size);
#if !defined CONFIG_DMA_GW
list_for_item(item, &hd->local.elem_list) {

View File

@@ -397,11 +397,8 @@ static int component_op_downstream(struct op_data *op_data,
/* component should reset and free resources */
err = comp_reset(current);
break;
case COMP_OPS_CACHE:
/* cache operation */
comp_cache(current, op_data->cmd);
break;
case COMP_OPS_BUFFER: /* handled by other API call */
case COMP_OPS_CACHE:
default:
trace_pipe_error("eOi");
trace_error_value(op_data->op);
@@ -482,11 +479,8 @@ static int component_op_upstream(struct op_data *op_data,
/* component should reset and free resources */
err = comp_reset(current);
break;
case COMP_OPS_CACHE:
/* cache operation */
comp_cache(current, op_data->cmd);
break;
case COMP_OPS_BUFFER: /* handled by other API call */
case COMP_OPS_CACHE:
default:
trace_pipe_error("eOi");
trace_error_value(op_data->op);
@@ -639,89 +633,79 @@ out:
return ret;
}
static void component_cache_buffers_downstream(struct comp_dev *start,
struct comp_dev *current,
struct comp_buffer *buffer,
cache_command cache_cmd)
static void component_cache_downstream(int cmd, struct comp_dev *start,
struct comp_dev *current,
struct comp_dev *previous)
{
cache_command cache_cmd = comp_get_cache_command(cmd);
struct list_item *clist;
struct comp_buffer *buffer;
if (current != start && buffer) {
cache_cmd(buffer, sizeof(*buffer));
comp_cache(current, cmd);
/* stop if we reach an endpoint */
if (current->is_endpoint)
return;
}
/* we finish walking the graph if we reach the DAI */
if (current != start && current->is_endpoint)
return;
/* travel further */
/* now run this operation downstream */
list_for_item(clist, &current->bsink_list) {
buffer = container_of(clist, struct comp_buffer, source_list);
/* stop going if this component is not connected */
if (cache_cmd)
cache_cmd(buffer, sizeof(*buffer));
/* don't go downstream if this component is not connected */
if (!buffer->connected)
continue;
component_cache_buffers_downstream(start, buffer->sink,
buffer, cache_cmd);
component_cache_downstream(cmd, start, buffer->sink, current);
}
}
static void component_cache_buffers_upstream(struct comp_dev *start,
struct comp_dev *current,
struct comp_buffer *buffer,
cache_command cache_cmd)
static void component_cache_upstream(int cmd, struct comp_dev *start,
struct comp_dev *current,
struct comp_dev *previous)
{
cache_command cache_cmd = comp_get_cache_command(cmd);
struct list_item *clist;
struct comp_buffer *buffer;
if (current != start && buffer) {
cache_cmd(buffer, sizeof(*buffer));
comp_cache(current, cmd);
/* stop if we reach an endpoint */
if (current->is_endpoint)
return;
}
/* we finish walking the graph if we reach the DAI */
if (current != start && current->is_endpoint)
return;
/* travel further */
/* now run this operation upstream */
list_for_item(clist, &current->bsource_list) {
buffer = container_of(clist, struct comp_buffer, sink_list);
/* stop going if this component is not connected */
if (cache_cmd)
cache_cmd(buffer, sizeof(*buffer));
/* don't go upstream if this component is not connected */
if (!buffer->connected)
continue;
component_cache_buffers_upstream(start, buffer->source,
buffer, cache_cmd);
component_cache_upstream(cmd, start, buffer->source, current);
}
}
void pipeline_cache(struct pipeline *p, struct comp_dev *dev, int cmd)
{
cache_command cache_cmd = comp_get_cache_command(cmd);
struct op_data op_data;
uint32_t flags;
trace_pipe("cac");
op_data.p = p;
op_data.op = COMP_OPS_CACHE;
op_data.cmd = cmd;
spin_lock_irq(&p->lock, flags);
if (dev->params.direction == SOF_IPC_STREAM_PLAYBACK) {
/* execute cache operation on components downstream */
component_op_downstream(&op_data, dev, dev, NULL);
/* execute cache operation on buffers downstream */
component_cache_buffers_downstream(dev, dev, NULL, cache_cmd);
} else {
/* execute cache operation on components upstream */
component_op_upstream(&op_data, dev, dev, NULL);
/* execute cache operation on buffers upstream */
component_cache_buffers_upstream(dev, dev, NULL, cache_cmd);
}
if (dev->params.direction == SOF_IPC_STREAM_PLAYBACK)
/* execute cache op on components and buffers downstream */
component_cache_downstream(cmd, dev, dev, NULL);
else
/* execute cache op on components and buffers upstream */
component_cache_upstream(cmd, dev, dev, NULL);
/* execute cache operation on pipeline itself */
if (cache_cmd)

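The rewritten walk above resolves the dcache operation once per call
via comp_get_cache_command() and then applies comp_cache() to each
component and cache_cmd() to each connected buffer. A plausible sketch
of that helper (the command names and typedef are assumptions, not
shown in this diff):

	typedef void (*cache_command)(void *addr, size_t size);

	static inline cache_command comp_get_cache_command(int cmd)
	{
		switch (cmd) {
		case CACHE_WRITEBACK_INV:
			return &dcache_writeback_invalidate_region;
		case CACHE_INVALIDATE:
			return &dcache_invalidate_region;
		default:
			/* unknown command: the walk still calls
			 * comp_cache() on components, but the
			 * if (cache_cmd) guard skips buffer flushing
			 */
			return NULL;
		}
	}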
View File

@@ -603,7 +603,8 @@ static int ssp_probe(struct dai *dai)
struct ssp_pdata *ssp;
/* allocate private data */
ssp = rzalloc(RZONE_SYS, SOF_MEM_CAPS_RAM, sizeof(*ssp));
ssp = rzalloc(RZONE_SYS | RZONE_FLAG_UNCACHED, SOF_MEM_CAPS_RAM,
sizeof(*ssp));
dai_set_drvdata(dai, ssp);
spinlock_init(&ssp->lock);

View File

@@ -1447,7 +1447,8 @@ static int dmic_probe(struct dai *dai)
trace_dmic("pro");
/* allocate private data */
dmic = rzalloc(RZONE_SYS, SOF_MEM_CAPS_RAM, sizeof(*dmic));
dmic = rzalloc(RZONE_SYS | RZONE_FLAG_UNCACHED, SOF_MEM_CAPS_RAM,
sizeof(*dmic));
if (!dmic) {
trace_dmic_error("eap");
return -ENOMEM;

View File

@@ -613,7 +613,8 @@ static int hda_dma_probe(struct dma *dma)
struct hda_chan_data *chan;
/* allocate private data */
hda_pdata = rzalloc(RZONE_SYS, SOF_MEM_CAPS_RAM, sizeof(*hda_pdata));
hda_pdata = rzalloc(RZONE_SYS | RZONE_FLAG_UNCACHED, SOF_MEM_CAPS_RAM,
sizeof(*hda_pdata));
dma_set_drvdata(dma, hda_pdata);
spinlock_init(&dma->lock);

View File

@@ -872,7 +872,8 @@ static int ssp_probe(struct dai *dai)
struct ssp_pdata *ssp;
/* allocate private data */
ssp = rzalloc(RZONE_SYS, SOF_MEM_CAPS_RAM, sizeof(*ssp));
ssp = rzalloc(RZONE_SYS | RZONE_FLAG_UNCACHED, SOF_MEM_CAPS_RAM,
sizeof(*ssp));
dai_set_drvdata(dai, ssp);
spinlock_init(&ssp->lock);

View File

@@ -1113,7 +1113,8 @@ static int dw_dma_probe(struct dma *dma)
int i;
/* allocate private data */
dw_pdata = rzalloc(RZONE_SYS, SOF_MEM_CAPS_RAM, sizeof(*dw_pdata));
dw_pdata = rzalloc(RZONE_SYS | RZONE_FLAG_UNCACHED, SOF_MEM_CAPS_RAM,
sizeof(*dw_pdata));
dma_set_drvdata(dma, dw_pdata);
spinlock_init(&dma->lock);

View File

@@ -500,7 +500,8 @@ static int ssp_probe(struct dai *dai)
struct ssp_pdata *ssp;
/* allocate private data */
ssp = rzalloc(RZONE_SYS, SOF_MEM_CAPS_RAM, sizeof(*ssp));
ssp = rzalloc(RZONE_SYS | RZONE_FLAG_UNCACHED, SOF_MEM_CAPS_RAM,
sizeof(*ssp));
dai_set_drvdata(dai, ssp);
spinlock_init(&ssp->lock);

View File

@@ -122,7 +122,6 @@ struct dai {
struct dai_plat_data plat_data;
const struct dai_ops *ops;
void *private;
uint32_t private_size;
};
/**
@@ -153,8 +152,7 @@ void dai_install(struct dai_type_info *dai_type_array, size_t num_dai_types);
struct dai *dai_get(uint32_t type, uint32_t index);
#define dai_set_drvdata(dai, data) \
dai->private = data; \
dai->private_size = sizeof(*data)
dai->private = data;
#define dai_get_drvdata(dai) \
dai->private;
#define dai_base(dai) \

View File

@@ -164,7 +164,6 @@ struct dma {
const struct dma_ops *ops;
atomic_t num_channels_busy; /* number of busy channels */
void *private;
uint32_t private_size;
};
/**
@@ -188,8 +187,7 @@ void dma_install(struct dma *dma_array, size_t num_dmas);
struct dma *dma_get(uint32_t dir, uint32_t caps, uint32_t dev, uint32_t flags);
#define dma_set_drvdata(dma, data) \
dma->private = data; \
dma->private_size = sizeof(*data)
dma->private = data;
#define dma_get_drvdata(dma) \
dma->private;
#define dma_base(dma) \
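
With private_size gone, the drvdata macros reduce to a plain pointer
stash: nothing flushes or invalidates ->private any more, because that
memory now comes from the uncached mapping. A usage sketch (the pdata
struct name here is hypothetical):

	struct my_pdata *pdata = rzalloc(RZONE_SYS | RZONE_FLAG_UNCACHED,
					 SOF_MEM_CAPS_RAM, sizeof(*pdata));
	dma_set_drvdata(dma, pdata);	/* stores only the pointer */
	struct my_pdata *back = dma_get_drvdata(dma);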