schedule: use DMA multi channel scheduling domain

Registers the low-latency scheduler with the DMA multi-channel
domain as its scheduling source. This way we can finally get rid
of the scheduling code in the DW-DMA driver and the DAI component.
Functionally it works the same as the previous implementation,
but moves the scheduling layer to the right place.

Signed-off-by: Tomasz Lauda <tomasz.lauda@linux.intel.com>
This commit is contained in:
Tomasz Lauda 2019-09-16 16:33:31 +02:00 committed by Tomasz Lauda
parent ed9e888358
commit 99fcd37f13
15 changed files with 88 additions and 83 deletions

View File

@ -132,6 +132,7 @@ int slave_core_init(struct sof *sof)
trace_point(TRACE_BOOT_PLATFORM_SCHED);
scheduler_init_edf(sof);
scheduler_init_ll(platform_timer_domain);
scheduler_init_ll(platform_dma_domain);
/* initialize IDC mechanism */
trace_point(TRACE_BOOT_PLATFORM_IDC);

View File

@ -73,18 +73,17 @@ struct dai_data {
uint64_t wallclock; /* wall clock at stream start */
};
static void dai_buffer_process(struct comp_dev *dev, struct dma_cb_data *next)
/* this is called by DMA driver every time descriptor has completed */
static void dai_dma_cb(void *data, uint32_t type, struct dma_cb_data *next)
{
struct comp_dev *dev = (struct comp_dev *)data;
struct dai_data *dd = comp_get_drvdata(dev);
struct dma_sg_config *config = &dd->config;
uint32_t bytes = next->elem.size;
void *buffer_ptr;
tracev_dai_with_ids(dev, "dai_buffer_process()");
tracev_dai_with_ids(dev, "dai_dma_cb()");
/* lli needs to be reloaded if irq is disabled */
next->status = config->irq_disabled ? DMA_CB_STATUS_RELOAD :
DMA_CB_STATUS_IGNORE;
next->status = DMA_CB_STATUS_RELOAD;
/* stop dma copy for pause/stop/xrun */
if (dev->state != COMP_STATE_ACTIVE || dd->xrun) {
@ -126,27 +125,6 @@ static void dai_buffer_process(struct comp_dev *dev, struct dma_cb_data *next)
}
}
/* this is called by DMA driver every time descriptor has completed */
static void dai_dma_cb(void *data, uint32_t type, struct dma_cb_data *next)
{
struct comp_dev *dev = (struct comp_dev *)data;
tracev_dai_with_ids(dev, "dai_dma_cb()");
/* dispatch on the callback type the DMA driver passed in */
switch (type) {
case DMA_CB_TYPE_IRQ:
/* IRQ notification: schedule a copy on this component's pipeline */
pipeline_schedule_copy(dev->pipeline, 0);
break;
case DMA_CB_TYPE_COPY:
/* copy completion: process the completed descriptor in 'next' */
dai_buffer_process(dev, next);
break;
default:
/* unknown callback type — log and ignore */
trace_dai_error_with_ids(dev, "dai_dma_cb() error: Wrong "
"callback type = %u", type);
break;
}
}
static struct comp_dev *dai_new(struct sof_ipc_comp *comp)
{
struct comp_dev *dev;
@ -822,8 +800,7 @@ static int dai_config(struct comp_dev *dev, struct sof_ipc_dai_config *config)
}
/* set up callback */
dma_set_cb(dd->chan, DMA_CB_TYPE_IRQ | DMA_CB_TYPE_COPY,
dai_dma_cb, dev);
dma_set_cb(dd->chan, DMA_CB_TYPE_COPY, dai_dma_cb, dev);
dev->is_dma_connected = 1;
}

View File

@ -39,6 +39,7 @@ struct pipeline_data {
static enum task_state pipeline_task(void *arg);
static enum task_state pipeline_preload_task(void *arg);
static void pipeline_schedule_preload(struct pipeline *p);
/* create new pipeline - returns pipeline id or negative error */
struct pipeline *pipeline_new(struct sof_ipc_pipe_new *pipe_desc,
@ -428,8 +429,11 @@ int pipeline_prepare(struct pipeline *p, struct comp_dev *dev)
/* initialize task if necessary */
if (!p->pipe_task) {
/* right now we always consider pipeline as a low latency
* component, but it may change in the future
*/
type = pipeline_is_timer_driven(p) ? SOF_SCHEDULE_LL_TIMER :
SOF_SCHEDULE_EDF;
SOF_SCHEDULE_LL_DMA;
p->pipe_task = pipeline_task_init(p, type, pipeline_task);
if (!p->pipe_task) {
@ -530,15 +534,13 @@ static void pipeline_comp_trigger_sched_comp(struct pipeline *p,
case COMP_TRIGGER_START:
p->xrun_bytes = 0;
/* playback pipelines need to be scheduled now,
* capture pipelines are scheduled only for
* timer driven scheduling
*/
if (comp->params.direction == SOF_IPC_STREAM_PLAYBACK ||
pipeline_is_timer_driven(p)) {
/* schedule initial pipeline fill */
/* schedule preload if pipeline is not timer driven */
if (p->preload && !pipeline_is_timer_driven(p))
/* schedule pipeline preload */
pipeline_schedule_preload(p);
else
pipeline_schedule_copy(p, 0);
}
p->status = COMP_STATE_ACTIVE;
break;
case COMP_TRIGGER_SUSPEND:
@ -849,8 +851,10 @@ static int pipeline_copy(struct pipeline *p)
start->comp.id, dir);
/* stop preload only after full walkthrough */
if (ret != PPL_STATUS_PATH_STOP)
if (ret != PPL_STATUS_PATH_STOP && p->preload) {
p->preload = false;
pipeline_schedule_copy(p, 0);
}
return ret;
}
@ -999,6 +1003,11 @@ void pipeline_schedule_cancel(struct pipeline *p)
schedule_task_cancel(p->pipe_task);
}
/* Schedule the pipeline's preload task with zero start offset and the
 * pipeline period as the deadline (exact argument semantics per
 * schedule_task() — NOTE(review): confirm against scheduler API).
 */
static void pipeline_schedule_preload(struct pipeline *p)
{
schedule_task(p->preload_task, 0, p->ipc_pipe.period);
}
static enum task_state pipeline_task(void *arg)
{
struct pipeline *p = arg;
@ -1030,9 +1039,7 @@ static enum task_state pipeline_task(void *arg)
tracev_pipe("pipeline_task() sched");
/* automatically reschedule for timer or not finished preload */
return (pipeline_is_timer_driven(p) || p->preload) ?
SOF_TASK_STATE_RESCHEDULE : SOF_TASK_STATE_COMPLETED;
return SOF_TASK_STATE_RESCHEDULE;
}
static enum task_state pipeline_preload_task(void *arg)

View File

@ -134,9 +134,6 @@ static void dw_dma_interrupt_unmask(struct dma_chan_data *channel)
static void dw_dma_interrupt_clear(struct dma_chan_data *channel)
{
#if CONFIG_DW_DMA_AGGREGATED_IRQ
const struct dma_pdata *p = dma_get_drvdata(channel->dma);
#endif
const struct dw_dma_chan_data *chan = dma_chan_get_data(channel);
if (chan->irq_disabled) {
@ -152,13 +149,6 @@ static void dw_dma_interrupt_clear(struct dma_chan_data *channel)
dma_reg_write(channel->dma, DW_CLEAR_SRC_TRAN, DW_CHAN(channel->index));
dma_reg_write(channel->dma, DW_CLEAR_DST_TRAN, DW_CHAN(channel->index));
dma_reg_write(channel->dma, DW_CLEAR_ERR, DW_CHAN(channel->index));
/* clear platform interrupt */
#if CONFIG_DW_DMA_AGGREGATED_IRQ
interrupt_clear_mask(p->irq, DW_CHAN(channel->index));
#else
interrupt_clear_mask(chan->irq, DW_CHAN(channel->index));
#endif
}
static uint32_t dw_dma_interrupt_status(struct dma_chan_data *channel)
@ -292,8 +282,6 @@ static int dw_dma_start(struct dma_chan_data *channel)
goto out;
}
dw_dma_interrupt_clear(channel);
#if CONFIG_HW_LLI
/* LLP mode - write LLP pointer unless in scatter mode */
dma_reg_write(dma, DW_LLP(channel->index), lli->ctrl_lo &
@ -323,19 +311,12 @@ static int dw_dma_start(struct dma_chan_data *channel)
}
#endif
/* enable interrupt only for the first start */
if (channel->status == COMP_STATE_PREPARE)
ret = dw_dma_interrupt_register(channel);
/* assign core */
channel->core = cpu_get_id();
if (!ret) {
/* assign core */
channel->core = cpu_get_id();
/* enable the channel */
channel->status = COMP_STATE_ACTIVE;
dma_reg_write(dma, DW_DMA_CHAN_EN,
DW_CHAN_UNMASK(channel->index));
}
/* enable the channel */
channel->status = COMP_STATE_ACTIVE;
dma_reg_write(dma, DW_DMA_CHAN_EN, DW_CHAN_UNMASK(channel->index));
out:
irq_local_enable(flags);
@ -449,9 +430,6 @@ static int dw_dma_stop(struct dma_chan_data *channel)
dma_reg_write(dma, DW_DMA_CHAN_EN, DW_CHAN_MASK(channel->index));
/* disable interrupt */
dw_dma_interrupt_unregister(channel);
#if CONFIG_HW_LLI
/* clear block interrupt */
dma_reg_write(dma, DW_CLEAR_BLOCK, DW_CHAN(channel->index));
@ -600,8 +578,6 @@ static int dw_dma_set_config(struct dma_chan_data *channel,
}
}
dw_dma_interrupt_unmask(channel);
dw_chan->ptr_data.buffer_bytes = 0;
/* fill in lli for the elems in the list */
@ -961,8 +937,7 @@ static void dw_dma_irq_callback(struct dma_chan_data *channel,
if (channel->cb && channel->cb_type & type)
channel->cb(channel->cb_data, type, next);
if (next->status != DMA_CB_STATUS_IGNORE)
dw_dma_verify_transfer(channel, next);
dw_dma_verify_transfer(channel, next);
}
static int dw_dma_copy(struct dma_chan_data *channel, int bytes,
@ -972,7 +947,7 @@ static int dw_dma_copy(struct dma_chan_data *channel, int bytes,
int ret = 0;
struct dma_cb_data next = {
.elem = { .size = bytes },
.status = DMA_CB_STATUS_RELOAD
.status = DMA_CB_STATUS_END
};
/* for preload and one shot copy just start the DMA and wait */
@ -1307,7 +1282,11 @@ static int dw_dma_avail_data_size(struct dma_chan_data *channel)
{
struct dw_dma_chan_data *dw_chan = dma_chan_get_data(channel);
int32_t read_ptr = dw_chan->ptr_data.current_ptr;
#if CONFIG_HW_LLI
int32_t write_ptr = dma_reg_read(channel->dma, DW_DAR(channel->index));
#else
int32_t write_ptr = ((struct dw_lli *)dw_chan->lli_current->llp)->dar;
#endif
int size;
size = write_ptr - read_ptr;
@ -1325,7 +1304,11 @@ static int dw_dma_avail_data_size(struct dma_chan_data *channel)
static int dw_dma_free_data_size(struct dma_chan_data *channel)
{
struct dw_dma_chan_data *dw_chan = dma_chan_get_data(channel);
#if CONFIG_HW_LLI
int32_t read_ptr = dma_reg_read(channel->dma, DW_SAR(channel->index));
#else
int32_t read_ptr = ((struct dw_lli *)dw_chan->lli_current->llp)->sar;
#endif
int32_t write_ptr = dw_chan->ptr_data.current_ptr;
int size;

View File

@ -192,9 +192,8 @@ static int idc_pipeline_trigger(uint32_t cmd)
* Should be removed after changing memory management for
* slave cores.
*/
if (pcm_dev->cd->pipeline->pipe_task->type ==
SOF_SCHEDULE_EDF) {
task = pcm_dev->cd->pipeline->pipe_task;
if (pcm_dev->cd->pipeline->preload_task) {
task = pcm_dev->cd->pipeline->preload_task;
edf_pdata = edf_sch_get_pdata(task);
dcache_invalidate_region(edf_pdata, sizeof(*edf_pdata));
task_context_cache(edf_pdata->ctx, CACHE_INVALIDATE);
@ -214,9 +213,8 @@ static int idc_pipeline_trigger(uint32_t cmd)
* Should be removed after changing memory management for
* slave cores.
*/
if (pcm_dev->cd->pipeline->pipe_task->type ==
SOF_SCHEDULE_EDF) {
task = pcm_dev->cd->pipeline->pipe_task;
if (pcm_dev->cd->pipeline->preload_task) {
task = pcm_dev->cd->pipeline->preload_task;
edf_pdata = edf_sch_get_pdata(task);
task_context_cache(edf_pdata->ctx, CACHE_WRITEBACK_INV);
dcache_writeback_invalidate_region(edf_pdata,

View File

@ -143,6 +143,7 @@ void platform_wait_for_interrupt(int level);
extern struct timer *platform_timer;
extern struct ll_schedule_domain *platform_timer_domain;
extern struct ll_schedule_domain *platform_dma_domain;
extern intptr_t _module_init_start;
extern intptr_t _module_init_end;

View File

@ -128,6 +128,7 @@ static inline void platform_wait_for_interrupt(int level)
extern struct timer *platform_timer;
extern struct ll_schedule_domain *platform_timer_domain;
extern struct ll_schedule_domain *platform_dma_domain;
extern intptr_t _module_init_start;
extern intptr_t _module_init_end;

View File

@ -6,6 +6,7 @@
// Keyon Jie <yang.jie@linux.intel.com>
#include <sof/debug/debug.h>
#include <sof/drivers/dw-dma.h>
#include <sof/drivers/interrupt.h>
#include <sof/drivers/ipc.h>
#include <sof/drivers/pmc.h>
@ -33,6 +34,7 @@
#include <config.h>
#include <version.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
@ -123,6 +125,7 @@ struct timer timer = {
struct timer *platform_timer = &timer;
struct ll_schedule_domain *platform_timer_domain;
struct ll_schedule_domain *platform_dma_domain;
int platform_boot_complete(uint32_t boot_message)
{
@ -189,12 +192,20 @@ int platform_init(struct sof *sof)
trace_point(TRACE_BOOT_PLATFORM_SCHED);
scheduler_init_edf(sof);
/* init low latency domains and schedulers */
/* init low latency timer domain and scheduler */
platform_timer_domain =
timer_domain_init(platform_timer, PLATFORM_DEFAULT_CLOCK,
PLATFORM_LL_DEFAULT_TIMEOUT);
scheduler_init_ll(platform_timer_domain);
/* init low latency multi channel DW-DMA domain and scheduler */
platform_dma_domain =
dma_multi_chan_domain_init(
&dma[PLATFORM_DW_DMA_INDEX],
PLATFORM_NUM_DW_DMACS,
PLATFORM_DEFAULT_CLOCK, true);
scheduler_init_ll(platform_dma_domain);
/* init the system agent */
trace_point(TRACE_BOOT_PLATFORM_AGENT);
sa_init(sof);

View File

@ -140,6 +140,7 @@ void platform_wait_for_interrupt(int level);
extern struct timer *platform_timer;
extern struct ll_schedule_domain *platform_timer_domain;
extern struct ll_schedule_domain *platform_dma_domain;
extern intptr_t _module_init_start;
extern intptr_t _module_init_end;

View File

@ -117,6 +117,7 @@ static inline void platform_wait_for_interrupt(int level)
extern struct timer *platform_timer;
extern struct ll_schedule_domain *platform_timer_domain;
extern struct ll_schedule_domain *platform_dma_domain;
extern intptr_t _module_init_start;
extern intptr_t _module_init_end;

View File

@ -5,6 +5,7 @@
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
#include <sof/debug/debug.h>
#include <sof/drivers/dw-dma.h>
#include <sof/drivers/interrupt.h>
#include <sof/drivers/ipc.h>
#include <sof/drivers/timer.h>
@ -120,6 +121,7 @@ struct timer timer = {
struct timer *platform_timer = &timer;
struct ll_schedule_domain *platform_timer_domain;
struct ll_schedule_domain *platform_dma_domain;
int platform_boot_complete(uint32_t boot_message)
{
@ -178,12 +180,20 @@ int platform_init(struct sof *sof)
trace_point(TRACE_BOOT_PLATFORM_SCHED);
scheduler_init_edf(sof);
/* init low latency domains and schedulers */
/* init low latency timer domain and scheduler */
platform_timer_domain =
timer_domain_init(platform_timer, PLATFORM_DEFAULT_CLOCK,
PLATFORM_LL_DEFAULT_TIMEOUT);
scheduler_init_ll(platform_timer_domain);
/* init low latency multi channel DW-DMA domain and scheduler */
platform_dma_domain =
dma_multi_chan_domain_init(
&dma[PLATFORM_DW_DMA_INDEX],
PLATFORM_NUM_DW_DMACS,
PLATFORM_DEFAULT_CLOCK, true);
scheduler_init_ll(platform_dma_domain);
/* init the system agent */
trace_point(TRACE_BOOT_PLATFORM_AGENT);
sa_init(sof);

View File

@ -140,6 +140,7 @@ void platform_wait_for_interrupt(int level);
extern struct timer *platform_timer;
extern struct ll_schedule_domain *platform_timer_domain;
extern struct ll_schedule_domain *platform_dma_domain;
extern intptr_t _module_init_start;
extern intptr_t _module_init_end;

View File

@ -10,6 +10,7 @@
#include <cavs/version.h>
#include <sof/common.h>
#include <sof/debug/debug.h>
#include <sof/drivers/dw-dma.h>
#include <sof/drivers/idc.h>
#include <sof/drivers/interrupt.h>
#include <sof/drivers/ipc.h>
@ -244,6 +245,7 @@ struct timer timer = {
struct timer *platform_timer = &timer;
struct ll_schedule_domain *platform_timer_domain;
struct ll_schedule_domain *platform_dma_domain;
#if CONFIG_DW_SPI
@ -378,12 +380,21 @@ int platform_init(struct sof *sof)
trace_point(TRACE_BOOT_PLATFORM_SCHED);
scheduler_init_edf(sof);
/* init low latency domains and schedulers */
/* init low latency timer domain and scheduler */
platform_timer_domain =
timer_domain_init(platform_timer, PLATFORM_DEFAULT_CLOCK,
PLATFORM_LL_DEFAULT_TIMEOUT);
scheduler_init_ll(platform_timer_domain);
/* init low latency multi channel DW-DMA domain and scheduler */
platform_dma_domain =
dma_multi_chan_domain_init(
&dma[PLATFORM_DW_DMA_INDEX],
PLATFORM_NUM_DW_DMACS,
PLATFORM_DEFAULT_CLOCK,
IS_ENABLED(CONFIG_DW_DMA_AGGREGATED_IRQ));
scheduler_init_ll(platform_dma_domain);
/* init the system agent */
trace_point(TRACE_BOOT_PLATFORM_AGENT);
sa_init(sof);

View File

@ -150,6 +150,7 @@ void platform_wait_for_interrupt(int level);
extern struct timer *platform_timer;
extern struct ll_schedule_domain *platform_timer_domain;
extern struct ll_schedule_domain *platform_dma_domain;
extern intptr_t _module_init_start;
extern intptr_t _module_init_end;

View File

@ -140,6 +140,7 @@ void platform_wait_for_interrupt(int level);
extern struct timer *platform_timer;
extern struct ll_schedule_domain *platform_timer_domain;
extern struct ll_schedule_domain *platform_dma_domain;
extern intptr_t _module_init_start;
extern intptr_t _module_init_end;