dma: hda: playback preload phase synced with logical buffer state

Removed a DMA/DSP race condition that occurred during the preload
phase.
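For host-initialized buffers, the DAI start is now deferred from
the trigger to the first copy() call, once the host has actually
populated the buffer (see the dai_copy() changes below).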

Min buffer size is set to the entire buffer to avoid DMA and DSP
pointer misalignment in case the period size is not a multiple
of the HDA DMA burst size.
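
Illustrative example (numbers assumed, not taken from this patch):
with a 32-byte burst/alignment, a mono 16-bit 44.1 kHz stream with
a 44-frame (1 ms) period gives 88-byte periods; 88 is not a
multiple of 32, so a period-sized DGMBS threshold lets the link
DMA position (advancing in bursts) and the DSP position (advancing
in periods) drift apart. Programming DGMBS with the aligned size
of the whole buffer keeps both pointers consistent across wraps.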

Signed-off-by: Marcin Maka <marcin.maka@linux.intel.com>
Marcin Maka 2018-09-06 21:50:54 +02:00
parent 7abb55fe72
commit fee9f9656d
4 changed files with 112 additions and 39 deletions


@@ -48,6 +48,9 @@
 #define DAI_PLAYBACK_STREAM     0
 #define DAI_CAPTURE_STREAM      1
 
+#define DAI_PTR_INIT_DAI        1       /* buffer ptr initialized by dai */
+#define DAI_PTR_INIT_HOST       2       /* buffer ptr initialized by host */
+
 /* tracing */
 #define trace_dai(__e) trace_event(TRACE_CLASS_DAI, __e)
 #define trace_dai_error(__e) trace_error(TRACE_CLASS_DAI, __e)
@@ -523,6 +526,8 @@ static void dai_pointer_init(struct comp_dev *dev)
        struct comp_buffer *dma_buffer;
        struct dai_data *dd = comp_get_drvdata(dev);
 
+       dd->pointer_init = DAI_PTR_INIT_DAI;
+
        /* not required for capture streams */
        if (dev->params.direction == SOF_IPC_STREAM_PLAYBACK) {
                dma_buffer = list_first_item(&dev->bsource_list,
@@ -532,6 +537,7 @@ static void dai_pointer_init(struct comp_dev *dev)
                case SOF_COMP_HOST:
                case SOF_COMP_SG_HOST:
                        /* buffer is preloaded and advanced by host DMA engine */
+                       dd->pointer_init = DAI_PTR_INIT_HOST;
                        break;
                default:
                        /* advance source pipeline w_ptr by one period
@@ -540,8 +546,6 @@ static void dai_pointer_init(struct comp_dev *dev)
                        break;
                }
        }
-
-       dd->pointer_init = 1;
 }
 
 /* used to pass standard and bespoke command (with data) to component */
@@ -563,8 +567,12 @@ static int dai_comp_trigger(struct comp_dev *dev, int cmd)
                trace_dai("tsa");
                if (!dd->pointer_init)
                        dai_pointer_init(dev);
-               /* only start the DAI if we are not XRUN handling */
-               if (dd->xrun == 0) {
+               /* only start the DAI if we are not XRUN handling
+                * and the ptr is not initialized by the host as in this
+                * case start is deferred to the first copy call as the buffer
+                * is populated by the host only then
+                */
+               if (dd->xrun == 0 && dd->pointer_init != DAI_PTR_INIT_HOST) {
                        /* start the DAI */
                        ret = dma_start(dd->dma, dd->chan);
                        if (ret < 0)
@@ -632,6 +640,18 @@ static int dai_comp_trigger(struct comp_dev *dev, int cmd)
 /* copy and process stream data from source to sink buffers */
 static int dai_copy(struct comp_dev *dev)
 {
+       struct dai_data *dd = comp_get_drvdata(dev);
+       int ret;
+
+       if (dd->pointer_init == DAI_PTR_INIT_HOST) {
+               /* start the DAI */
+               ret = dma_start(dd->dma, dd->chan);
+               if (ret < 0)
+                       return ret;
+               dai_trigger(dd->dai, COMP_TRIGGER_START, dev->params.direction);
+               dd->pointer_init = DAI_PTR_INIT_DAI;    /* next copy just quits */
+               platform_dai_wallclock(dev, &dd->wallclock);
+       }
        return 0;
 }


@@ -741,7 +741,7 @@ static int host_copy_int(struct comp_dev *dev, bool preload_run)
 #if defined CONFIG_DMA_GW
        /* tell gateway to copy another period */
        ret = dma_copy(hd->dma, hd->chan, hd->period_bytes,
-                      preload_run ? DMA_COPY_NO_COMMIT : 0);
+                      preload_run ? DMA_COPY_PRELOAD : 0);
        if (ret < 0)
                goto out;


@@ -86,11 +86,15 @@
 #define HDA_LINK_1MS_US        1000
 
+#define HDA_STATE_PRELOAD      BIT(0)
+#define HDA_STATE_BF_WAIT      BIT(1)
+
 struct hda_chan_data {
        struct dma *dma;
        uint32_t index;
        uint32_t stream_id;
-       uint32_t status;
+       uint32_t status;        /* common status */
+       uint32_t state;         /* hda specific additional state */
        uint32_t desc_count;
        uint32_t desc_avail;
        uint32_t direction;
@@ -186,42 +190,77 @@ static inline uint32_t hda_dma_get_data_size(struct dma *dma, uint32_t chan)
        return ds;
 }
 
-static int hda_dma_copy_ch(struct dma *dma, struct hda_chan_data *chan,
-                          int bytes, uint32_t flags)
+static int hda_dma_preload(struct dma *dma, struct hda_chan_data *chan)
 {
-       struct dma_sg_elem next;
+       struct dma_sg_elem next = {
+               .src = DMA_RELOAD_LLI,
+               .dest = DMA_RELOAD_LLI,
+               .size = DMA_RELOAD_LLI
+       };
+       int i;
+       int period_cnt;
+
+       /* waiting for buffer full after start
+        * first try is unblocking, then blocking
+        */
+       while (!(host_dma_reg_read(dma, chan->index, DGCS) & DGCS_BF) &&
+              (chan->state & HDA_STATE_BF_WAIT))
+               ;
+
+       if (host_dma_reg_read(dma, chan->index, DGCS) & DGCS_BF) {
+               chan->state &= ~(HDA_STATE_PRELOAD | HDA_STATE_BF_WAIT);
+               if (chan->cb) {
+                       /* loop over each period */
+                       period_cnt = chan->buffer_bytes /
+                                    chan->period_bytes;
+                       for (i = 0; i < period_cnt; i++)
+                               chan->cb(chan->cb_data,
+                                        DMA_IRQ_TYPE_LLIST, &next);
+                       /* do not need to test out next in this path */
+               }
+       } else {
+               /* next call in pre-load state will be blocking */
+               chan->state |= HDA_STATE_BF_WAIT;
+       }
+
+       return 0;
+}
+
+static int hda_dma_copy_ch(struct dma *dma, struct hda_chan_data *chan,
+                          int bytes)
+{
+       struct dma_sg_elem next = {
+               .src = DMA_RELOAD_LLI,
+               .dest = DMA_RELOAD_LLI,
+               .size = DMA_RELOAD_LLI
+       };
+       uint32_t flags;
        uint32_t dgcs = 0;
 
        tracev_host("GwU");
 
-       if (flags & DMA_COPY_NO_COMMIT) {
-               /* delay to fill the buffer, only for the first time
-                * for _input_ dma.
-                */
-               idelay(PLATFORM_DEFAULT_DELAY);
-       } else {
-               /* clear link xruns */
-               dgcs = host_dma_reg_read(dma, chan->index, DGCS);
-               if (dgcs & DGCS_BOR)
-                       hda_update_bits(dma, chan->index,
-                                       DGCS, DGCS_BOR, DGCS_BOR);
+       /* clear link xruns */
+       dgcs = host_dma_reg_read(dma, chan->index, DGCS);
+       if (dgcs & DGCS_BOR)
+               hda_update_bits(dma, chan->index,
+                               DGCS, DGCS_BOR, DGCS_BOR);
 
-               /* make sure that previous transfer is complete */
-               if (chan->direction == DMA_DIR_MEM_TO_DEV) {
-                       while (hda_dma_get_free_size(dma, chan->index) <
-                              bytes)
-                               idelay(PLATFORM_DEFAULT_DELAY);
-               }
+       /* make sure that previous transfer is complete */
+       if (chan->direction == DMA_DIR_MEM_TO_DEV) {
+               while (hda_dma_get_free_size(dma, chan->index) <
+                      bytes)
+                       idelay(PLATFORM_DEFAULT_DELAY);
+       }
 
-               /*
-                * set BFPI to let host gateway knows we have read size,
-                * which will trigger next copy start.
-                */
-               if (chan->direction == DMA_DIR_MEM_TO_DEV)
-                       hda_dma_inc_link_fp(dma, chan->index, bytes);
-               else
-                       hda_dma_inc_fp(dma, chan->index, bytes);
-       }
+       /*
+        * set BFPI to let host gateway knows we have read size,
+        * which will trigger next copy start.
+        */
+       if (chan->direction == DMA_DIR_MEM_TO_DEV)
+               hda_dma_inc_link_fp(dma, chan->index, bytes);
+       else
+               hda_dma_inc_fp(dma, chan->index, bytes);
 
        spin_lock_irq(&dma->lock, flags);
        if (chan->cb) {
                next.src = DMA_RELOAD_LLI;
@@ -245,8 +284,8 @@ static uint64_t hda_dma_work(void *data, uint64_t delay)
 {
        struct hda_chan_data *chan = (struct hda_chan_data *)data;
 
-       hda_dma_copy_ch(chan->dma, chan, chan->period_bytes, 0);
+       hda_dma_copy_ch(chan->dma, chan, chan->period_bytes);
 
-       /* next time to re-arm (TODO: should sub elapsed time?) */
+       /* next time to re-arm */
        return HDA_LINK_1MS_US;
 }
@@ -254,8 +293,15 @@ static uint64_t hda_dma_work(void *data, uint64_t delay)
 static int hda_dma_copy(struct dma *dma, int channel, int bytes, uint32_t flags)
 {
        struct dma_pdata *p = dma_get_drvdata(dma);
+       struct hda_chan_data *chan = p->chan + channel;
 
-       return hda_dma_copy_ch(dma, &p->chan[channel], bytes, flags);
+       if (flags & DMA_COPY_PRELOAD)
+               chan->state |= HDA_STATE_PRELOAD;
+
+       if (chan->state & HDA_STATE_PRELOAD)
+               return hda_dma_preload(dma, chan);
+       else
+               return hda_dma_copy_ch(dma, chan, bytes);
 }
 
 /* acquire the specific DMA channel */
@@ -292,6 +338,7 @@ static void hda_dma_channel_put_unlocked(struct dma *dma, int channel)
        /* set new state */
        p->chan[channel].status = COMP_STATE_INIT;
+       p->chan[channel].state = 0;
        p->chan[channel].period_bytes = 0;
        p->chan[channel].buffer_bytes = 0;
        p->chan[channel].cb = NULL;
@@ -495,6 +542,12 @@ static int hda_dma_set_config(struct dma *dma, int channel,
                if (buffer_addr == 0)
                        buffer_addr = addr;
        }
 
+       /* buffer size must be multiple of hda dma burst size */
+       if (buffer_bytes % PLATFORM_HDA_BUFFER_ALIGNMENT) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        p->chan[channel].period_bytes = period_bytes;
        p->chan[channel].buffer_bytes = buffer_bytes;
@@ -511,7 +564,7 @@ static int hda_dma_set_config(struct dma *dma, int channel,
        if (config->direction == DMA_DIR_LMEM_TO_HMEM ||
            config->direction == DMA_DIR_HMEM_TO_LMEM)
                host_dma_reg_write(dma, channel, DGMBS,
-                                  ALIGN_UP(period_bytes,
+                                  ALIGN_UP(buffer_bytes,
                                            PLATFORM_HDA_BUFFER_ALIGNMENT));
 
        /* firmware control buffer */


@@ -81,7 +81,7 @@
 #define DMA_IRQ_TYPE_LLIST     BIT(1)
 
 /* DMA copy flags */
-#define DMA_COPY_NO_COMMIT     BIT(0)
+#define DMA_COPY_PRELOAD       BIT(0)
 
 /* We will use this macro in cb handler to inform dma that
  * we need to stop the reload for special purpose
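
To see how the new flag drives the preload state machine end to
end, here is a minimal, self-contained sketch. It is a hypothetical
simplification of the hda-dma changes above: register polling is
replaced by a stub, names are shortened, and the blocking wait is
modeled by the BF_WAIT bit only.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_COPY_PRELOAD        (1u << 0)
#define HDA_STATE_PRELOAD       (1u << 0)
#define HDA_STATE_BF_WAIT       (1u << 1)

struct chan {
        uint32_t state;
        uint32_t buffer_bytes;
        uint32_t period_bytes;
};

/* stub for the DGCS_BF ("buffer full") register bit */
static bool buffer_full(int call)
{
        return call > 0;        /* pretend BF sets after the first poll */
}

static void copy(struct chan *c, uint32_t flags, int call)
{
        uint32_t i;

        if (flags & DMA_COPY_PRELOAD)
                c->state |= HDA_STATE_PRELOAD;

        if (c->state & HDA_STATE_PRELOAD) {
                if (buffer_full(call)) {
                        /* leave preload and fire one callback per period */
                        c->state &= ~(HDA_STATE_PRELOAD | HDA_STATE_BF_WAIT);
                        for (i = 0; i < c->buffer_bytes / c->period_bytes; i++)
                                printf("preload period callback %" PRIu32 "\n", i);
                } else {
                        /* not full yet: the next preload call blocks */
                        c->state |= HDA_STATE_BF_WAIT;
                }
                return;
        }

        printf("normal copy of %" PRIu32 " bytes\n", c->period_bytes);
}

int main(void)
{
        struct chan c = { .state = 0, .buffer_bytes = 384, .period_bytes = 192 };

        copy(&c, DMA_COPY_PRELOAD, 0);  /* BF not set -> arm BF_WAIT */
        copy(&c, 0, 1);                 /* BF set -> drain preload periods */
        copy(&c, 0, 2);                 /* preload done -> normal copy */
        return 0;
}

In the real driver the second call busy-waits on DGCS_BF while
HDA_STATE_BF_WAIT is set, which is what synchronizes the playback
preload phase with the logical buffer state.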