Merge pull request #336 from mmaka1/cherry-pick/hda-dma-init-fix

dma: hda: playback preload phase synced with logical buffer state
Liam Girdwood 2018-09-10 14:01:45 +01:00 committed by GitHub
commit 6c3b0ac18b
4 changed files with 112 additions and 39 deletions

src/audio/dai.c

@@ -48,6 +48,9 @@
#define DAI_PLAYBACK_STREAM 0
#define DAI_CAPTURE_STREAM 1
#define DAI_PTR_INIT_DAI 1 /* buffer ptr initialized by dai */
#define DAI_PTR_INIT_HOST 2 /* buffer ptr initialized by host */
/* tracing */
#define trace_dai(__e) trace_event(TRACE_CLASS_DAI, __e)
#define trace_dai_error(__e) trace_error(TRACE_CLASS_DAI, __e)
@@ -523,6 +526,8 @@ static void dai_pointer_init(struct comp_dev *dev)
struct comp_buffer *dma_buffer;
struct dai_data *dd = comp_get_drvdata(dev);
dd->pointer_init = DAI_PTR_INIT_DAI;
/* not required for capture streams */
if (dev->params.direction == SOF_IPC_STREAM_PLAYBACK) {
dma_buffer = list_first_item(&dev->bsource_list,
@@ -532,6 +537,7 @@ static void dai_pointer_init(struct comp_dev *dev)
case SOF_COMP_HOST:
case SOF_COMP_SG_HOST:
/* buffer is preloaded and advanced by host DMA engine */
dd->pointer_init = DAI_PTR_INIT_HOST;
break;
default:
/* advance source pipeline w_ptr by one period
@@ -540,8 +546,6 @@ static void dai_pointer_init(struct comp_dev *dev)
break;
}
}
dd->pointer_init = 1;
}
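The hunk above replaces the old magic value (`dd->pointer_init = 1`) with named pointer-init modes. A condensed sketch of the playback decision, using simplified stand-in types (the enum values match the new defines, but playback_ptr_init() and src_comp are illustrative, not SOF code):

enum dai_ptr_init {
	DAI_PTR_INIT_DAI = 1,	/* buffer ptr initialized by dai */
	DAI_PTR_INIT_HOST = 2	/* buffer ptr initialized by host */
};

enum src_comp { SRC_HOST, SRC_SG_HOST, SRC_OTHER };

static enum dai_ptr_init playback_ptr_init(enum src_comp src)
{
	switch (src) {
	case SRC_HOST:
	case SRC_SG_HOST:
		/* host DMA preloads and advances the buffer */
		return DAI_PTR_INIT_HOST;
	default:
		/* firmware advances the source w_ptr itself */
		return DAI_PTR_INIT_DAI;
	}
}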
/* used to pass standard and bespoke command (with data) to component */
@@ -563,8 +567,12 @@ static int dai_comp_trigger(struct comp_dev *dev, int cmd)
trace_dai("tsa");
if (!dd->pointer_init)
dai_pointer_init(dev);
/* only start the DAI if we are not XRUN handling */
if (dd->xrun == 0) {
/* only start the DAI if we are not handling an XRUN
 * and the pointer was not initialized by the host; in that
 * case the start is deferred to the first copy call, when
 * the host has populated the buffer
 */
if (dd->xrun == 0 && dd->pointer_init != DAI_PTR_INIT_HOST) {
/* start the DAI */
ret = dma_start(dd->dma, dd->chan);
if (ret < 0)
@@ -632,6 +640,18 @@ static int dai_comp_trigger(struct comp_dev *dev, int cmd)
/* copy and process stream data from source to sink buffers */
static int dai_copy(struct comp_dev *dev)
{
struct dai_data *dd = comp_get_drvdata(dev);
int ret;
if (dd->pointer_init == DAI_PTR_INIT_HOST) {
/* start the DAI */
ret = dma_start(dd->dma, dd->chan);
if (ret < 0)
return ret;
dai_trigger(dd->dai, COMP_TRIGGER_START, dev->params.direction);
dd->pointer_init = DAI_PTR_INIT_DAI; /* later copies skip this branch */
platform_dai_wallclock(dev, &dd->wallclock);
}
return 0;
}
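Taken together, the trigger and copy hunks defer dma_start() for host-preloaded playback until the first copy, when the host has actually filled the buffer. A minimal self-contained model of that handshake; the struct and model_* names are illustrative, not firmware code:

#include <stdio.h>

enum ptr_init { PTR_UNINIT, PTR_INIT_DAI, PTR_INIT_HOST };

struct dai_model {
	enum ptr_init pointer_init;
	int dma_started;
};

/* trigger: start at once unless the host preloads the buffer */
static void model_trigger(struct dai_model *d, int host_preloads)
{
	d->pointer_init = host_preloads ? PTR_INIT_HOST : PTR_INIT_DAI;
	if (d->pointer_init != PTR_INIT_HOST)
		d->dma_started = 1;	/* classic path: start at trigger */
}

/* copy: the first call starts the DMA on the host-preload path */
static void model_copy(struct dai_model *d)
{
	if (d->pointer_init == PTR_INIT_HOST) {
		d->dma_started = 1;		/* buffer is populated now */
		d->pointer_init = PTR_INIT_DAI;	/* later copies skip this */
	}
}

int main(void)
{
	struct dai_model d = { PTR_UNINIT, 0 };

	model_trigger(&d, 1);
	printf("after trigger: started=%d\n", d.dma_started);	/* 0 */
	model_copy(&d);
	printf("after first copy: started=%d\n", d.dma_started);	/* 1 */
	return 0;
}

Compiled standalone, the model prints started=0 after trigger and started=1 after the first copy, mirroring the deferred start.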

src/audio/host.c

@@ -741,7 +741,7 @@ static int host_copy_int(struct comp_dev *dev, bool preload_run)
#if defined CONFIG_DMA_GW
/* tell gateway to copy another period */
ret = dma_copy(hd->dma, hd->chan, hd->period_bytes,
preload_run ? DMA_COPY_NO_COMMIT : 0);
preload_run ? DMA_COPY_PRELOAD : 0);
if (ret < 0)
goto out;

src/drivers/hda-dma.c

@@ -86,11 +86,15 @@
#define HDA_LINK_1MS_US 1000
#define HDA_STATE_PRELOAD BIT(0)
#define HDA_STATE_BF_WAIT BIT(1)
struct hda_chan_data {
struct dma *dma;
uint32_t index;
uint32_t stream_id;
uint32_t status;
uint32_t status; /* common status */
uint32_t state; /* hda specific additional state */
uint32_t desc_count;
uint32_t desc_avail;
uint32_t direction;
@@ -186,42 +190,77 @@ static inline uint32_t hda_dma_get_data_size(struct dma *dma, uint32_t chan)
return ds;
}
static int hda_dma_copy_ch(struct dma *dma, struct hda_chan_data *chan,
int bytes, uint32_t flags)
static int hda_dma_preload(struct dma *dma, struct hda_chan_data *chan)
{
struct dma_sg_elem next;
struct dma_sg_elem next = {
.src = DMA_RELOAD_LLI,
.dest = DMA_RELOAD_LLI,
.size = DMA_RELOAD_LLI
};
int i;
int period_cnt;
/* wait for the buffer-full flag after start;
 * the first try is non-blocking, subsequent tries block
 */
while (!(host_dma_reg_read(dma, chan->index, DGCS) & DGCS_BF) &&
(chan->state & HDA_STATE_BF_WAIT))
;
if (host_dma_reg_read(dma, chan->index, DGCS) & DGCS_BF) {
chan->state &= ~(HDA_STATE_PRELOAD | HDA_STATE_BF_WAIT);
if (chan->cb) {
/* loop over each period */
period_cnt = chan->buffer_bytes /
chan->period_bytes;
for (i = 0; i < period_cnt; i++)
chan->cb(chan->cb_data,
DMA_IRQ_TYPE_LLIST, &next);
/* no need to check the next element in this path */
}
} else {
/* next call in pre-load state will be blocking */
chan->state |= HDA_STATE_BF_WAIT;
}
return 0;
}
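A sketch of the two-phase wait implemented above, assuming a fake_bf flag stands in for the DGCS_BF register bit and release_period() stands in for the per-period DMA_IRQ_TYPE_LLIST callback (both hypothetical stand-ins):

#include <stdbool.h>
#include <stdint.h>

#define HDA_STATE_PRELOAD	(1u << 0)
#define HDA_STATE_BF_WAIT	(1u << 1)

static volatile bool fake_bf;	/* stands in for DGCS_BF */

/* stand-in for releasing one period to the LLI callback */
static void release_period(int i)
{
	(void)i;
}

static void preload_model(uint32_t *state, int period_cnt)
{
	int i;

	/* spin only once BF_WAIT is set, i.e. from the second call on */
	while (!fake_bf && (*state & HDA_STATE_BF_WAIT))
		;

	if (fake_bf) {
		/* buffer full: leave preload and release every period */
		*state &= ~(HDA_STATE_PRELOAD | HDA_STATE_BF_WAIT);
		for (i = 0; i < period_cnt; i++)
			release_period(i);
	} else {
		/* not full yet: make the next call block */
		*state |= HDA_STATE_BF_WAIT;
	}
}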
static int hda_dma_copy_ch(struct dma *dma, struct hda_chan_data *chan,
int bytes)
{
struct dma_sg_elem next = {
.src = DMA_RELOAD_LLI,
.dest = DMA_RELOAD_LLI,
.size = DMA_RELOAD_LLI
};
uint32_t flags;
uint32_t dgcs = 0;
tracev_host("GwU");
if (flags & DMA_COPY_NO_COMMIT) {
/* delay to fill the buffer, only for the first time
 * for _input_ dma.
 */
idelay(PLATFORM_DEFAULT_DELAY);
} else {
/* clear link xruns */
dgcs = host_dma_reg_read(dma, chan->index, DGCS);
if (dgcs & DGCS_BOR)
hda_update_bits(dma, chan->index,
DGCS, DGCS_BOR, DGCS_BOR);
/* make sure that the previous transfer is complete */
if (chan->direction == DMA_DIR_MEM_TO_DEV) {
while (hda_dma_get_free_size(dma, chan->index) <
bytes)
idelay(PLATFORM_DEFAULT_DELAY);
}
/*
 * set BFPI to let the host gateway know the size we have read,
 * which will trigger the next copy.
 */
if (chan->direction == DMA_DIR_MEM_TO_DEV)
hda_dma_inc_link_fp(dma, chan->index, bytes);
else
hda_dma_inc_fp(dma, chan->index, bytes);
spin_lock_irq(&dma->lock, flags);
if (chan->cb) {
next.src = DMA_RELOAD_LLI;
@@ -245,8 +284,8 @@ static uint64_t hda_dma_work(void *data, uint64_t delay)
{
struct hda_chan_data *chan = (struct hda_chan_data *)data;
hda_dma_copy_ch(chan->dma, chan, chan->period_bytes, 0);
/* next time to re-arm (TODO: should sub elapsed time?) */
hda_dma_copy_ch(chan->dma, chan, chan->period_bytes);
/* next time to re-arm */
return HDA_LINK_1MS_US;
}
@@ -254,8 +293,15 @@ static uint64_t hda_dma_work(void *data, uint64_t delay)
static int hda_dma_copy(struct dma *dma, int channel, int bytes, uint32_t flags)
{
struct dma_pdata *p = dma_get_drvdata(dma);
struct hda_chan_data *chan = p->chan + channel;
return hda_dma_copy_ch(dma, &p->chan[channel], bytes, flags);
if (flags & DMA_COPY_PRELOAD)
chan->state |= HDA_STATE_PRELOAD;
if (chan->state & HDA_STATE_PRELOAD)
return hda_dma_preload(dma, chan);
else
return hda_dma_copy_ch(dma, chan, bytes);
}
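The dispatch above latches the preload phase: DMA_COPY_PRELOAD needs to be passed only once, and every later hda_dma_copy() call stays in the preload path until DGCS_BF clears the state. A hypothetical caller sketch, assuming the dma.h inline wrapper and include path:

#include <sof/dma.h>	/* assumed include path for the dma_copy() wrapper */

/* hypothetical caller: the first copy enters preload, later ones stream */
static int preload_then_stream(struct dma *dma, int channel, int bytes)
{
	/* latch HDA_STATE_PRELOAD in the channel */
	int ret = dma_copy(dma, channel, bytes, DMA_COPY_PRELOAD);

	if (ret < 0)
		return ret;

	/* stays in the preload path until DGCS_BF reports buffer full,
	 * then subsequent calls take hda_dma_copy_ch()
	 */
	return dma_copy(dma, channel, bytes, 0);
}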
/* acquire the specific DMA channel */
@@ -292,6 +338,7 @@ static void hda_dma_channel_put_unlocked(struct dma *dma, int channel)
/* set new state */
p->chan[channel].status = COMP_STATE_INIT;
p->chan[channel].state = 0;
p->chan[channel].period_bytes = 0;
p->chan[channel].buffer_bytes = 0;
p->chan[channel].cb = NULL;
@@ -495,6 +542,12 @@ static int hda_dma_set_config(struct dma *dma, int channel,
if (buffer_addr == 0)
buffer_addr = addr;
}
/* buffer size must be multiple of hda dma burst size */
if (buffer_bytes % PLATFORM_HDA_BUFFER_ALIGNMENT) {
ret = -EINVAL;
goto out;
}
p->chan[channel].period_bytes = period_bytes;
p->chan[channel].buffer_bytes = buffer_bytes;
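The new check rejects buffers the HDA DMA burst cannot cover evenly. A minimal sketch, assuming an illustrative 32-byte alignment (the real value comes from the platform headers):

#include <errno.h>
#include <stdint.h>

#define PLATFORM_HDA_BUFFER_ALIGNMENT 32	/* illustrative value only */

/* reject buffer sizes that are not a multiple of the burst size */
static int check_hda_buffer_size(uint32_t buffer_bytes)
{
	if (buffer_bytes % PLATFORM_HDA_BUFFER_ALIGNMENT)
		return -EINVAL;
	return 0;
}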
@@ -511,7 +564,7 @@ static int hda_dma_set_config(struct dma *dma, int channel,
if (config->direction == DMA_DIR_LMEM_TO_HMEM ||
config->direction == DMA_DIR_HMEM_TO_LMEM)
host_dma_reg_write(dma, channel, DGMBS,
ALIGN_UP(period_bytes,
ALIGN_UP(buffer_bytes,
PLATFORM_HDA_BUFFER_ALIGNMENT));
/* firmware control buffer */

src/include/sof/dma.h

@@ -81,7 +81,7 @@
#define DMA_IRQ_TYPE_LLIST BIT(1)
/* DMA copy flags */
#define DMA_COPY_NO_COMMIT BIT(0)
#define DMA_COPY_PRELOAD BIT(0)
/* We will use this macro in cb handler to inform dma that
* we need to stop the reload for special purpose