trace: don't reschedule trace if it's already in progress.

The purpose of checking whether the local DMA trace buffer is half full
is to send trace data to the host as soon as possible, so that the
buffer does not fill up and unsent trace data does not get overwritten.
If a DMA trace copy is already in progress, there is no need to check
the half-fullness of the local DMA trace buffer.

Implementation details:
1. Add a copy_in_progress flag to the DMA trace data structure.
2. Set/clear this flag in the trace_work() callback.
3. Check this flag in dtrace_event() (a consolidated sketch follows the
   commit metadata below).

Signed-off-by: Yan Wang <yan.wang@linux.intel.com>

Author: Yan Wang <yan.wang@linux.intel.com>
Date: 2017-12-05 18:46:08 +08:00
Committed by: Liam Girdwood
Commit: 6881d96c06 (parent: 3805427244)
2 changed files with 18 additions and 0 deletions
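
Before the hunks, a minimal self-contained sketch of the pattern the
patch implements may help. The struct, lock, and callback names mirror
the diff, but the spinlock and work-queue primitives, the buffer
bookkeeping, and dtrace_event_sketch() are simplified stand-ins for
illustration, not SOF's actual API:

/*
 * Minimal sketch of the copy_in_progress guard. The spinlock and
 * work-queue primitives below are stubs, not SOF's real API; only the
 * flag handling mirrors the diff.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef int spinlock_t;                                 /* stub */
#define spin_lock_irq(lock, flags)      ((void)(flags)) /* stub */
#define spin_unlock_irq(lock, flags)    ((void)(flags)) /* stub */
#define DMA_TRACE_US    500                             /* stub period */

struct dma_trace_data {
	uint32_t avail;			/* bytes pending in the local buffer */
	uint32_t copy_in_progress;	/* the new flag */
	spinlock_t lock;
};

static struct dma_trace_data trace_data;

/* work callback: drains the local trace buffer to the host */
static uint64_t trace_work(void *data, uint64_t delay)
{
	struct dma_trace_data *d = data;
	uint32_t flags = 0;

	(void)delay;

	/* mark the copy as running before touching the buffer */
	d->copy_in_progress = 1;

	/* ... host DMA copy of the local buffer would happen here ... */

	spin_lock_irq(&d->lock, flags);
	d->avail = 0;			/* pretend everything was sent */
	d->copy_in_progress = 0;	/* done: rescheduling allowed again */
	spin_unlock_irq(&d->lock, flags);

	return DMA_TRACE_US;		/* reschedule period */
}

/* event path: appends an event, then maybe schedules a copy */
static void dtrace_event_sketch(const char *e, uint32_t length)
{
	uint32_t flags = 0;

	(void)e;

	spin_lock_irq(&trace_data.lock, flags);
	trace_data.avail += length;	/* stand-in for dtrace_add_event() */

	/* a copy is already draining the buffer, so skip the
	 * half-fullness check and the redundant reschedule */
	if (trace_data.copy_in_progress) {
		spin_unlock_irq(&trace_data.lock, flags);
		return;
	}
	spin_unlock_irq(&trace_data.lock, flags);

	/* ... the real code now schedules trace_work() if the local
	 * buffer is more than half full ... */
}

int main(void)
{
	dtrace_event_sketch("hello", 5);
	trace_work(&trace_data, 0);
	printf("avail after copy: %" PRIu32 "\n", trace_data.avail);
	return 0;
}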


@@ -60,6 +60,7 @@ struct dma_trace_data {
 	uint32_t host_size;
 	struct work dmat_work;
 	uint32_t enabled;
+	uint32_t copy_in_progress;
 	spinlock_t lock;
 };


@@ -56,6 +56,9 @@ static uint64_t trace_work(void *data, uint64_t delay)
 	if (avail == 0)
 		return DMA_TRACE_US;
 
+	/* DMA trace copying is working */
+	d->copy_in_progress = 1;
+
 	/* make sure we dont write more than buffer */
 	if (avail > DMA_TRACE_LOCAL_SIZE)
 		avail = DMA_TRACE_LOCAL_SIZE;
@@ -100,7 +103,12 @@ static uint64_t trace_work(void *data, uint64_t delay)
 out:
 	spin_lock_irq(&d->lock, flags);
 	buffer->avail -= size;
+
+	/* DMA trace copying is done */
+	d->copy_in_progress = 0;
+
 	spin_unlock_irq(&d->lock, flags);
 
 	/* reschedule the trace copying work */
@@ -138,6 +146,7 @@ int dma_trace_init(struct dma_trace_data *d)
 	buffer->avail = 0;
 	d->host_offset = 0;
 	d->enabled = 0;
+	d->copy_in_progress = 0;
 
 	list_init(&d->config.elem_list);
 	work_init(&d->dmat_work, trace_work, d, WORK_ASYNC);
@@ -226,6 +235,14 @@ void dtrace_event(const char *e, uint32_t length)
 	spin_lock_irq(&trace_data->lock, flags);
 	dtrace_add_event(e, length);
+
+	/* if DMA trace copying is working */
+	/* don't check if local buffer is half full */
+	if (trace_data->copy_in_progress) {
+		spin_unlock_irq(&trace_data->lock, flags);
+		return;
+	}
+
 	spin_unlock_irq(&trace_data->lock, flags);
 
 	/* schedule copy now if buffer > 50% full */
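
A note on the locking: trace_work() raises copy_in_progress before the
copy starts but clears it under the same spinlock that dtrace_event()
holds for its check, so the event path sees the flag drop and
buffer->avail shrink together once a copy completes. For reference, the
guarded entry point keeps the signature shown in the last hunk header;
a direct call would look like the following (a hypothetical call site,
since callers typically reach dtrace_event() through SOF's tracing
macros):

/* hypothetical direct call; length excludes the terminating NUL */
const char msg[] = "pipeline xrun";
dtrace_event(msg, sizeof(msg) - 1);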