mirror of https://github.com/thesofproject/sof.git
kpb: move history buffer allocation to prepare
This change moves history buffer allocation from kpb_new() to kpb_prepare(). This is to follow the calling convention used by the kernel. Signed-off-by: Marcin Rajwa <marcin.rajwa@linux.intel.com>
This commit is contained in:
parent
444effac9c
commit
0b279e06bc
|
@ -56,7 +56,9 @@ struct comp_data {
|
|||
bool is_internal_buffer_full;
|
||||
size_t buffered_data;
|
||||
struct dd draining_task_data;
|
||||
size_t buffer_size;
|
||||
size_t kpb_buffer_size;
|
||||
size_t host_buffer_size;
|
||||
size_t host_period_size;
|
||||
};
|
||||
|
||||
/*! KPB private functions */
|
||||
|
@ -91,7 +93,6 @@ static struct comp_dev *kpb_new(struct sof_ipc_comp *comp)
|
|||
size_t bs = ipc_process->size;
|
||||
struct comp_dev *dev;
|
||||
struct comp_data *kpb;
|
||||
size_t allocated_size;
|
||||
|
||||
trace_kpb("kpb_new()");
|
||||
|
||||
|
@ -126,23 +127,12 @@ static struct comp_dev *kpb_new(struct sof_ipc_comp *comp)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
/* Sampling width accepted. Lets calculate and store
|
||||
* its derivatives for quick lookup in runtime.
|
||||
*/
|
||||
kpb->buffer_size = KPB_MAX_BUFFER_SIZE(kpb->config.sampling_width);
|
||||
|
||||
if (kpb->config.no_channels > KPB_MAX_SUPPORTED_CHANNELS) {
|
||||
trace_kpb_error("kpb_new() error: "
|
||||
"no of channels exceeded the limit");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (kpb->config.history_depth > kpb->buffer_size) {
|
||||
trace_kpb_error("kpb_new() error: "
|
||||
"history depth exceeded the limit");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (kpb->config.sampling_freq != KPB_SAMPLNG_FREQUENCY) {
|
||||
trace_kpb_error("kpb_new() error: "
|
||||
"requested sampling frequency not supported");
|
||||
|
@ -150,23 +140,11 @@ static struct comp_dev *kpb_new(struct sof_ipc_comp *comp)
|
|||
}
|
||||
|
||||
dev->state = COMP_STATE_READY;
|
||||
kpb->history_buffer = NULL;
|
||||
|
||||
/* Zero number of clients */
|
||||
kpb->kpb_no_of_clients = 0;
|
||||
|
||||
/* Set initial state as buffering */
|
||||
kpb->state = KPB_STATE_BUFFERING;
|
||||
|
||||
/* Allocate history buffer */
|
||||
allocated_size = kpb_allocate_history_buffer(kpb);
|
||||
|
||||
/* Have we allocated what we requested? */
|
||||
if (allocated_size < kpb->buffer_size) {
|
||||
trace_kpb_error("Failed to allocate space for "
|
||||
"KPB buffer/s");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return dev;
|
||||
}
|
||||
|
||||
|
@ -181,7 +159,7 @@ static size_t kpb_allocate_history_buffer(struct comp_data *kpb)
|
|||
struct hb *history_buffer;
|
||||
struct hb *new_hb = NULL;
|
||||
/*! Total allocation size */
|
||||
size_t hb_size = kpb->buffer_size;
|
||||
size_t hb_size = kpb->kpb_buffer_size;
|
||||
/*! Current allocation size */
|
||||
size_t ca_size = hb_size;
|
||||
/*! Memory caps priorites for history buffer */
|
||||
|
@ -346,6 +324,7 @@ static int kpb_prepare(struct comp_dev *dev)
|
|||
int i;
|
||||
struct list_item *blist;
|
||||
struct comp_buffer *sink;
|
||||
size_t allocated_size;
|
||||
|
||||
trace_kpb("kpb_prepare()");
|
||||
|
||||
|
@ -359,8 +338,24 @@ static int kpb_prepare(struct comp_dev *dev)
|
|||
/* Init private data */
|
||||
kpb->kpb_no_of_clients = 0;
|
||||
kpb->buffered_data = 0;
|
||||
kpb->state = KPB_STATE_BUFFERING;
|
||||
kpb->host_buffer_size = dev->params.buffer.size;
|
||||
kpb->host_period_size = dev->params.host_period_bytes;
|
||||
kpb->config.sampling_width = dev->params.sample_container_bytes * 8;
|
||||
kpb->kpb_buffer_size = KPB_MAX_BUFFER_SIZE(kpb->config.sampling_width);
|
||||
|
||||
if (!kpb->history_buffer) {
|
||||
/* Allocate history buffer */
|
||||
allocated_size = kpb_allocate_history_buffer(kpb);
|
||||
|
||||
/* Have we allocated what we requested? */
|
||||
if (allocated_size < kpb->kpb_buffer_size) {
|
||||
trace_kpb_error("Failed to allocate space for "
|
||||
"KPB buffer/s");
|
||||
kpb_free_history_buffer(kpb->history_buffer);
|
||||
kpb->history_buffer = NULL;
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
/* Init history buffer */
|
||||
kpb_clear_history_buffer(kpb->history_buffer);
|
||||
|
||||
|
@ -407,6 +402,8 @@ static int kpb_prepare(struct comp_dev *dev)
|
|||
}
|
||||
}
|
||||
|
||||
kpb->state = KPB_STATE_BUFFERING;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -507,10 +504,10 @@ static int kpb_copy(struct comp_dev *dev)
|
|||
/* Buffer source data internally in history buffer for future
|
||||
* use by clients.
|
||||
*/
|
||||
if (source->avail <= kpb->buffer_size) {
|
||||
if (source->avail <= kpb->kpb_buffer_size) {
|
||||
kpb_buffer_data(kpb, source, copy_bytes);
|
||||
|
||||
if (kpb->buffered_data < kpb->buffer_size)
|
||||
if (kpb->buffered_data < kpb->kpb_buffer_size)
|
||||
kpb->buffered_data += copy_bytes;
|
||||
else
|
||||
kpb->is_internal_buffer_full = true;
|
||||
|
@ -942,7 +939,7 @@ static bool kpb_has_enough_history_data(struct comp_data *kpb,
|
|||
|
||||
/* Quick check if we've already filled internal buffer */
|
||||
if (kpb->is_internal_buffer_full)
|
||||
return his_req <= kpb->buffer_size;
|
||||
return his_req <= kpb->kpb_buffer_size;
|
||||
|
||||
/* Internal buffer isn't full yet. Verify if what already buffered
|
||||
* is sufficient for draining request.
|
||||
|
|
|
@ -256,7 +256,7 @@ static void null_test_success(void **state)
|
|||
/* Test main function */
|
||||
int main(void)
|
||||
{
|
||||
struct CMUnitTest tests[2];
|
||||
struct CMUnitTest tests[1];
|
||||
struct test_case internal_buffering = {
|
||||
.period_bytes = KPB_MAX_BUFFER_SIZE(16),
|
||||
.history_buffer_size = KPB_MAX_BUFFER_SIZE(16),
|
||||
|
|
Loading…
Reference in New Issue