kpb: refactor kpb->hb_buffer_size

This patch changes the name of the variable to be more
meaningful.

Signed-off-by: Marcin Rajwa <marcin.rajwa@linux.intel.com>
This commit is contained in:
Marcin Rajwa 2019-12-09 14:00:40 +01:00 committed by Liam Girdwood
parent 895b46771c
commit 8685b2cafd
1 changed file with 11 additions and 12 deletions

View File

@@ -59,7 +59,7 @@ struct comp_data {
bool is_internal_buffer_full; bool is_internal_buffer_full;
size_t buffered_data; size_t buffered_data;
struct dd draining_task_data; struct dd draining_task_data;
size_t kpb_buffer_size; size_t buffer_size;
size_t host_buffer_size; size_t host_buffer_size;
size_t host_period_size; size_t host_period_size;
}; };
@@ -191,7 +191,7 @@ static size_t kpb_allocate_history_buffer(struct comp_data *kpb)
struct hb *history_buffer; struct hb *history_buffer;
struct hb *new_hb = NULL; struct hb *new_hb = NULL;
/*! Total allocation size */ /*! Total allocation size */
size_t hb_size = kpb->kpb_buffer_size; size_t hb_size = kpb->buffer_size;
/*! Current allocation size */ /*! Current allocation size */
size_t ca_size = hb_size; size_t ca_size = hb_size;
/*! Memory caps priorites for history buffer */ /*! Memory caps priorites for history buffer */
@@ -404,7 +404,7 @@ static int kpb_prepare(struct comp_dev *dev)
kpb_change_state(kpb, KPB_STATE_PREPARING); kpb_change_state(kpb, KPB_STATE_PREPARING);
kpb->kpb_no_of_clients = 0; kpb->kpb_no_of_clients = 0;
kpb->buffered_data = 0; kpb->buffered_data = 0;
kpb->kpb_buffer_size = KPB_MAX_BUFFER_SIZE(kpb->config.sampling_width); kpb->buffer_size = KPB_MAX_BUFFER_SIZE(kpb->config.sampling_width);
kpb->sel_sink = NULL; kpb->sel_sink = NULL;
kpb->host_sink = NULL; kpb->host_sink = NULL;
@@ -413,9 +413,8 @@ static int kpb_prepare(struct comp_dev *dev)
allocated_size = kpb_allocate_history_buffer(kpb); allocated_size = kpb_allocate_history_buffer(kpb);
/* Have we allocated what we requested? */ /* Have we allocated what we requested? */
if (allocated_size < kpb->kpb_buffer_size) { if (allocated_size < kpb->buffer_size) {
trace_kpb_error_with_ids(dev, "Failed to allocate " trace_kpb_error("kpb_prepare() error: failed to allocate space for KPB buffer/s");
"space for KPB buffer/s");
kpb_free_history_buffer(kpb->history_buffer); kpb_free_history_buffer(kpb->history_buffer);
kpb->history_buffer = NULL; kpb->history_buffer = NULL;
return -EINVAL; return -EINVAL;
@@ -604,7 +603,7 @@ static int kpb_copy(struct comp_dev *dev)
/* Buffer source data internally in history buffer for future /* Buffer source data internally in history buffer for future
* use by clients. * use by clients.
*/ */
if (source->stream.avail <= kpb->kpb_buffer_size) { if (source->stream.avail <= kpb->buffer_size) {
ret = kpb_buffer_data(dev, source, copy_bytes); ret = kpb_buffer_data(dev, source, copy_bytes);
if (ret) { if (ret) {
trace_kpb_error_with_ids(dev, "kpb_copy(): " trace_kpb_error_with_ids(dev, "kpb_copy(): "
@@ -612,7 +611,7 @@ static int kpb_copy(struct comp_dev *dev)
"failed."); "failed.");
goto out; goto out;
} }
if (kpb->buffered_data < kpb->kpb_buffer_size) if (kpb->buffered_data < kpb->buffer_size)
kpb->buffered_data += copy_bytes; kpb->buffered_data += copy_bytes;
else else
kpb->is_internal_buffer_full = true; kpb->is_internal_buffer_full = true;
@@ -658,7 +657,7 @@ static int kpb_copy(struct comp_dev *dev)
/* In draining state we only buffer data in internal, /* In draining state we only buffer data in internal,
* history buffer. * history buffer.
*/ */
if (source->stream.avail <= kpb->kpb_buffer_size) { if (source->stream.avail <= kpb->buffer_size) {
ret = kpb_buffer_data(dev, source, ret = kpb_buffer_data(dev, source,
source->stream.avail); source->stream.avail);
if (ret) { if (ret) {
@@ -932,9 +931,8 @@ static void kpb_init_draining(struct comp_dev *dev, struct kpb_client *cli)
trace_kpb_error_with_ids(dev, "kpb_init_draining() error: " trace_kpb_error_with_ids(dev, "kpb_init_draining() error: "
"sink not ready for draining"); "sink not ready for draining");
} else if (kpb->buffered_data < history_depth || } else if (kpb->buffered_data < history_depth ||
kpb->kpb_buffer_size < history_depth) { kpb->buffer_size < history_depth) {
trace_kpb_error_with_ids(dev, "kpb_init_draining() error: " trace_kpb_error("kpb_init_draining() error: not enough data in history buffer");
"not enough data in history buffer");
} else if (!validate_host_params(host_period_size, } else if (!validate_host_params(host_period_size,
host_buffer_size, host_buffer_size,
bytes_per_ms)) { bytes_per_ms)) {
@@ -1003,6 +1001,7 @@ static void kpb_init_draining(struct comp_dev *dev, struct kpb_client *cli)
* take place. This time will be used to synchronize us with * take place. This time will be used to synchronize us with
* an end application interrupts. * an end application interrupts.
*/ */
drain_interval = (host_period_size / bytes_per_ms) * drain_interval = (host_period_size / bytes_per_ms) *
ticks_per_ms; ticks_per_ms;
/* In draining intervals we will fill only two periods /* In draining intervals we will fill only two periods