kpb: forbid draining requests bigger than history_buffer

This patch adds a check for whether the requested history depth
is bigger than the history buffer size. If so, the drain request
is refused.

Signed-off-by: Marcin Rajwa <marcin.rajwa@linux.intel.com>
Marcin Rajwa 2019-07-27 23:49:29 +02:00 committed by Janusz Jankowski
parent 3bdc499467
commit ad59cf465a
1 changed file with 2 additions and 51 deletions
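For illustration, below is a minimal, self-contained sketch of the new guard logic. The struct and helper names (kpb_state, kpb_request_is_drainable) are hypothetical; only the two fields mirror what the patch actually reads (kpb->buffered_data and kpb->kpb_buffer_size):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Hypothetical reduction of the KPB component state to the two
	 * fields the new check reads.
	 */
	struct kpb_state {
		size_t buffered_data;   /* bytes currently held in the history buffer */
		size_t kpb_buffer_size; /* total capacity of the history buffer */
	};

	/* Hypothetical helper: draining is allowed only if the request fits
	 * both the data buffered so far and the buffer's total capacity.
	 */
	static bool kpb_request_is_drainable(const struct kpb_state *kpb,
					     size_t history_depth)
	{
		return kpb->buffered_data >= history_depth &&
		       kpb->kpb_buffer_size >= history_depth;
	}

	int main(void)
	{
		struct kpb_state kpb = {
			.buffered_data = 4096,
			.kpb_buffer_size = 8192,
		};

		printf("drain 2048 bytes:  %d\n", kpb_request_is_drainable(&kpb, 2048));  /* 1 */
		printf("drain 16384 bytes: %d\n", kpb_request_is_drainable(&kpb, 16384)); /* 0 */

		return 0;
	}

In effect, the patch replaces a buffer-walking helper with a constant-time comparison against a running byte counter, and makes "request bigger than the whole buffer" an explicit refusal.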

@@ -71,8 +71,6 @@ static int kpb_buffer_data(struct comp_dev *dev, struct comp_buffer *source,
 static size_t kpb_allocate_history_buffer(struct comp_data *kpb);
 static void kpb_clear_history_buffer(struct hb *buff);
 static void kpb_free_history_buffer(struct hb *buff);
-static bool kpb_has_enough_history_data(struct comp_data *kpb,
-					struct hb *buff, size_t his_req);
 static inline bool kpb_is_sample_width_supported(uint32_t sampling_width);
 static void kpb_copy_samples(struct comp_buffer *sink,
 			     struct comp_buffer *source, size_t size,
@@ -842,7 +840,8 @@ static void kpb_init_draining(struct comp_dev *dev, struct kpb_client *cli)
 	} else if (!is_sink_ready) {
 		trace_kpb_error("kpb_init_draining() error: "
 				"sink not ready for draining");
-	} else if (!kpb_has_enough_history_data(kpb, buff, history_depth)) {
+	} else if (kpb->buffered_data < history_depth ||
+		   kpb->kpb_buffer_size < history_depth) {
 		trace_kpb_error("kpb_init_draining() error: "
 				"not enough data in history buffer");
 	} else if (!validate_host_params(host_period_size,
@@ -1134,54 +1133,6 @@ static void kpb_clear_history_buffer(struct hb *buff)
 	} while (buff != first_buff);
 }
-
-/**
- * \brief Verify if KPB has enough data buffered.
- *
- * \param[in] kpb - KPB component data pointer.
- * \param[in] buff - pointer to current history buffer.
- * \param[in] his_req - requested draining size.
- *
- * \return 1 if there is enough data in history buffer
- * and 0 otherwise.
- */
-static bool kpb_has_enough_history_data(struct comp_data *kpb,
-					struct hb *buff, size_t his_req)
-{
-	size_t buffered_data = 0;
-	struct hb *first_buff = buff;
-
-	/* Quick check if we've already filled internal buffer */
-	if (kpb->is_internal_buffer_full)
-		return his_req <= kpb->kpb_buffer_size;
-
-	/* Internal buffer isn't full yet. Verify if what already buffered
-	 * is sufficient for draining request.
-	 */
-	while (buffered_data < his_req) {
-		if (buff->state == KPB_BUFFER_FREE) {
-			if (buff->w_ptr == buff->start_addr &&
-			    buff->next->state == KPB_BUFFER_FULL) {
-				buffered_data += ((uint32_t)buff->end_addr -
-						  (uint32_t)buff->start_addr);
-			} else {
-				buffered_data += ((uint32_t)buff->w_ptr -
						  (uint32_t)buff->start_addr);
-			}
-		} else {
-			buffered_data += ((uint32_t)buff->end_addr -
-					  (uint32_t)buff->start_addr);
-		}
-
-		if (buff->next && buff->next != first_buff)
-			buff = buff->next;
-		else
-			break;
-	}
-
-	return buffered_data >= his_req;
-}
 
 static inline bool kpb_is_sample_width_supported(uint32_t sampling_width)
 {
 	bool ret;