pipeline: rebalance KCPS instead of adding or subtracting

Adding or subtracting module CPC when starting and stopping pipelines
is brittle. In particular, it is prone to mistakes with modules that
don't specify their CPC explicitly. Instead, recalculate the per-core
KCPS budget every time a pipeline is started or stopped.

Signed-off-by: Guennadi Liakhovetski <guennadi.liakhovetski@linux.intel.com>
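
The idea behind the change can be shown with a small, self-contained sketch. Nothing below is the SOF code: struct component, rebalance(), adjust_kcps(), granted_kcps, BASE_KCPS and NUM_CORES are hypothetical stand-ins. Instead of adding a pipeline's cycle demand on start and subtracting it again on stop, the per-core demand is recomputed from scratch over every component that keeps running, and only the difference against the currently granted KCPS is applied, so a module that never reported a usable CPC cannot leave a stale increment behind.

#include <stdbool.h>
#include <stdio.h>

#define NUM_CORES 2
#define BASE_KCPS 50000		/* hypothetical per-core baseline */

struct component {
	int core;		/* core the component is scheduled on */
	unsigned int kcps;	/* declared cycle demand in KCPS */
	int pipeline_id;	/* pipeline the component belongs to */
	bool active;		/* currently running */
};

static unsigned int granted_kcps[NUM_CORES];	/* what the clock driver holds */

/* stand-in for core_kcps_adjust(): apply a signed delta to one core */
static void adjust_kcps(int core, int delta)
{
	granted_kcps[core] += delta;
	printf("core %d: delta %d -> %u KCPS\n", core, delta, granted_kcps[core]);
}

/* recompute demand from scratch and apply only the per-core difference */
static void rebalance(struct component *comps, int n, int pipeline_id, bool starting)
{
	unsigned int demand[NUM_CORES];
	int i;

	for (i = 0; i < NUM_CORES; i++)
		demand[i] = BASE_KCPS;

	for (i = 0; i < n; i++) {
		struct component *c = &comps[i];
		/* count components of other pipelines only if already running;
		 * count this pipeline's components only when it is starting */
		bool counted = c->pipeline_id == pipeline_id ? starting : c->active;

		if (counted)
			demand[c->core] += c->kcps;
	}

	for (i = 0; i < NUM_CORES; i++) {
		int delta = (int)demand[i] - (int)granted_kcps[i];

		if (delta)
			adjust_kcps(i, delta);
	}
}

int main(void)
{
	struct component comps[] = {
		{ .core = 0, .kcps = 12000, .pipeline_id = 1, .active = false },
		{ .core = 1, .kcps = 8000,  .pipeline_id = 2, .active = false },
	};

	rebalance(comps, 2, 1, true);	/* pipeline 1 starts: core 0 goes up */
	comps[0].active = true;
	rebalance(comps, 2, 2, true);	/* pipeline 2 starts: only core 1 changes */
	comps[1].active = true;
	rebalance(comps, 2, 1, false);	/* pipeline 1 stops: core 0 drops back */
	return 0;
}

The actual pipeline_cps_rebalance() in the diff below additionally pins a core to the maximum clock whenever a counted component declares a CPC of zero, which is precisely the case the old add/subtract bookkeeping handled awkwardly.
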
Guennadi Liakhovetski 2024-07-19 17:13:32 +02:00 committed by Kai Vehmanen
parent b1df683c91
commit f2a0bf8acb
1 changed file with 44 additions and 77 deletions
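
Before the diff itself, the call pattern in pipeline_trigger() can be compressed into the sketch below. Only the ordering is taken from the patch; struct pipeline, lock(), unlock() and the stub bodies are placeholders rather than the firmware API (irq_lock()/irq_unlock() and the real trigger path): the KCPS budget is raised before a start-like trigger runs and released only after a stop-like trigger has completed, so components are never left running on a lowered clock.

#include <stdbool.h>
#include <stdio.h>

/* Minimal stubs so the sketch compiles on its own. */
struct pipeline { int id; };

static unsigned int lock(void) { return 0; }		/* stands in for irq_lock() */
static void unlock(unsigned int flags) { (void)flags; }	/* stands in for irq_unlock() */

static void pipeline_cps_rebalance(struct pipeline *p, bool starting)
{
	printf("pipeline %d: rebalance, starting=%d\n", p->id, starting);
}

static int pipeline_trigger_list(struct pipeline *p, int cmd)
{
	printf("pipeline %d: trigger cmd %d\n", p->id, cmd);
	return 0;
}

/* Ordering taken from the patch: budget up before a start, down after a stop. */
static int trigger_sketch(struct pipeline *p, int cmd, bool trigger_first)
{
	unsigned int flags = lock();
	int ret;

	if (!trigger_first)
		pipeline_cps_rebalance(p, true);	/* start path: raise budget first */

	ret = pipeline_trigger_list(p, cmd);

	if (trigger_first)
		pipeline_cps_rebalance(p, false);	/* stop path: release budget after */

	unlock(flags);
	return ret;
}

int main(void)
{
	struct pipeline p = { .id = 1 };

	trigger_sketch(&p, 0, false);	/* start-like trigger */
	trigger_sketch(&p, 1, true);	/* stop-like trigger */
	return 0;
}

Here trigger_first mirrors the flag of the same name in pipeline_trigger(): the hunks show the start path rebalancing before pipeline_trigger_list() and the stop path rebalancing after it.
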

@@ -192,7 +192,9 @@ int pipeline_trigger(struct pipeline *p, struct comp_dev *host, int cmd)
return 0;
}
#else
#else /* CONFIG_LIBRARY */
/* only collect scheduling components */
static int pipeline_comp_list(struct comp_dev *current,
struct comp_buffer *calling_buf,
@@ -326,70 +328,58 @@ static struct ipc4_base_module_cfg *ipc4_get_base_cfg(struct comp_dev *comp)
return &md->cfg.base_cfg;
}
static int pipeline_calc_cps_consumption(struct comp_dev *current,
struct comp_buffer *calling_buf,
struct pipeline_walk_context *ctx, int dir)
static void pipeline_cps_rebalance(struct pipeline *p, bool starting)
{
struct pipeline_data *ppl_data = ctx->comp_data;
struct ipc4_base_module_cfg *cd;
int comp_core, kcps;
unsigned int core_kcps[CONFIG_CORE_COUNT];
struct ipc *ipc = ipc_get();
struct ipc_comp_dev *icd;
struct list_item *clist;
const unsigned int clk_max_khz = CLK_MAX_CPU_HZ / 1000;
pipe_dbg(ppl_data->p, "pipeline_calc_cps_consumption(), current->comp.id = %u, dir = %u",
dev_comp_id(current), dir);
for (unsigned int i = 0; i < CONFIG_CORE_COUNT; i++)
core_kcps[i] = i == PLATFORM_PRIMARY_CORE_ID ? PRIMARY_CORE_BASE_CPS_USAGE :
SECONDARY_CORE_BASE_CPS_USAGE;
if (!comp_is_single_pipeline(current, ppl_data->start)) {
pipe_dbg(ppl_data->p, "pipeline_calc_cps_consumption(), current is from another pipeline");
return 0;
}
comp_core = current->ipc_config.core;
list_for_item(clist, &ipc->comp_list) {
icd = container_of(clist, struct ipc_comp_dev, list);
if (icd->type != COMP_TYPE_COMPONENT)
continue;
/* modules created through module adapter have different priv_data */
cd = ipc4_get_base_cfg(current);
struct comp_dev *comp = icd->cd;
if (cd->cpc == 0) {
/* Use maximum clock budget, assume 1ms chunk size */
uint32_t core_kcps = core_kcps_get(comp_core);
/*
* When a pipeline is started, its components have state PREPARE, when
* a pipeline is terminated, its components still have state ACTIVE
*/
if ((comp->state == COMP_STATE_ACTIVE &&
(starting || comp->pipeline != p)) ||
((comp->state == COMP_STATE_PREPARE || comp->state == COMP_STATE_PAUSED) &&
starting && comp->pipeline == p)) {
struct ipc4_base_module_cfg *cd = ipc4_get_base_cfg(comp);
if (!current->kcps_inc[comp_core]) {
current->kcps_inc[comp_core] = core_kcps;
ppl_data->kcps[comp_core] = CLK_MAX_CPU_HZ / 1000 - core_kcps;
} else {
ppl_data->kcps[comp_core] = core_kcps - current->kcps_inc[comp_core];
current->kcps_inc[comp_core] = 0;
if (cd->cpc && core_kcps[icd->core] < clk_max_khz)
core_kcps[icd->core] += cd->cpc;
else
core_kcps[icd->core] = clk_max_khz;
}
tr_warn(pipe,
"0 CPS requested for module: %#x, core: %d using safe max KCPS: %u",
current->ipc_config.id, comp_core, ppl_data->kcps[comp_core]);
return PPL_STATUS_PATH_STOP;
} else {
kcps = cd->cpc * 1000 / current->period;
tr_dbg(pipe, "Module: %#x KCPS consumption: %d, core: %d",
current->ipc_config.id, kcps, comp_core);
ppl_data->kcps[comp_core] += kcps;
}
return pipeline_for_each_comp(current, ctx, dir);
for (int i = 0; i < arch_num_cpus(); i++) {
int delta_kcps = core_kcps[i] - core_kcps_get(i);
tr_dbg(pipe, "Proposed KCPS consumption: %d, core: %d, delta: %d",
core_kcps[i], i, delta_kcps);
if (delta_kcps)
core_kcps_adjust(i, delta_kcps);
}
}
#endif
#endif /* CONFIG_KCPS_DYNAMIC_CLOCK_CONTROL */
/* trigger pipeline in IPC context */
int pipeline_trigger(struct pipeline *p, struct comp_dev *host, int cmd)
{
int ret;
#if CONFIG_KCPS_DYNAMIC_CLOCK_CONTROL
/* FIXME: this must be a platform-specific parameter or a Kconfig option */
#define DSP_MIN_KCPS 50000
struct pipeline_data data = {
.start = p->source_comp,
.p = p,
};
struct pipeline_walk_context walk_ctx = {
.comp_func = pipeline_calc_cps_consumption,
.comp_data = &data,
};
bool trigger_first = false;
uint32_t flags = 0;
#endif
@@ -422,16 +412,8 @@ int pipeline_trigger(struct pipeline *p, struct comp_dev *host, int cmd)
#if CONFIG_KCPS_DYNAMIC_CLOCK_CONTROL
flags = irq_lock();
/* setup walking ctx for removing consumption */
if (!trigger_first) {
ret = walk_ctx.comp_func(p->source_comp, NULL, &walk_ctx, PPL_DIR_DOWNSTREAM);
for (int i = 0; i < arch_num_cpus(); i++) {
if (data.kcps[i] > 0) {
core_kcps_adjust(i, data.kcps[i]);
tr_info(pipe, "Sum of KCPS consumption: %d, core: %d", core_kcps_get(i), i);
}
}
}
if (!trigger_first)
pipeline_cps_rebalance(p, true);
#endif
ret = pipeline_trigger_list(p, host, cmd);
if (ret < 0) {
@@ -441,23 +423,8 @@ int pipeline_trigger(struct pipeline *p, struct comp_dev *host, int cmd)
return ret;
}
#if CONFIG_KCPS_DYNAMIC_CLOCK_CONTROL
if (trigger_first) {
ret = walk_ctx.comp_func(p->source_comp, NULL, &walk_ctx, PPL_DIR_DOWNSTREAM);
for (int i = 0; i < arch_num_cpus(); i++) {
if (data.kcps[i] > 0) {
uint32_t core_kcps = core_kcps_get(i);
/* Tests showed, that we cannot go below 40000kcps on MTL */
if (data.kcps[i] > core_kcps - DSP_MIN_KCPS)
data.kcps[i] = core_kcps - DSP_MIN_KCPS;
core_kcps_adjust(i, -data.kcps[i]);
tr_info(pipe, "Sum of KCPS consumption: %d, core: %d",
core_kcps, i);
}
}
}
if (trigger_first)
pipeline_cps_rebalance(p, false);
irq_unlock(flags);
#endif
/* IPC response will be sent from the task, unless it was paused */
@@ -466,7 +433,7 @@ int pipeline_trigger(struct pipeline *p, struct comp_dev *host, int cmd)
return 0;
}
#endif
#endif /* CONFIG_LIBRARY */
/* Runs in IPC or in pipeline task context */
static int pipeline_comp_trigger(struct comp_dev *current,