ace: cpu: cpu management for mtl

This patch adds an implementation of the core power state switching
functions for MeteorLake. The code uses the power management API
provided by the Zephyr kernel.
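
For reference, a minimal sketch of the mechanism this flow relies on,
assuming only the Zephyr PM API that appears in the diff below (the
helper function name is hypothetical):

#include <errno.h>
#include <zephyr/pm/pm.h>

/* Hypothetical helper: ask the power manager to move a core to
 * SOFT_OFF on its next idle entry. pm_state_force() returns false
 * when the requested state cannot be forced.
 */
static int force_core_soft_off(int core_id)
{
	if (!pm_state_force(core_id,
			    &(struct pm_state_info){PM_STATE_SOFT_OFF, 0, 0}))
		return -EINVAL;
	return 0;
}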

Signed-off-by: Tomasz Leman <tomasz.m.leman@intel.com>
Tomasz Leman 2022-10-26 10:09:22 +02:00 committed by Liam Girdwood
parent b6a4bb682c
commit ab3ad0151a
2 changed files with 102 additions and 2 deletions


@@ -283,9 +283,14 @@ void idc_cmd(struct idc_msg *msg)
	int ret = 0;

	switch (type) {
#ifndef CONFIG_PM
	/* In the Zephyr PM flow this IDC message is not used. The
	 * primary core forces the OFF state directly via the power
	 * manager.
	 */
	case iTS(IDC_MSG_POWER_DOWN):
		cpu_power_down_core(0);
		break;
#endif
	case iTS(IDC_MSG_NOTIFY):
		notifier_notify_remote();
		break;


@@ -27,8 +27,6 @@ extern K_KERNEL_STACK_ARRAY_DEFINE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
static atomic_t start_flag;
static atomic_t ready_flag;
static int w_core_enable_mask = 0x1; /* Core 0 is always active */

/* Zephyr kernel_internal.h interface */
extern void smp_timer_init(void);
@@ -59,6 +57,9 @@ static FUNC_NORETURN void secondary_init(void *arg)
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

#ifndef CONFIG_ACE

static int w_core_enable_mask = 0x1; /* Core 0 is always active */

int cpu_enable_core(int id)
{
	pm_runtime_get(PM_RUNTIME_DSP, PWRD_BY_TPLG | id);
@@ -120,6 +121,100 @@ int cpu_enabled_cores(void)
	return w_core_enable_mask;
}
#else /* CONFIG_ACE */
#include <sof/trace/trace.h>
#include <zephyr/pm/pm.h>

LOG_MODULE_DECLARE(zephyr, CONFIG_SOF_LOG_LEVEL);

extern struct tr_ctx zephyr_tr;

int cpu_enable_core(int id)
{
	/* Only called from a single core, no RMW lock needed */
	__ASSERT_NO_MSG(cpu_is_primary(arch_proc_id()));

	/*
	 * This is an open-coded version of zephyr/kernel/smp.c
	 * z_smp_start_cpu(). We do this so we can use a customized
	 * secondary_init() for SOF.
	 */
	if (arch_cpu_active(id))
		return 0;

#if ZEPHYR_VERSION(3, 0, 99) <= ZEPHYR_VERSION_CODE
	z_init_cpu(id);
#endif
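
	/*
	 * Start-up handshake with secondary_init(): clear both flags,
	 * start the core, spin until it reports ready, then release it
	 * by setting start_flag.
	 */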
	atomic_clear(&start_flag);
	atomic_clear(&ready_flag);

	arch_start_cpu(id, z_interrupt_stacks[id], CONFIG_ISR_STACK_SIZE,
		       secondary_init, &start_flag);

	while (!atomic_get(&ready_flag))
		k_busy_wait(100);

	atomic_set(&start_flag, 1);

	return 0;
}

void cpu_disable_core(int id)
{
	/* Only called from a single core, no RMW lock needed */
	__ASSERT_NO_MSG(cpu_is_primary(arch_proc_id()));

	if (!arch_cpu_active(id)) {
		tr_warn(&zephyr_tr, "core %d is already disabled", id);
		return;
	}

#if defined(CONFIG_PM)
	/* TODO: before requesting core shutdown, check that it is not actively used */
	if (!pm_state_force(id, &(struct pm_state_info){PM_STATE_SOFT_OFF, 0, 0})) {
		tr_err(&zephyr_tr, "failed to set PM_STATE_SOFT_OFF on core %d", id);
		return;
	}

	/* The primary core will be turned off by the host after it enters the SOFT_OFF state */
	if (cpu_is_primary(id))
		return;

	uint64_t timeout = k_cycle_get_64() +
		k_ms_to_cyc_ceil64(CONFIG_SECONDARY_CORE_DISABLING_TIMEOUT);

	/* Wait for the secondary core to enter the idle state */
	while (arch_cpu_active(id) && (k_cycle_get_64() < timeout))
		idelay(PLATFORM_DEFAULT_DELAY);

	if (arch_cpu_active(id)) {
		tr_err(&zephyr_tr, "core %d did not enter idle state", id);
		return;
	}

	if (soc_adsp_halt_cpu(id) != 0)
		tr_err(&zephyr_tr, "failed to disable core %d", id);
#endif /* CONFIG_PM */
}
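
/*
 * Note: with CONFIG_PM, cpu_disable_core() only requests SOFT_OFF and
 * waits for the target core to go idle; the actual state transition
 * happens in that core's own idle path, after which the core is halted.
 */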
int cpu_is_core_enabled(int id)
{
	return arch_cpu_active(id);
}
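
/*
 * Builds a bitmask of currently active cores, e.g. with cores 0 and 2
 * active the result is 0x5 (BIT(0) | BIT(2)).
 */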
int cpu_enabled_cores(void)
{
	unsigned int i;
	int mask = 0;

	for (i = 0; i < CONFIG_MP_NUM_CPUS; i++)
		if (arch_cpu_active(i))
			mask |= BIT(i);

	return mask;
}
#endif /* CONFIG_ACE */

void cpu_power_down_core(uint32_t flags)
{
	/* TODO: use Zephyr version */