zephyr: ipc: refactor of pg prevent

Prevent the d0i3 state during IPC processing when the host has
previously allowed power state transitions. The transition can occur
only in the idle thread, and this prevention acts as a safeguard for
the case where all remaining threads are inactive (e.g. waiting for a
semaphore).
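For context, here is a condensed sketch of the lock-based pattern this
commit removes. The pm_policy_* calls, state, and substate arguments are
taken from the diff below; the wrapper function names are hypothetical
and stand in for the real IPC handler and command-completion paths:

    #include <zephyr/pm/policy.h>

    /* Hypothetical wrapper: runs when an IPC message arrives. */
    static void on_ipc_message_received(void)
    {
        /* Block entry to runtime idle (d0i3), all substates, until
         * the matching put below runs.
         */
        pm_policy_state_lock_get(PM_STATE_RUNTIME_IDLE, PM_ALL_SUBSTATES);
    }

    /* Hypothetical wrapper: runs when the IPC command completes. */
    static void on_ipc_command_completed(void)
    {
        /* Must be balanced exactly with the get above; a missed put
         * leaves d0i3 blocked forever.
         */
        pm_policy_state_lock_put(PM_STATE_RUNTIME_IDLE, PM_ALL_SUBSTATES);
    }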

The advantage of this solution is that we do not risk bypassing the
prevention. The PM state lock calls (get and put) must be balanced: one
extra put or get results in an error that is hard to debug. For
example, we can end up with a permanent prevention of power gating.
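The refactor therefore replaces the stateful lock with a stateless
condition evaluated each time the idle thread picks a power state (see
the second file below). A minimal sketch of the idea, using a
stripped-down stand-in for SOF's struct ipc that shows only the field
the new check uses:

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-in for SOF's struct ipc; task_mask is non-zero while an
     * IPC task is still pending or running.
     */
    struct ipc {
        uint32_t task_mask;
    };

    extern struct ipc *ipc_get(void);

    /* No get/put pairs to balance: the question "is any IPC task
     * unfinished?" is simply re-asked at every power state decision,
     * so a missed release can no longer wedge power gating.
     */
    static bool ipc_allows_d0i3(void)
    {
        return ipc_get()->task_mask == 0;
    }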

Original code introduced here: ae4e037b0

Signed-off-by: Tomasz Leman <tomasz.m.leman@intel.com>
Tomasz Leman 2022-12-15 12:25:24 +01:00 committed by Liam Girdwood
parent 5270e631c3
commit 3f05c792c3
2 changed files with 7 additions and 11 deletions

View File

@@ -28,8 +28,6 @@
 #include <rtos/spinlock.h>
 #include <ipc/header.h>
-#include <zephyr/pm/policy.h>
 #include <stdbool.h>
 #include <stddef.h>
 #include <stdint.h>
@@ -70,13 +68,6 @@ static bool message_handler(const struct device *dev, void *arg, uint32_t data,
 #if CONFIG_DEBUG_IPC_COUNTERS
 	increment_ipc_received_counter();
 #endif
-	/* Preventing d0i3 state during IPC processing in case if a host previously allowed for
-	 * power state transitions. Transition can occur only in the Idle thread and this lock acts
-	 * as a safeguard in case of the remaining threads to be non active
-	 * (e.g. waiting for a semaphore).
-	 */
-	pm_policy_state_lock_get(PM_STATE_RUNTIME_IDLE, PM_ALL_SUBSTATES);
 	ipc_schedule_process(ipc);
 	k_spin_unlock(&ipc->lock, key);
@@ -141,7 +132,6 @@ enum task_state ipc_platform_do_cmd(struct ipc *ipc)
 	}
 #endif
-	pm_policy_state_lock_put(PM_STATE_RUNTIME_IDLE, PM_ALL_SUBSTATES);
 	return SOF_TASK_STATE_COMPLETED;
 }

View File

@@ -2,6 +2,7 @@
 //
 // Copyright(c) 2022 Intel Corporation. All rights reserved.
+#include <sof/ipc/common.h>
 #include <sof/lib/pm_runtime.h>
 #include <sof/lib/uuid.h>
 #include <sof/trace/trace.h>
@@ -40,10 +41,15 @@ const struct pm_state_info *pm_policy_next_state(uint8_t cpu, int32_t ticks)
 		if (pm_policy_state_lock_is_active(state->state, state->substate_id))
 			continue;
+		/* checking conditions for D0i3 */
 		if (state->state == PM_STATE_RUNTIME_IDLE) {
-			/* No D0i3 when secondary cores are active! */
+			/* skipping when secondary cores are active */
 			if (cpu_enabled_cores() & ~BIT(PLATFORM_PRIMARY_CORE_ID))
 				continue;
+			/* skipping when some ipc task is not finished */
+			if (ipc_get()->task_mask)
+				continue;
 		}
 		min_residency = k_us_to_ticks_ceil32(state->min_residency_us);
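
Putting that hunk in context, the D0i3 gate in SOF's custom policy
callback now reads roughly as follows. This is a simplified sketch, not
the verbatim SOF source: cpu_enabled_cores(), PLATFORM_PRIMARY_CORE_ID,
ipc_get() and the includes are the symbols used in the diff above, and
the helper function name is hypothetical.

    #include <sof/ipc/common.h>    /* ipc_get(), struct ipc (SOF) */
    #include <zephyr/pm/policy.h>  /* pm_policy_state_lock_is_active() */
    #include <zephyr/sys/util.h>   /* BIT() */
    #include <stdbool.h>

    /* cpu_enabled_cores() and PLATFORM_PRIMARY_CORE_ID come from SOF's
     * cpu/platform headers.
     */
    static bool runtime_idle_allowed(void)
    {
        /* 1. An explicit PM policy lock still vetoes d0i3. */
        if (pm_policy_state_lock_is_active(PM_STATE_RUNTIME_IDLE,
                                           PM_ALL_SUBSTATES))
            return false;

        /* 2. Any active secondary core vetoes d0i3. */
        if (cpu_enabled_cores() & ~BIT(PLATFORM_PRIMARY_CORE_ID))
            return false;

        /* 3. Any unfinished IPC task vetoes d0i3 (the new check). */
        if (ipc_get()->task_mask)
            return false;

        /* 4. Otherwise d0i3 is eligible, subject to min residency. */
        return true;
    }

The order matters: the generic lock check runs first so that any other
subsystem still holding a PM state lock keeps its veto, and the cheap
core-mask and task_mask reads only decide the D0i3-specific cases.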