/*
 * Copyright (c) 2022 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/* Assumed include set, inferred from the APIs used in this file
 * (the original header names were lost).
 */
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/pm/pm.h>
#include <zephyr/pm/device_runtime.h>
#include <zephyr/cache.h>
#include <zephyr/debug/sparse.h>
#include <zephyr/drivers/mm/mm_drv_intel_adsp_mtl_tlb.h>
#include <cpu_init.h>
#include <adsp_boot.h>
#include <adsp_power.h>
#include <adsp_memory.h>
#include <adsp_imr_layout.h>
#include <mem_window.h>

#define LPSRAM_MAGIC_VALUE	0x13579BDF
#define LPSCTL_BATTR_MASK	GENMASK(16, 12)

#if CONFIG_SOC_INTEL_ACE15_MTPM
/* Used to force any pending transaction by HW, issuing an upstream read
 * before powering down the host domain.
 */
uint8_t adsp_pending_buffer[CONFIG_DCACHE_LINE_SIZE] __aligned(CONFIG_DCACHE_LINE_SIZE);
#endif /* CONFIG_SOC_INTEL_ACE15_MTPM */

__imr void power_init(void)
{
#if CONFIG_ADSP_IDLE_CLOCK_GATING
	/* Disable idle power gating */
	DSPCS.bootctl[0].bctl |= DSPBR_BCTL_WAITIPPG;
#else
	/* Disable idle power and clock gating */
	DSPCS.bootctl[0].bctl |= DSPBR_BCTL_WAITIPCG | DSPBR_BCTL_WAITIPPG;
#endif /* CONFIG_ADSP_IDLE_CLOCK_GATING */

#if CONFIG_SOC_INTEL_ACE15_MTPM
	*((__sparse_force uint32_t *)sys_cache_cached_ptr_get(&adsp_pending_buffer)) =
		INTEL_ADSP_ACE15_MAGIC_KEY;
	sys_cache_data_flush_range((__sparse_force void *)
				   sys_cache_cached_ptr_get(&adsp_pending_buffer),
				   sizeof(adsp_pending_buffer));
#endif /* CONFIG_SOC_INTEL_ACE15_MTPM */
}

#ifdef CONFIG_PM

#define L2_INTERRUPT_NUMBER	4
#define L2_INTERRUPT_MASK	(1 << L2_INTERRUPT_NUMBER)

/*
 * NOTE: everything from here down to the opening of power_gate_entry() is a
 * minimal reconstruction (assumed) of declarations the rest of this file
 * relies on; the exact original definitions were lost and may differ.
 */
#define L3_INTERRUPT_NUMBER	6
#define L3_INTERRUPT_MASK	(1 << L3_INTERRUPT_NUMBER)

#define ALL_USED_INT_LEVELS_MASK (L2_INTERRUPT_MASK | L3_INTERRUPT_MASK)

/* Entry points provided by the boot ROM, the restore vector below and the
 * SMP resume glue.
 */
extern void rom_entry(void);
void dsp_restore_vector(void);
void mp_resume_entry(void);

/* Per-core state preserved across power gating; only the fields referenced
 * in this file are listed here (assumed layout).
 */
struct core_state {
	uint32_t intenable;
	uint32_t ps;
	uint32_t bctl;
	uint32_t a0;
	uint32_t a1;
};

static struct core_state core_desc[CONFIG_MP_MAX_NUM_CPUS] = {{0}};

#ifdef CONFIG_ADSP_IMR_CONTEXT_SAVE
/* IMR buffer used to keep the LPSRAM/HPSRAM contents across D3 (assumed to
 * be set up by the D3 entry flow).
 */
extern uint8_t *global_imr_ram_storage;
#endif

/* Save/restore of the minimal CPU context around power gating (assumed: the
 * original implementation may preserve additional special registers).
 */
static ALWAYS_INLINE void _save_core_context(uint32_t core_id)
{
	core_desc[core_id].ps = XTENSA_RSR("PS");
	__asm__ volatile("mov %0, a0" : "=r"(core_desc[core_id].a0));
	__asm__ volatile("mov %0, a1" : "=r"(core_desc[core_id].a1));
}

static ALWAYS_INLINE void _restore_core_context(void)
{
	uint32_t core_id = arch_proc_id();

	XTENSA_WSR("PS", core_desc[core_id].ps);
	__asm__ volatile("mov a0, %0" :: "r"(core_desc[core_id].a0));
	__asm__ volatile("mov a1, %0" :: "r"(core_desc[core_id].a1));
	__asm__ volatile("rsync");
}

/* Enter power gating on the calling core (function prologue reconstructed,
 * assumed).
 */
void power_gate_entry(uint32_t core_id)
{
	xthal_window_spill();
	sys_cache_data_flush_and_invd_all();
	_save_core_context(core_id);

	if (core_id == 0) {
		/* The LPSRAM header address is assumed to be the LPSRAM base. */
		struct lpsram_header *lpsheader =
			(struct lpsram_header *)UINT_TO_POINTER(LP_SRAM_BASE);

		lpsheader->adsp_lpsram_magic = LPSRAM_MAGIC_VALUE;
		lpsheader->lp_restore_vector = &dsp_restore_vector;
		sys_cache_data_flush_range(lpsheader, sizeof(struct lpsram_header));
		/* Re-enable interrupts for core 0 because something has to
		 * wake us up from power gating.
		 */
		z_xt_ints_on(ALL_USED_INT_LEVELS_MASK);
	}

	soc_cpus_active[core_id] = false;
	k_cpu_idle();

	/* It is unlikely we get here, but if we do, interrupts must be
	 * locked again.
	 *
	 * @note Zephyr checks PS.INTLEVEL to decide whether interrupts are
	 * locked.
	 */
	(void)arch_irq_lock();
	z_xt_ints_off(0xffffffff);
}

static void __used power_gate_exit(void)
{
	cpu_early_init();
	sys_cache_data_flush_and_invd_all();
	_restore_core_context();

	/* Secondary core is resumed by set_dx */
	if (arch_proc_id()) {
		mp_resume_entry();
	}
}

__asm__(".align 4\n\t"
	".global dsp_restore_vector\n\t"
	"dsp_restore_vector:\n\t"
	" movi a0, 0\n\t"
	" movi a1, 1\n\t"
	" movi a2, " STRINGIFY(PS_UM | PS_WOE | PS_INTLEVEL(XCHAL_EXCM_LEVEL)) "\n\t"
	" wsr a2, PS\n\t"
	" wsr a1, WINDOWSTART\n\t"
	" wsr a0, WINDOWBASE\n\t"
	" rsync\n\t"
	" movi a1, z_interrupt_stacks\n\t"
	" rsr a2, PRID\n\t"
	" movi a3, " STRINGIFY(CONFIG_ISR_STACK_SIZE) "\n\t"
	" mull a2, a2, a3\n\t"
	" add a2, a2, a3\n\t"
	" add a1, a1, a2\n\t"
	" call0 power_gate_exit\n\t");

#ifdef CONFIG_ADSP_IMR_CONTEXT_SAVE
static ALWAYS_INLINE void power_off_exit(void)
{
	__asm__(
		" movi a0, 0\n\t"
		" movi a1, 1\n\t"
		" movi a2, " STRINGIFY(PS_UM | PS_WOE | PS_INTLEVEL(XCHAL_EXCM_LEVEL)) "\n\t"
		" wsr a2, PS\n\t"
		" wsr a1, WINDOWSTART\n\t"
		" wsr a0, WINDOWBASE\n\t"
		" rsync\n\t");
	_restore_core_context();
}

__imr void pm_state_imr_restore(void)
{
	struct imr_layout *imr_layout = (struct imr_layout *)(IMR_LAYOUT_ADDRESS);

	/* restore LPSRAM power and contents */
	bmemcpy(sys_cache_uncached_ptr_get((__sparse_force void __sparse_cache *)
					   UINT_TO_POINTER(LP_SRAM_BASE)),
		imr_layout->imr_state.header.imr_ram_storage,
		LP_SRAM_SIZE);

	/* restore HPSRAM contents, mapping and power states */
	adsp_mm_restore_context(imr_layout->imr_state.header.imr_ram_storage + LP_SRAM_SIZE);

	/* this function won't return, it will restore a saved state */
	power_off_exit();
}
#endif /* CONFIG_ADSP_IMR_CONTEXT_SAVE */
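/*
 * PM hook called by the Zephyr power-management subsystem to enter a
 * low-power state.
 *
 * PM_STATE_SOFT_OFF: core 0 records the restore vector (and, with
 * CONFIG_ADSP_IMR_CONTEXT_SAVE, the CPU context plus the LPSRAM and HPSRAM
 * contents) in the IMR layout, releases the host-interface power domain and
 * calls power_down(), which does not return. Secondary cores simply enter
 * power gating.
 *
 * PM_STATE_RUNTIME_IDLE: the calling core clears its clock/power-gating
 * prevention bits, powers itself down and enters power gating.
 */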
void pm_state_set(enum pm_state state, uint8_t substate_id)
{
	ARG_UNUSED(substate_id);

	uint32_t cpu = arch_proc_id();
	uint32_t battr;
	int ret;

	ARG_UNUSED(ret);

	/* save interrupt state and turn off all interrupts */
	core_desc[cpu].intenable = XTENSA_RSR("INTENABLE");
	z_xt_ints_off(0xffffffff);

	switch (state) {
	case PM_STATE_SOFT_OFF:
		core_desc[cpu].bctl = DSPCS.bootctl[cpu].bctl;
		DSPCS.bootctl[cpu].bctl &= ~DSPBR_BCTL_WAITIPCG;
		if (cpu == 0) {
			soc_cpus_active[cpu] = false;

			ret = pm_device_runtime_put(INTEL_ADSP_HST_DOMAIN_DEV);
			__ASSERT_NO_MSG(ret == 0);

#ifdef CONFIG_ADSP_IMR_CONTEXT_SAVE
			/* save storage and restore information to IMR */
			__ASSERT_NO_MSG(global_imr_ram_storage != NULL);
#endif
			struct imr_layout *imr_layout =
				(struct imr_layout *)(IMR_LAYOUT_ADDRESS);

			imr_layout->imr_state.header.adsp_imr_magic = ADSP_IMR_MAGIC_VALUE;
#ifdef CONFIG_ADSP_IMR_CONTEXT_SAVE
			sys_cache_data_flush_and_invd_all();
			imr_layout->imr_state.header.imr_restore_vector =
				(void *)boot_entry_d3_restore;
			imr_layout->imr_state.header.imr_ram_storage = global_imr_ram_storage;
			sys_cache_data_flush_range((void *)imr_layout, sizeof(*imr_layout));

			/* Save the CPU context here. When _restore_core_context()
			 * is called, it will return directly to the caller of this
			 * procedure. Any changes to the CPU context made after
			 * _save_core_context() are lost when power_down() executes;
			 * only data in the IMR region survives.
			 */
			xthal_window_spill();
			_save_core_context(cpu);

			/* save LPSRAM - a simple copy */
			memcpy(global_imr_ram_storage, (void *)LP_SRAM_BASE, LP_SRAM_SIZE);

			/* Save HPSRAM - a multi-step procedure executed by the TLB
			 * driver. The TLB driver changes the memory mapping, leaving
			 * the system not operational, so it must be called directly
			 * here, just before power_down().
			 */
			const struct device *tlb_dev = DEVICE_DT_GET(DT_NODELABEL(tlb));

			__ASSERT_NO_MSG(tlb_dev != NULL);
			const struct intel_adsp_tlb_api *tlb_api =
				(struct intel_adsp_tlb_api *)tlb_dev->api;

			tlb_api->save_context(global_imr_ram_storage + LP_SRAM_SIZE);
#else
			imr_layout->imr_state.header.imr_restore_vector =
				(void *)rom_entry;
			sys_cache_data_flush_range((void *)imr_layout, sizeof(*imr_layout));
#endif /* CONFIG_ADSP_IMR_CONTEXT_SAVE */

			/* do power down - this function won't return */
			power_down(true, CONFIG_ADSP_POWER_DOWN_HPSRAM, true);
		} else {
			power_gate_entry(cpu);
		}
		break;

	/* Only core 0 handles this state */
	case PM_STATE_RUNTIME_IDLE:
		battr = DSPCS.bootctl[cpu].battr & (~LPSCTL_BATTR_MASK);

		DSPCS.bootctl[cpu].bctl &= ~DSPBR_BCTL_WAITIPPG;
		DSPCS.bootctl[cpu].bctl &= ~DSPBR_BCTL_WAITIPCG;
		soc_cpu_power_down(cpu);
		battr |= (DSPBR_BATTR_LPSCTL_RESTORE_BOOT & LPSCTL_BATTR_MASK);
		DSPCS.bootctl[cpu].battr = battr;

		ret = pm_device_runtime_put(INTEL_ADSP_HST_DOMAIN_DEV);
		__ASSERT_NO_MSG(ret == 0);

		power_gate_entry(cpu);
		break;
	default:
		__ASSERT(false, "invalid argument - unsupported power state");
	}
}
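/*
 * Handle SoC-specific activity after low-power mode exit.
 *
 * Core 0 re-acquires the host-interface power domain reference. On
 * PM_STATE_SOFT_OFF exit, the clock-gating setting saved at entry is
 * restored and core 0 clears the restore information kept in the IMR
 * layout. On PM_STATE_RUNTIME_IDLE exit, the core is powered back up and
 * its boot-control flags are restored. In both cases the interrupt mask
 * saved in pm_state_set() is reinstated.
 */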
void pm_state_exit_post_ops(enum pm_state state, uint8_t substate_id)
{
	ARG_UNUSED(substate_id);

	uint32_t cpu = arch_proc_id();

	if (cpu == 0) {
		int ret = pm_device_runtime_get(INTEL_ADSP_HST_DOMAIN_DEV);

		ARG_UNUSED(ret);
		__ASSERT_NO_MSG(ret == 0);
	}

	if (state == PM_STATE_SOFT_OFF) {
		/* restore clock gating state */
		DSPCS.bootctl[cpu].bctl |=
			(core_desc[cpu].bctl & DSPBR_BCTL_WAITIPCG);

#ifdef CONFIG_ADSP_IMR_CONTEXT_SAVE
		if (cpu == 0) {
			struct imr_layout *imr_layout =
				(struct imr_layout *)(IMR_LAYOUT_ADDRESS);

			/* clean storage and restore information */
			sys_cache_data_invd_range(imr_layout, sizeof(*imr_layout));
			imr_layout->imr_state.header.adsp_imr_magic = 0;
			imr_layout->imr_state.header.imr_restore_vector = NULL;
			imr_layout->imr_state.header.imr_ram_storage = NULL;

			intel_adsp_clock_soft_off_exit();
			mem_window_idle_exit();
			soc_mp_on_d3_exit();
		}
#endif /* CONFIG_ADSP_IMR_CONTEXT_SAVE */

		soc_cpus_active[cpu] = true;
		sys_cache_data_flush_and_invd_all();
	} else if (state == PM_STATE_RUNTIME_IDLE) {
		soc_cpu_power_up(cpu);

		if (!WAIT_FOR(soc_cpu_is_powered(cpu),
			      CPU_POWERUP_TIMEOUT_USEC, k_busy_wait(HW_STATE_CHECK_DELAY))) {
			k_panic();
		}

#if CONFIG_ADSP_IDLE_CLOCK_GATING
		DSPCS.bootctl[cpu].bctl |= DSPBR_BCTL_WAITIPPG;
#else
		DSPCS.bootctl[cpu].bctl |= DSPBR_BCTL_WAITIPCG | DSPBR_BCTL_WAITIPPG;
#endif /* CONFIG_ADSP_IDLE_CLOCK_GATING */
		DSPCS.bootctl[cpu].battr &= (~LPSCTL_BATTR_MASK);

		soc_cpus_active[cpu] = true;
		sys_cache_data_flush_and_invd_all();
	} else {
		__ASSERT(false, "invalid argument - unsupported power state");
	}

	z_xt_ints_on(core_desc[cpu].intenable);

	/* We don't have the key used to lock interrupts here.
	 * Just set PS.INTLEVEL to 0.
	 */
	__asm__ volatile ("rsil a2, 0");
}
#endif /* CONFIG_PM */

#ifdef CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE
__no_optimization void arch_cpu_idle(void)
{
	uint32_t cpu = arch_proc_id();

	sys_trace_idle();

	/* Unlock and invalidate the icache if clock gating is allowed. */
	if (!(DSPCS.bootctl[cpu].bctl & DSPBR_BCTL_WAITIPCG)) {
		xthal_icache_all_unlock();
		xthal_icache_all_invalidate();
	}

	__asm__ volatile ("waiti 0");
}
#endif /* CONFIG_ARCH_HAS_CUSTOM_CPU_IDLE */