/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * NOTE(review): every #include directive below has lost its header name
 * (file-mangling damage).  This translation unit cannot compile until the
 * header names are restored from the upstream revision of this file.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "soc.h"

#ifdef CONFIG_DYNAMIC_INTERRUPTS
/* NOTE(review): header name lost here as well -- restore from upstream. */
#include
#endif

#define LOG_LEVEL CONFIG_SOC_LOG_LEVEL
/* NOTE(review): header name lost -- presumably the Zephyr logging header,
 * since LOG_MODULE_REGISTER() follows.  Confirm against upstream.
 */
#include
LOG_MODULE_REGISTER(soc);

/* LP GPDMA shim register layout: each DMA engine owns a 0x100-byte
 * register window starting at SHIM_GPDMA_BASE_OFFSET; the clock-control
 * register sits at offset 0x4 inside that window.
 */
# define SHIM_GPDMA_BASE_OFFSET 0x6500
# define SHIM_GPDMA_BASE(x) (SHIM_GPDMA_BASE_OFFSET + (x) * 0x100)
# define SHIM_GPDMA_CLKCTL(x) (SHIM_GPDMA_BASE(x) + 0x4)
/* Bit written into SHIM_GPDMA_CLKCTL(x) below to prevent clock gating */
# define SHIM_CLKCTL_LPGPDMAFDCGB BIT(0)

#ifdef CONFIG_PM
/* Cached/uncached SRAM alias window constants */
#define SRAM_ALIAS_BASE 0x9E000000
#define SRAM_ALIAS_MASK 0xFF000000
#define SRAM_ALIAS_OFFSET 0x20000000

#define L2_INTERRUPT_NUMBER 4
/*
 * NOTE(review): a large span of the file was lost at this point.  The
 * macro body below is truncated mid-expression (presumably it read
 * "(1 << L2_INTERRUPT_NUMBER)"), and the statement fused onto it belongs
 * to a power-off routine (pm_state_set-style) whose opening -- signature,
 * the declarations of `hdr`, `imr_hdr`/`hpsram_mask`, and the leading
 * branch of the if/else chain closed below -- is missing entirely.
 * Recover the lost span from upstream before building.
 */
#define L2_INTERRUPT_MASK (1<imr_state.header = hdr;

#ifdef CONFIG_ADSP_POWER_DOWN_HPSRAM
	/* turn off all HPSRAM banks - get a full bitmap */
	for (int i = 0; i < HPSRAM_SEGMENTS; i++)
		hpsram_mask[i] = HPSRAM_MEMMASK(i);
#endif /* CONFIG_ADSP_POWER_DOWN_HPSRAM */
	/* do power down - this function won't return */
	power_down_cavs(true, uncache_to_cache(&hpsram_mask[0]));
	} else {
		k_cpu_idle();
	}
	} else {
		__ASSERT(false, "invalid argument - unsupported power state");
	}
}

/* Handle SOC specific activity after Low Power Mode Exit */
void pm_state_exit_post_ops(enum pm_state state, uint8_t substate_id)
{
	ARG_UNUSED(substate_id);

	uint32_t cpu = arch_proc_id();

	if (state == PM_STATE_SOFT_OFF) {
		/* Mark this core as active again before interrupts come back. */
		soc_cpus_active[cpu] = true;
		/* Data caches may be stale after the power-off path. */
		sys_cache_data_flush_and_invd_all();
		/* Restore the interrupt-enable mask saved before suspend
		 * (core_desc[] is declared elsewhere in this file).
		 */
		z_xt_ints_on(core_desc[cpu].intenable);
	} else {
		__ASSERT(false, "invalid argument - unsupported power state");
	}
}
#endif /* CONFIG_PM */

#ifdef CONFIG_ARCH_CPU_IDLE_CUSTOM
/* xt-clang removes any NOPs more than 8. So we need to set
 * no optimization to avoid those NOPs from being removed.
 *
 * This function is simply enough and full of hand written
 * assembly that optimization is not really meaningful
 * anyway. So we can skip optimization unconditionally.
 * Re-evaluate its use and add #ifdef if this assumption
 * is no longer valid.
 */
__no_optimization void arch_cpu_idle(void)
{
	sys_trace_idle();

	/* Just spin forever with interrupts unmasked, for platforms
	 * where WAITI can't be used or where its behavior is
	 * complicated (Intel DSPs will power gate on idle entry under
	 * some circumstances)
	 */
	if (IS_ENABLED(CONFIG_XTENSA_CPU_IDLE_SPIN)) {
		/* rsil 0 lowers the interrupt level so interrupts are
		 * unmasked while the jump-to-self below busy-waits.
		 */
		__asm__ volatile("rsil a0, 0");
		__asm__ volatile("loop_forever: j loop_forever");
		return;
	}

	/* Cribbed from SOF: workaround for a bug in some versions of
	 * the LX6 IP. Preprocessor ugliness avoids the need to
	 * figure out how to get the compiler to unroll a loop.
	 */
	if (IS_ENABLED(CONFIG_XTENSA_WAITI_BUG)) {
#define NOP4 __asm__ volatile("nop; nop; nop; nop");
#define NOP32 NOP4 NOP4 NOP4 NOP4 NOP4 NOP4 NOP4 NOP4
#define NOP128() NOP32 NOP32 NOP32 NOP32
		/* Emit 128 literal NOPs ahead of the idle instruction. */
		NOP128();
#undef NOP128
#undef NOP32
#undef NOP4
		__asm__ volatile("isync; extw");
	}

	/* Wait for an interrupt with the interrupt level fully lowered. */
	__asm__ volatile ("waiti 0");
}
#endif /* CONFIG_ARCH_CPU_IDLE_CUSTOM */

/* One-time clock and power-gating bring-up for the DSP, run from IMR
 * early in boot (before HP SRAM code is guaranteed to execute).
 */
__imr void power_init(void)
{
	/* Request HP ring oscillator and
	 * wait for status to indicate it's ready.
	 */
	CAVS_SHIM.clkctl |= CAVS_CLKCTL_RHROSCC;
	while ((CAVS_SHIM.clkctl & CAVS_CLKCTL_RHROSCC) != CAVS_CLKCTL_RHROSCC) {
		k_busy_wait(10);
	}

	/* Request HP Ring Oscillator
	 * Select HP Ring Oscillator
	 * High Power Domain PLL Clock Select divide by 2
	 * Low Power Domain PLL Clock Select divide by 4
	 * Disable Tensilica Core(s) Prevent Local Clock Gating
	 *   - Disabling "prevent clock gating" means allowing clock gating
	 */
	CAVS_SHIM.clkctl = (CAVS_CLKCTL_RHROSCC |
			    CAVS_CLKCTL_OCS |
			    CAVS_CLKCTL_LMCS);

	/* Prevent LP GPDMA 0 & 1 clock gating */
	sys_write32(SHIM_CLKCTL_LPGPDMAFDCGB, SHIM_GPDMA_CLKCTL(0));
	sys_write32(SHIM_CLKCTL_LPGPDMAFDCGB, SHIM_GPDMA_CLKCTL(1));

	/* Disable power gating for first cores */
	CAVS_SHIM.pwrctl |= CAVS_PWRCTL_TCPDSPPG(0);

	/* On cAVS 1.8+, we must demand ownership of the timestamping
	 * and clock generator registers.  Lacking the former will
	 * prevent wall clock timer interrupts from arriving, even
	 * though the device itself is operational.
	 */
	sys_write32(GENO_MDIVOSEL | GENO_DIOPTOSEL, DSP_INIT_GENO);
	sys_write32(IOPO_DMIC_FLAG | IOPO_I2SSEL_MASK, DSP_INIT_IOPO);
}