arch: xtensa: Add bootloader support

Add support for a secondary boot loader that can be used to bootstrap
multiple firmware modules.

Signed-off-by: Rander Wang <rander.wang@linux.intel.com>
This commit is contained in:
Rander Wang 2018-01-23 15:30:10 +00:00 committed by Liam Girdwood
parent aa85e2c0e9
commit 44c7093525
5 changed files with 391 additions and 2 deletions

View File

@ -0,0 +1,98 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the Intel Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
*/
/*
* Entry point from ROM - assumes :-
*
* 1) C runtime environment is initialised by ROM.
* 2) Stack is in first HPSRAM bank.
*/
#include <config.h>
#include <platform/shim.h>
#include <xtensa/corebits.h>
#include <xtensa/config/core-isa-boot.h>
/* C entry points implemented in the boot loader's C file */
.type boot_pri_core, @function
.type boot_sec_core, @function
.begin literal_prefix .boot_entry
.section .boot_entry.text, "ax"
.align 4
.global boot_entry
/*
 * boot_entry - entry point jumped to by ROM.
 * Assumes ROM has set up the C runtime and a stack in the first
 * HPSRAM bank (see header comment), so we only open a register
 * window frame and dispatch per core.
 */
boot_entry:
	/* windowed-ABI frame: 48 bytes of stack for this function */
entry a1, 48
j boot_init
.align 4
.literal_position
/* literal holding the SHIM L2 memory-hole control register address */
l2_mecs:
.word SHIM_L2_MECS
boot_init:
/* NOTE(review): this .align follows the boot_init label, so the label
 * itself is not guaranteed 4-byte aligned (padding is inserted after
 * it); consider moving .align above the label - confirm intent. */
.align 4
#if defined(CONFIG_CANNONLAKE)
/* reset memory hole */
l32r a3, l2_mecs
movi a5, 0
s32i a5, a3, 0
#endif
#if (XCHAL_DCACHE_IS_COHERENT || XCHAL_LOOP_BUFFER_SIZE) && \
XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RE_2012_0
/* Enable zero-overhead loop instr buffer,
and snoop responses, if configured. */
movi a3, (MEMCTL_SNOOP_EN | MEMCTL_L0IBUF_EN)
rsr a2, MEMCTL
or a2, a2, a3
wsr a2, MEMCTL
#endif
/* determine core we are running on: PRID == 0 is the primary core */
rsr a2, PRID
beqz a2, 1f
/* we are a secondary core, so boot it */
call8 boot_sec_core
j dead
1:
/* we are primary core so boot it */
call8 boot_pri_core
dead:
/* should never get here - we are dead */
j dead
.size boot_entry, . - boot_entry
.end literal_prefix

View File

@ -0,0 +1,206 @@
/*
* Copyright (c) 2016, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the Intel Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
*/
#include <arch/cache.h>
#include <arch/wait.h>
#include <reef/trace.h>
#include <reef/io.h>
#include <uapi/manifest.h>
#include <platform/memory.h>
/* entry point to main firmware */
extern void _ResetVector(void);
void boot_pri_core(void);
void boot_sec_core(void);
#if defined(CONFIG_CANNONLAKE)
/*
 * Word-wise memcpy used by the boot loader.
 * Copies bytes / 4 32-bit words from src to dest (a trailing remainder
 * of fewer than 4 bytes is not copied), then writes the destination
 * region back through the data cache so the copy reaches memory.
 */
static inline void bmemcpy(void *dest, void *src, size_t bytes)
{
	uint32_t *dst_word = dest;
	uint32_t *src_word = src;
	size_t words = bytes >> 2;
	size_t idx;

	for (idx = 0; idx < words; idx++)
		dst_word[idx] = src_word[idx];

	dcache_writeback_region(dest, bytes);
}
/*
 * Word-wise bzero used by the boot loader.
 * Clears bytes / 4 32-bit words at dest (trailing remainder bytes are
 * untouched), then writes the region back through the data cache.
 */
static inline void bbzero(void *dest, size_t bytes)
{
	uint32_t *dst_word = dest;
	size_t words = bytes >> 2;

	while (words--)
		*dst_word++ = 0;

	dcache_writeback_region(dest, bytes);
}
/*
 * Load one manifest module into SRAM.
 * Each module carries 3 segments: TEXT and DATA are copied out of the
 * IMR image (file offsets are relative to hdr, biased by the ELF text
 * offset), BSS is simply zero-filled at its runtime address. Any other
 * segment type is skipped.
 */
static void parse_module(struct sof_man_fw_header *hdr,
	struct sof_man_module *mod)
{
	int seg;

	/* each module has 3 segments */
	for (seg = 0; seg < 3; seg++) {
		void *dest = (void *)mod->segment[seg].v_base_addr;
		size_t length = mod->segment[seg].flags.r.length *
			HOST_PAGE_SIZE;

		switch (mod->segment[seg].flags.r.type) {
		case SOF_MAN_SEGMENT_TEXT:
		case SOF_MAN_SEGMENT_DATA:
			/* copy from IMR to SRAM */
			bmemcpy(dest,
				(void *)((int)hdr +
					(mod->segment[seg].file_offset -
					SOF_MAN_ELF_TEXT_OFFSET)),
				length);
			break;
		case SOF_MAN_SEGMENT_BSS:
			/* BSS has no file data - just clear it in SRAM */
			bbzero(dest, length);
			break;
		default:
			/* ignore unknown segment types */
			break;
		}
	}
}
/*
 * Walk the FW manifest located at IMR_BOOT_LDR_MANIFEST_BASE and copy
 * every module into SRAM. Entry 0 is this boot loader itself, so the
 * walk starts at index 1.
 */
static void parse_manifest(void)
{
	struct sof_man_fw_desc *desc =
		(struct sof_man_fw_desc *)IMR_BOOT_LDR_MANIFEST_BASE;
	struct sof_man_fw_header *hdr = &desc->header;
	int idx;

	/* copy module to SRAM - skip bootloader module */
	for (idx = 1; idx < hdr->num_module_entries; idx++)
		parse_module(hdr, sof_man_get_module(desc, idx));
}
#endif
/*
 * Poll one HPSRAM power-gating status register until it reads zero
 * (all banks in that group powered up) or the retry budget runs out.
 * delay_count cycles are waited between polls.
 * Returns 0 on success, -EIO on timeout.
 */
static int32_t hp_sram_power_wait(uint32_t status_reg, int delay_count)
{
	int timeout = 256;

	/* non-zero status means some banks are still powering up */
	while (io_reg_read(status_reg)) {
		if (timeout-- < 0)
			return -EIO;
		idelay(delay_count);
	}

	return 0;
}

/* power on HPSRAM */
static int32_t hp_sram_init(void)
{
	int delay_count = 256;
	int32_t ret;

	shim_write(SHIM_LDOCTL, SHIM_HPMEM_POWER_ON);

	/* add some delay before touching the power registers */
	idelay(delay_count);

	/* request power-up of every HPSRAM bank in both groups */
	io_reg_write(HSPGCTL0, 0);
	io_reg_write(HSRMCTL0, 0);
	io_reg_write(HSPGCTL1, 0);
	io_reg_write(HSRMCTL1, 0);

	/* the banks need a few cycles to power up - poll each group's
	 * status register until it reports all banks ready */
	ret = hp_sram_power_wait(HSPGISTS0, delay_count);
	if (ret < 0)
		return ret;

	ret = hp_sram_power_wait(HSPGISTS1, delay_count);
	if (ret < 0)
		return ret;

	/* add some delay before touching the power register again */
	idelay(delay_count);
	shim_write(SHIM_LDOCTL, SHIM_LPMEM_POWER_BYPASS);

	return 0;
}
/* boot secondary core - i.e core ID > 0 */
void boot_sec_core(void)
{
	/* TODO: prepare C stack for this core */
	/* NOTE(review): until the stack TODO is done, this spin loop is
	 * deliberate and makes the _ResetVector() call below unreachable -
	 * secondary cores park here. */
while (1);
/* now call SOF entry (unreachable while the loop above remains) */
_ResetVector();
}
/* boot primary core - i.e. core ID == 0 */
void boot_pri_core(void)
{
	int32_t ret;

	/* TODO: platform trace should write to HW IPC regs on CNL */
	platform_trace_point(TRACE_BOOT_LDR_ENTRY);

	/* bring up HPSRAM before anything is loaded into it */
	platform_trace_point(TRACE_BOOT_LDR_HPSRAM);
	ret = hp_sram_init();
	if (ret < 0) {
		platform_panic(PANIC_MEM);
		return;
	}

#if defined(CONFIG_CANNONLAKE)
	/* parse manifest and copy modules */
	platform_trace_point(TRACE_BOOT_LDR_MANIFEST);
	parse_manifest();
#endif

	/* hand control to the main firmware entry point */
	platform_trace_point(TRACE_BOOT_LDR_JUMP);
	_ResetVector();
}

View File

@ -120,6 +120,22 @@ _start:
#include "reset-unneeded.S"
#endif
#if XCHAL_HAVE_BOOTLOADER
/* Declare the level-2..5 interrupt dispatch entry points weak so this
 * code links even when a given level's vector is not provided. */
.weak _Level2FromVector
.weak _Level3FromVector
.weak _Level4FromVector
.weak _Level5FromVector
/* Stash each level-N dispatch address in its EXCSAVE_N special
 * register; a weak symbol that is absent resolves to 0.
 * NOTE(review): presumably the level-N vector stubs read EXCSAVE_N to
 * reach these handlers - confirm against the vector code. */
movi a4, _Level2FromVector
wsr a4, EXCSAVE+2
movi a4, _Level3FromVector
wsr a4, EXCSAVE+3
movi a4, _Level4FromVector
wsr a4, EXCSAVE+4
movi a4, _Level5FromVector
wsr a4, EXCSAVE+5
#endif
// Initialize the stack pointer.
// See the "ABI and Software Conventions" chapter in the
// Xtensa ISA Reference manual for details.

View File

@ -0,0 +1,57 @@
/*
* Copyright (c) 2017, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the Intel Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Rander Wang <rander.wang@intel.com>
*/
/*
* Entry point from boot loader.
* Fix the link address of this entry to REEF_TEXT_START so that
* it is easy for the boot loader to jump to the baseFW, because
* the boot loader and baseFW are in different ELF files.
*/
// Exports
.global _MainEntry
/**************************************************************************/
.begin literal_prefix .MainEntry
.section .MainEntry.text, "ax"
.align 4
/* NOTE(review): _MainEntry is already declared .global above - this
 * second directive is redundant but harmless. */
.global _MainEntry
/*
 * _MainEntry - fixed-address trampoline into the base firmware.
 * The boot loader jumps here (REEF_TEXT_START) and we tail into the
 * firmware's _start via call0, which works regardless of the ABI.
 */
_MainEntry:
call0 _start
.size _MainEntry, . - _MainEntry
.end literal_prefix

View File

@ -35,6 +35,10 @@
#include "xtos-internal.h"
#include <config.h>
#if XCHAL_HAVE_BOOTLOADER
#include <platform/memory.h>
#endif
// The following reset vector avoids initializing certain registers already
// initialized by processor reset. But it does initialize some of them
// anyway, for minimal support of warm restart (restarting in software by
@ -290,6 +294,7 @@ _ResetHandler:
s32i a0, a2, 0 // clear sync variable
.Ldonesync:
#endif
#if XCHAL_HAVE_EXTERN_REGS && XCHAL_HAVE_MP_RUNSTALL
/* On core 0, this releases other cores. On other cores this has no effect, because
runstall control is unconnected. */
@ -375,7 +380,6 @@ _ResetHandler:
movi a2, _memmap_cacheattr_reset /* note: absolute symbol, not a ptr */
cacheattr_set /* set CACHEATTR from a2 (clobbers a3-a8) */
#endif
/* Now that caches are initialized, cache coherency can be enabled. */
#if XCHAL_DCACHE_IS_COHERENT
# if XCHAL_HAVE_EXTERN_REGS && XCHAL_HAVE_MX && (XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RE_2012_0)
@ -574,7 +578,15 @@ unpackdone:
* needed (with -mlongcalls) which it doesn't with j or jx.
* Note: This needs to be call0 regardless of the selected ABI.
*/
call0 _start // jump to _start (in crt1-*.S)
#if XCHAL_HAVE_BOOTLOADER
/*ToDo refine the _start*/
movi a0, REEF_TEXT_START
callx0 a0
#else
call0 _start // jump to _start (in crt1-*.S)
#endif
/* does not return */
#else /* XCHAL_HAVE_HALT */