/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/cpu.h>
#include "boot.h"
#include "macro_priv.inc"

_ASM_FILE_PROLOGUE

/*
 * Platform specific pre-C init code
 *
 * Note: - Stack is not yet available
 *       - x23, x24 and x25 must be preserved
 */

WTEXT(z_arm64_el3_plat_prep_c)
SECTION_FUNC(TEXT,z_arm64_el3_plat_prep_c)
	ret

WTEXT(z_arm64_el2_plat_prep_c)
SECTION_FUNC(TEXT,z_arm64_el2_plat_prep_c)
	ret

WTEXT(z_arm64_el1_plat_prep_c)
SECTION_FUNC(TEXT,z_arm64_el1_plat_prep_c)
	ret
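
/*
 * These are weak no-op stubs (WTEXT); a platform may override any of them
 * with a strong definition. As a sketch only (my_soc_early_init is a
 * hypothetical helper, not part of this file), an override must respect the
 * constraints above: no stack, and x23-x25 preserved:
 *
 *	GTEXT(z_arm64_el1_plat_prep_c)
 *	SECTION_FUNC(TEXT,z_arm64_el1_plat_prep_c)
 *		mov	x26, lr			// no stack: park lr in a spare reg
 *		bl	my_soc_early_init	// must not touch x23-x26
 *		ret	x26
 */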

/*
 * Set the minimum necessary to safely call C code
 */

GTEXT(__reset_prep_c)
SECTION_SUBSEC_FUNC(TEXT,_reset_section,__reset_prep_c)
	/* return address: x23 */
	mov	x23, lr

	switch_el x0, 3f, 2f, 1f
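
	/*
	 * switch_el (from macro_priv.inc) reads CurrentEL and branches to
	 * the local label matching the exception level we booted in, so
	 * only one of the three paths below runs.
	 */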
3:
	/* Reinitialize SCTLR from scratch in EL3 */
	ldr	w0, =(SCTLR_EL3_RES1 | SCTLR_I_BIT | SCTLR_SA_BIT)
	msr	sctlr_el3, x0
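	/*
	 * Rebuilding SCTLR_EL3 from its RES1 set plus I (instruction
	 * cacheability) and SA (SP alignment check) puts the register in a
	 * known state, whatever reset left behind.
	 */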

	/* Custom plat prep_c init */
	bl	z_arm64_el3_plat_prep_c

	/* Set SP_EL1 */
	msr	sp_el1, x24

	b	out

2:
	/* Disable alignment fault checking */
	mrs	x0, sctlr_el2
	bic	x0, x0, SCTLR_A_BIT
	msr	sctlr_el2, x0
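	/*
	 * Clearing SCTLR.A disables alignment fault checking for Normal
	 * memory, so compiler-generated unaligned accesses in the C code we
	 * are about to run do not trap. (Device memory is always alignment
	 * checked, regardless of this bit.)
	 */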

	/* Custom plat prep_c init */
	bl	z_arm64_el2_plat_prep_c

	/* Set SP_EL1 */
	msr	sp_el1, x24

	b	out

1:
	/* Disable alignment fault checking */
	mrs	x0, sctlr_el1
	bic	x0, x0, SCTLR_A_BIT
	msr	sctlr_el1, x0

	/* Custom plat prep_c init */
	bl	z_arm64_el1_plat_prep_c

	/*
	 * Set SP_EL1. sp_el1 is not accessible from EL1 itself, so select
	 * SP_EL1 via SPSel and write it through sp.
	 */
	msr	SPSel, #1
	mov	sp, x24

out:
	isb

	/* Select SP_EL0 */
	msr	SPSel, #0

	/* Initialize stack */
	mov	sp, x24

	ret	x23
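	/*
	 * Return via x23 rather than lr: the bl to the plat hook clobbered
	 * lr and there is no stack yet to save it on, hence the copy made
	 * on entry.
	 */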

/*
 * Reset vector
 *
 * Runs when the system comes out of reset. The processor is in thread mode
 * with privileged level. At this point, neither SP_EL0 nor SP_ELx points to
 * a valid area in SRAM.
 */

GTEXT(__reset)
SECTION_SUBSEC_FUNC(TEXT,_reset_section,__reset)

GTEXT(__start)
SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)

#ifdef CONFIG_WAIT_AT_RESET_VECTOR
resetwait:
	wfe
	b	resetwait
#endif

	/* Mask all exceptions */
	msr	DAIFSet, #0xf
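	/*
	 * DAIFSet #0xf sets all four mask bits at once: Debug, SError, IRQ
	 * and FIQ. Nothing can interrupt the boot path until the bits are
	 * deliberately cleared later on.
	 */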

#if CONFIG_MP_NUM_CPUS > 1

	ldr	x0, =arm64_cpu_boot_params
	get_cpu_id x1

	/*
	 * Cores may leave reset simultaneously, so the mpid slot in
	 * arm64_cpu_boot_params must be claimed with an atomic
	 * load-exclusive/store-exclusive pair: the first core to store its
	 * mpid becomes the primary core.
	 */
	ldaxr	x2, [x0, #BOOT_PARAM_MPID_OFFSET]
	cmp	x2, #-1
	bne	1f

	/* try to store x1 (mpid) */
	stlxr	w3, x1, [x0]

	/* If it succeeds, we are the primary core */
	cbz	w3, primary_core
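	/*
	 * ldaxr/stlxr are an exclusive pair: the store succeeds (w3 == 0)
	 * only if no other observer wrote the location since our load. A
	 * failed store, or a slot already claimed, drops us into the wait
	 * loop below.
	 */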

	/* loop until our turn comes */
1:	dmb	ld
	ldr	x2, [x0, #BOOT_PARAM_MPID_OFFSET]
	cmp	x1, x2
	bne	1b

	/* we can now load our stack pointer value and move on */
	ldr	x24, [x0, #BOOT_PARAM_SP_OFFSET]
	ldr	x25, =z_arm64_secondary_prep_c
	b	2f
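	/*
	 * The wait loop above ends when the primary core rewrites the mpid
	 * slot with a waiting core's own mpid; on the Zephyr side the SMP
	 * startup code fills in the sp/entry boot params before releasing
	 * each secondary in turn.
	 */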

primary_core:
#endif
	/* load primary stack and entry point */
	ldr	x24, =(z_interrupt_stacks + CONFIG_ISR_STACK_SIZE)
	ldr	x25, =z_arm64_prep_c
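	/*
	 * Convention from here on: x24 holds the initial stack pointer and
	 * x25 the C routine to tail-call once EL init is done; both survive
	 * __reset_prep_c. Stacks grow downward, so the primary SP is the
	 * top of the first interrupt stack (base + CONFIG_ISR_STACK_SIZE).
	 */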
2:
	/* Prepare for calling C code */
	bl	__reset_prep_c

	/*
	 * Initialize the interrupt stack with 0xaa so stack utilization
	 * can be measured. This needs to be done before using the stack
	 * so that we don't clobber any data.
	 */
#ifdef CONFIG_INIT_STACKS
	ldr	x0, =(z_interrupt_stacks)
	sub	x9, sp, #8
	mov	x10, 0xaaaaaaaaaaaaaaaa

stack_init_loop:
	cmp	x0, x9
	beq	stack_init_done
	str	x10, [x0], #8
	b	stack_init_loop

stack_init_done:
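	/*
	 * The loop paints [z_interrupt_stacks, sp - 8) with the 0xaa
	 * pattern, stopping short of the live SP so nothing in use gets
	 * overwritten; stack-usage tooling later finds the deepest
	 * clobbered pattern word to report peak utilization.
	 */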
#endif

	/* Platform hook for highest EL */
	bl	z_arm64_el_highest_init

switch_el:
	switch_el x0, 3f, 2f, 1f

3:
	/* EL3 init */
	bl	z_arm64_el3_init

	/* Get next EL */
	adr	x0, switch_el
	bl	z_arm64_el3_get_next_el
	eret
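	/*
	 * z_arm64_el3_get_next_el() receives the switch_el address in x0
	 * and programs elr_el3/spsr_el3 from it, so this eret re-enters the
	 * same dispatch point at the EL it drops to.
	 */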

2:
	/* EL2 init */
	bl	z_arm64_el2_init

	/* Move to EL1 with all exceptions masked */
	mov_imm	x0, (SPSR_DAIF_MASK | SPSR_MODE_EL1T)
	msr	spsr_el2, x0

	adr	x0, 1f
	msr	elr_el2, x0
	eret
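	/*
	 * SPSR_MODE_EL1T selects EL1 using SP_EL0 (the "t" form), matching
	 * the SPSel #0 state left by __reset_prep_c, and SPSR_DAIF_MASK
	 * keeps all exceptions masked across the eret into label 1 below.
	 */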

1:
	/* EL1 init */
	bl	z_arm64_el1_init

	/* We want to use SP_ELx from now on */
	msr	SPSel, #1

	/* Enable SError interrupts */
	msr	DAIFClr, #(DAIFCLR_ABT_BIT)
	isb

	ret	x25 /* either z_arm64_prep_c or z_arm64_secondary_prep_c */
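
/*
 * ret x25 tail-calls into C on the stack established by __reset_prep_c:
 * z_arm64_prep_c finishes early init for the primary core (zeroing BSS,
 * bringing up the MMU) before starting the kernel, while
 * z_arm64_secondary_prep_c routes secondary cores into the SMP startup path.
 */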