/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/* Include esp-idf headers first to avoid redefining BIT() macro */
#include <soc/dport_reg.h>
#include <soc/gpio_periph.h>
#include <soc/rtc_periph.h>

#include <zephyr/drivers/interrupt_controller/intc_esp32.h>
#include <soc.h>
#include <ksched.h>
#include <zephyr/device.h>
#include <zephyr/kernel.h>
#include <zephyr/spinlock.h>
#include <zephyr/kernel_structs.h>

#define Z_REG(base, off) (*(volatile uint32_t *)((base) + (off)))

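/* Raw register access into the (undocumented) RTC_CNTL and DPORT
 * blocks; esp_appcpu_start() below uses these to un-stall, clock-gate
 * and reset the APPCPU.
 */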
#define RTC_CNTL_BASE         0x3ff48000
#define RTC_CNTL_OPTIONS0     Z_REG(RTC_CNTL_BASE, 0x0)
#define RTC_CNTL_SW_CPU_STALL Z_REG(RTC_CNTL_BASE, 0xac)

#define DPORT_BASE            0x3ff00000
#define DPORT_APPCPU_CTRL_A   Z_REG(DPORT_BASE, 0x02C)
#define DPORT_APPCPU_CTRL_B   Z_REG(DPORT_BASE, 0x030)
#define DPORT_APPCPU_CTRL_C   Z_REG(DPORT_BASE, 0x034)

#ifdef CONFIG_SMP
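/* Startup record handed from arch_cpu_start() (running on the PROCPU)
 * to appcpu_entry2() (running on the APPCPU); the alive flag is the
 * only completion signal back to the caller.
 */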
struct cpustart_rec {
	int cpu;
	arch_cpustart_t fn;
	char *stack_top;
	void *arg;
	int vecbase;
	volatile int *alive;
};

volatile struct cpustart_rec *start_rec;
static void *appcpu_top;
static bool cpus_active[CONFIG_MP_MAX_NUM_CPUS];
#endif
static struct k_spinlock loglock;

/* Note that the logging done here is ACTUALLY REQUIRED FOR RELIABLE
 * OPERATION! At least one particular board will experience spurious
 * hangs during initialization (usually the APPCPU fails to start at
 * all) without these calls present. It's not just time -- careful
 * use of k_busy_wait() (and even hand-crafted timer loops using the
 * Xtensa timer SRs directly) that duplicates the timing exactly still
 * sees hangs. Something is happening inside the ROM UART code that
 * magically makes the startup sequence reliable.
 *
 * Leave this in place until the sequence is understood better.
 *
 * (Note that the use of the spinlock is cosmetic only -- if you take
 * it out the messages will interleave across the two CPUs but startup
 * will still be reliable.)
 */
void smp_log(const char *msg)
{
#ifndef CONFIG_SOC_ESP32_PROCPU
	k_spinlock_key_t key = k_spin_lock(&loglock);

	while (*msg) {
		esp_rom_uart_tx_one_char(*msg++);
	}
	esp_rom_uart_tx_one_char('\r');
	esp_rom_uart_tx_one_char('\n');

	k_spin_unlock(&loglock, key);
#endif
}

#ifdef CONFIG_SMP
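/* SMP boot chain: esp_appcpu_start() points the ROM at appcpu_entry1(),
 * which runs on a ROM-chosen stack, switches to the Zephyr-provided
 * stack via z_appcpu_stack_switch(), and lands in appcpu_entry2(),
 * which finally calls start_rec->fn().
 */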
static void appcpu_entry2(void)
{
	volatile int ps, ie;

	/* Copy over VECBASE from the main CPU for an initial value
	 * (will need to revisit this if we ever allow a user API to
	 * change interrupt vectors at runtime). Make sure interrupts
	 * are locally disabled, then synthesize a PS value that will
	 * enable them for the user code to pass to irq_unlock()
	 * later.
	 */
	__asm__ volatile("rsr.PS %0" : "=r"(ps));
	ps &= ~(PS_EXCM_MASK | PS_INTLEVEL_MASK);
	__asm__ volatile("wsr.PS %0" : : "r"(ps));

	ie = 0;
	__asm__ volatile("wsr.INTENABLE %0" : : "r"(ie));
	__asm__ volatile("wsr.VECBASE %0" : : "r"(start_rec->vecbase));
	__asm__ volatile("rsync");

	/* Set up the CPU pointer. Really this should be xtensa arch
	 * code, not in the ESP-32 layer.
	 */
	_cpu_t *cpu = &_kernel.cpus[1];

	__asm__ volatile("wsr.MISC0 %0" : : "r"(cpu));

	smp_log("ESP32: APPCPU running");

	*start_rec->alive = 1;
	start_rec->fn(start_rec->arg);
}

/* Defines a locally callable "function" named z_appcpu_stack_switch().
 * The first argument (in register a2 post-ENTRY) is the new stack
 * pointer to go into register a1. The second (a3) is the entry point.
 * Because this never returns, a0 is used as a scratch register then
 * set to zero for the called function (a null return value is the
 * signal for "top of stack" to the debugger).
 */
void z_appcpu_stack_switch(void *stack, void *entry);
__asm__("\n"
	".align 4"			"\n"
	"z_appcpu_stack_switch:"	"\n\t"

	"entry a1, 16"			"\n\t"

	/* Subtle: we want the stack to be 16 bytes higher than the
	 * top on entry to the called function, because the ABI forces
	 * it to assume that those bytes are for its caller's A0-A3
	 * spill area. (In fact ENTRY instructions with stack
	 * adjustments less than 16 are a warning condition in the
	 * assembler.) But we aren't a caller, have no bit set in
	 * WINDOWSTART and will never be asked to spill anything.
	 * Those 16 bytes would otherwise be wasted on the stack, so
	 * adjust.
	 */
	"addi a1, a2, 16"		"\n\t"

	/* Clear WINDOWSTART so called functions never try to spill
	 * our callers' registers into the now-garbage stack pointers
	 * they contain. No need to set the bit corresponding to
	 * WINDOWBASE, our C callee will do that when it does an
	 * ENTRY.
	 */
	"movi a0, 0"			"\n\t"
	"wsr.WINDOWSTART a0"		"\n\t"

	/* Clear the CALLINC field of PS (you would think the ENTRY
	 * above would have done that, but it doesn't) so the callee's
	 * ENTRY doesn't shift the registers.
	 */
	"rsr.PS a0"			"\n\t"
	"movi a2, 0xfffcffff"		"\n\t"
	"and a0, a0, a2"		"\n\t"
	"wsr.PS a0"			"\n\t"

	"rsync"				"\n\t"
	"movi a0, 0"			"\n\t"

	"jx a3"				"\n\t");

/* Carefully constructed to use no stack beyond compiler-generated ABI
 * instructions. WE DO NOT KNOW WHERE THE STACK FOR THIS FUNCTION IS.
 * The ROM library just picks a spot on its own with no input from our
 * app linkage and tells us nothing about it until we're already
 * running.
 */
static void appcpu_entry1(void)
{
	z_appcpu_stack_switch(appcpu_top, appcpu_entry2);
}
#endif

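/* Note that esp_appcpu_start() sits outside the CONFIG_SMP guards
 * above and below, so it remains available to AMP-style configurations
 * as well (see the AMP note inside the function body).
 */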
/* The calls and sequencing here were extracted from the ESP-32
 * FreeRTOS integration with just a tiny bit of cleanup. None of the
 * calls or registers shown are documented, so treat this code with
 * extreme caution.
 */
void esp_appcpu_start(void *entry_point)
{
	smp_log("ESP32: starting APPCPU");

	/* These two calls are wrapped in a "stall_other_cpu" API in
	 * esp-idf. But in this context the appcpu is stalled by
	 * definition, so we can skip that complexity and just call
	 * the ROM directly.
	 */
	esp_rom_Cache_Flush(1);
	esp_rom_Cache_Read_Enable(1);

	esp_rom_ets_set_appcpu_boot_addr((void *)0);

	RTC_CNTL_SW_CPU_STALL &= ~RTC_CNTL_SW_STALL_APPCPU_C1;
	RTC_CNTL_OPTIONS0 &= ~RTC_CNTL_SW_STALL_APPCPU_C0;
	DPORT_APPCPU_CTRL_B |= DPORT_APPCPU_CLKGATE_EN;
	DPORT_APPCPU_CTRL_C &= ~DPORT_APPCPU_RUNSTALL;

	/* Pulse the RESETTING bit */
	DPORT_APPCPU_CTRL_A |= DPORT_APPCPU_RESETTING;
	DPORT_APPCPU_CTRL_A &= ~DPORT_APPCPU_RESETTING;

	/* Extracted from the smp_log() note above: THIS IS REQUIRED FOR
	 * RELIABLE AMP OPERATION AS WELL, so please don't remove the
	 * dummy UART writes below!
	 *
	 * Note that the logging done here is ACTUALLY REQUIRED FOR RELIABLE
	 * OPERATION! At least one particular board will experience spurious
	 * hangs during initialization (usually the APPCPU fails to start at
	 * all) without these calls present. It's not just time -- careful
	 * use of k_busy_wait() (and even hand-crafted timer loops using the
	 * Xtensa timer SRs directly) that duplicates the timing exactly still
	 * sees hangs. Something is happening inside the ROM UART code that
	 * magically makes the startup sequence reliable.
	 *
	 * Leave this in place until the sequence is understood better.
	 */
	esp_rom_uart_tx_one_char('\r');
	esp_rom_uart_tx_one_char('\r');
	esp_rom_uart_tx_one_char('\n');

	/* Seems weird that you set the boot address AFTER starting
	 * the CPU, but this is how they do it...
	 */
	esp_rom_ets_set_appcpu_boot_addr((void *)entry_point);

	smp_log("ESP32: APPCPU start sequence complete");
}

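/* Illustrative sketch only (hypothetical, not part of this driver):
 * an AMP-style caller could hand esp_appcpu_start() its own entry
 * point, e.g.
 *
 *     extern void my_appcpu_main(void);
 *     esp_appcpu_start((void *)my_appcpu_main);
 *
 * The SMP path below instead passes appcpu_entry1 from arch_cpu_start().
 */
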
#ifdef CONFIG_SMP
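/* Cross-core IPI plumbing: arch_sched_ipi() below latches the local
 * core's DPORT_CPU_INTR_FROM_CPU_n bit to raise the IPI interrupt, and
 * esp_crosscore_isr() clears it again after dispatching z_sched_ipi().
 */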
IRAM_ATTR static void esp_crosscore_isr(void *arg)
{
	ARG_UNUSED(arg);

	/* Right now this interrupt is only used for IPIs */
	z_sched_ipi();

	const int core_id = esp_core_id();

	if (core_id == 0) {
		DPORT_WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, 0);
	} else {
		DPORT_WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_1_REG, 0);
	}
}

void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz,
		    arch_cpustart_t fn, void *arg)
{
	volatile struct cpustart_rec sr;
	int vb;
	volatile int alive_flag;

	__ASSERT(cpu_num == 1, "ESP-32 supports only two CPUs");

	__asm__ volatile("rsr.VECBASE %0\n\t" : "=r"(vb));

	alive_flag = 0;

	sr.cpu = cpu_num;
	sr.fn = fn;
	sr.stack_top = K_KERNEL_STACK_BUFFER(stack) + sz;
	sr.arg = arg;
	sr.vecbase = vb;
	sr.alive = &alive_flag;

	appcpu_top = K_KERNEL_STACK_BUFFER(stack) + sz;

	start_rec = &sr;

	esp_appcpu_start(appcpu_entry1);

	while (!alive_flag) {
	}

	cpus_active[0] = true;
	cpus_active[cpu_num] = true;

	esp_intr_alloc(DT_IRQN(DT_NODELABEL(ipi0)),
		       ESP_INTR_FLAG_IRAM,
		       esp_crosscore_isr,
		       NULL,
		       NULL);

	esp_intr_alloc(DT_IRQN(DT_NODELABEL(ipi1)),
		       ESP_INTR_FLAG_IRAM,
		       esp_crosscore_isr,
		       NULL,
		       NULL);

	smp_log("ESP32: APPCPU initialized");
}

void arch_sched_ipi(void)
{
	const int core_id = esp_core_id();

	if (core_id == 0) {
		DPORT_WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_0_REG, DPORT_CPU_INTR_FROM_CPU_0);
	} else {
		DPORT_WRITE_PERI_REG(DPORT_CPU_INTR_FROM_CPU_1_REG, DPORT_CPU_INTR_FROM_CPU_1);
	}
}

IRAM_ATTR bool arch_cpu_active(int cpu_num)
{
	return cpus_active[cpu_num];
}
#endif /* CONFIG_SMP */