/* Copyright (c) 2022 Intel corporation
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/kernel/smp.h>
#include <zephyr/spinlock.h>
#include <kswap.h>
#include <kernel_internal.h>

static atomic_t global_lock;

/**
 * Flag to tell a recently powered up CPU to start
 * its initialization routine.
 *
 * 0 to tell the powered up CPU to wait.
 * 1 to tell the powered up CPU to continue initialization.
 */
static atomic_t cpu_start_flag;

/**
 * Flag to tell the caller that the target CPU is now
 * powered up and ready to be initialized.
 *
 * 0 if the target CPU is not yet ready.
 * 1 if the target CPU has powered up and is ready to be initialized.
 */
static atomic_t ready_flag;

/**
 * Struct holding the function to be called before handing off
 * to the scheduler, and its argument.
 */
static struct cpu_start_cb {
	/**
	 * Function to be called before handing off to the scheduler.
	 * Can be NULL.
	 */
	smp_init_fn fn;

	/** Argument to @ref cpu_start_fn.fn. */
	void *arg;

	/** Invoke the scheduler after the CPU has started if true. */
	bool invoke_sched;

#ifdef CONFIG_SYS_CLOCK_EXISTS
	/** True if smp_timer_init() needs to be called. */
	bool reinit_timer;
#endif /* CONFIG_SYS_CLOCK_EXISTS */
} cpu_start_fn;

static struct k_spinlock cpu_start_lock;

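/*
 * Recursive, kernel-wide lock backing the "global IRQ lock" semantics on
 * SMP builds (callers normally reach it through irq_lock()/irq_unlock()
 * rather than calling it directly). The first acquisition on a thread
 * spins for ownership of global_lock; nested acquisitions only bump
 * _current->base.global_lock_count. The returned key is the architecture
 * IRQ key and must be passed back to z_smp_global_unlock().
 *
 * Minimal usage sketch:
 *
 *	unsigned int key = z_smp_global_lock();
 *	... critical section protected against all CPUs ...
 *	z_smp_global_unlock(key);
 */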
unsigned int z_smp_global_lock(void)
{
	unsigned int key = arch_irq_lock();

	if (!_current->base.global_lock_count) {
		while (!atomic_cas(&global_lock, 0, 1)) {
			arch_spin_relax();
		}
	}

	_current->base.global_lock_count++;

	return key;
}

void z_smp_global_unlock(unsigned int key)
{
	if (_current->base.global_lock_count != 0U) {
		_current->base.global_lock_count--;

		if (!_current->base.global_lock_count) {
			(void)atomic_clear(&global_lock);
		}
	}

	arch_irq_unlock(key);
}

/* Called from within z_swap(), so assumes lock already held */
void z_smp_release_global_lock(struct k_thread *thread)
{
	if (!thread->base.global_lock_count) {
		(void)atomic_clear(&global_lock);
	}
}

/* Tiny delay that relaxes bus traffic to avoid spamming a shared
 * memory bus looking at an atomic variable
 */
static inline void local_delay(void)
{
	for (volatile int i = 0; i < 1000; i++) {
	}
}

static void wait_for_start_signal(atomic_t *start_flag)
{
	/* Wait for the signal to begin scheduling */
	while (!atomic_get(start_flag)) {
		local_delay();
	}
}

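/*
 * Entry point handed to arch_cpu_start() for a newly started CPU.
 * Running on the new CPU, it signals the caller through ready_flag,
 * waits for cpu_start_flag, optionally re-initializes the timer and
 * runs the user-supplied callback, then (unless told not to) hands
 * control to the scheduler via z_swap_unlocked().
 */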
static inline void smp_init_top(void *arg)
{
	struct cpu_start_cb csc = arg ? *(struct cpu_start_cb *)arg : (struct cpu_start_cb){0};

	/* Let start_cpu() know that this CPU has powered up. */
	(void)atomic_set(&ready_flag, 1);

	/* Wait for the CPU start caller to signal that
	 * we can start initialization.
	 */
	wait_for_start_signal(&cpu_start_flag);

	if ((arg == NULL) || csc.invoke_sched) {
		/* Initialize the dummy thread struct so that
		 * the scheduler can schedule actual threads to run.
		 */
		z_dummy_thread_init(&_thread_dummy);
	}

#ifdef CONFIG_SYS_CLOCK_EXISTS
	if ((arg == NULL) || csc.reinit_timer) {
		smp_timer_init();
	}
#endif /* CONFIG_SYS_CLOCK_EXISTS */

	/* Do additional initialization steps if needed. */
	if (csc.fn != NULL) {
		csc.fn(csc.arg);
	}

	if ((arg != NULL) && !csc.invoke_sched) {
		/* Don't invoke scheduler. */
		return;
	}

	/* Let scheduler decide what thread to run next. */
	z_swap_unlocked();

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

static void start_cpu(int id, struct cpu_start_cb *csc)
{
	/* Clear the ready flag so the newly powered up CPU can
	 * signal that it has powered up.
	 */
	(void)atomic_clear(&ready_flag);

	/* Power up the CPU */
	arch_cpu_start(id, z_interrupt_stacks[id], CONFIG_ISR_STACK_SIZE,
		       smp_init_top, csc);

	/* Wait until the newly powered up CPU signals that
	 * it has powered up.
	 */
	while (!atomic_get(&ready_flag)) {
		local_delay();
	}
}

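/*
 * Illustrative sketch only; my_percpu_setup is a made-up hook, not part
 * of this file. A typical caller starts a secondary CPU and has it run
 * a per-CPU setup function on that CPU before it enters the scheduler:
 *
 *	static void my_percpu_setup(void *arg)
 *	{
 *		... board/SoC specific per-CPU setup ...
 *	}
 *
 *	k_smp_cpu_start(1, my_percpu_setup, NULL);
 */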
void k_smp_cpu_start(int id, smp_init_fn fn, void *arg)
{
	k_spinlock_key_t key = k_spin_lock(&cpu_start_lock);

	cpu_start_fn.fn = fn;
	cpu_start_fn.arg = arg;
	cpu_start_fn.invoke_sched = true;

#ifdef CONFIG_SYS_CLOCK_EXISTS
	cpu_start_fn.reinit_timer = true;
#endif /* CONFIG_SYS_CLOCK_EXISTS */

	/* We are only starting one CPU so we do not need to synchronize
	 * across all CPUs using the start_flag. So just set it to 1.
	 */
	(void)atomic_set(&cpu_start_flag, 1); /* async, don't care */

	/* Initialize various CPU structs related to this CPU. */
	z_init_cpu(id);

	/* Start the CPU! */
	start_cpu(id, &cpu_start_fn);

	k_spin_unlock(&cpu_start_lock, key);
}

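/*
 * Illustrative sketch only (the call site is hypothetical): platform
 * power-management code resuming CPU 1 with no extra init hook, asking
 * for smp_timer_init() to be called again but not for the scheduler to
 * be invoked from the resume path:
 *
 *	k_smp_cpu_resume(1, NULL, NULL, true, false);
 */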
void k_smp_cpu_resume(int id, smp_init_fn fn, void *arg,
		      bool reinit_timer, bool invoke_sched)
{
	k_spinlock_key_t key = k_spin_lock(&cpu_start_lock);

	cpu_start_fn.fn = fn;
	cpu_start_fn.arg = arg;
	cpu_start_fn.invoke_sched = invoke_sched;

#ifdef CONFIG_SYS_CLOCK_EXISTS
	cpu_start_fn.reinit_timer = reinit_timer;
#else
	ARG_UNUSED(reinit_timer);
#endif /* CONFIG_SYS_CLOCK_EXISTS */

	/* We are only starting one CPU so we do not need to synchronize
	 * across all CPUs using the start_flag. So just set it to 1.
	 */
	(void)atomic_set(&cpu_start_flag, 1);

	/* Start the CPU! */
	start_cpu(id, &cpu_start_fn);

	k_spin_unlock(&cpu_start_lock, key);
}

void z_smp_init(void)
{
	/* We are powering up all CPUs and we want to synchronize their
	 * entry into the scheduler. So set the start flag to 0 here.
	 */
	(void)atomic_clear(&cpu_start_flag);

	/* Just start CPUs one by one. */
	unsigned int num_cpus = arch_num_cpus();

	for (int i = 1; i < num_cpus; i++) {
		z_init_cpu(i);
		start_cpu(i, NULL);
	}

	/* Let loose those CPUs so they can start scheduling
	 * threads to run.
	 */
	(void)atomic_set(&cpu_start_flag, 1);
}

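/*
 * True if the current context is allowed to migrate between CPUs:
 * i.e. it is not an ISR and interrupts were not already locked.
 */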
bool z_smp_cpu_mobile(void)
{
	unsigned int k = arch_irq_lock();
	bool pinned = arch_is_in_isr() || !arch_irq_unlocked(k);

	arch_irq_unlock(k);
	return !pinned;
}

__attribute_const__ struct k_thread *z_smp_current_get(void)
{
	/*
	 * _current is a field read from _current_cpu, which can race
	 * with preemption before it is read. We must lock local
	 * interrupts when reading it.
	 */
	unsigned int key = arch_irq_lock();
	struct k_thread *t = _current_cpu->current;

	arch_irq_unlock(key);
	return t;
}