/* Copyright (c) 2022 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/kernel/smp.h>
#include <zephyr/spinlock.h>
#include <kswap.h>
#include <kernel_internal.h>

static atomic_t global_lock;

/**
 * Flag to tell a recently powered up CPU to start
 * its initialization routine.
 *
 * 0 to tell the powered up CPU to wait.
 * 1 to tell the powered up CPU to continue initialization.
 */
static atomic_t cpu_start_flag;

/**
 * Flag to tell the caller that the target CPU is now
 * powered up and ready to be initialized.
 *
 * 0 if the target CPU is not yet ready.
 * 1 if the target CPU has powered up and is ready to be initialized.
 */
static atomic_t ready_flag;

/**
 * Struct holding the function to be called before handing off
 * to the scheduler, and its argument.
 */
static struct cpu_start_cb {
	/**
	 * Function to be called before handing off to the scheduler.
	 * Can be NULL.
	 */
	smp_init_fn fn;

	/** Argument to @ref cpu_start_fn.fn. */
	void *arg;

	/** Invoke the scheduler after the CPU has started if true. */
	bool invoke_sched;

#ifdef CONFIG_SYS_CLOCK_EXISTS
	/** True if smp_timer_init() needs to be called. */
	bool reinit_timer;
#endif /* CONFIG_SYS_CLOCK_EXISTS */
} cpu_start_fn;

static struct k_spinlock cpu_start_lock;

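/* Note: cpu_start_fn is shared scratch state consumed by the CPU being
 * brought up; k_smp_cpu_start() and k_smp_cpu_resume() below serialize
 * writes to it with cpu_start_lock before handing it to start_cpu().
 */
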
unsigned int z_smp_global_lock(void)
{
	unsigned int key = arch_irq_lock();

	if (!_current->base.global_lock_count) {
		while (!atomic_cas(&global_lock, 0, 1)) {
			arch_spin_relax();
		}
	}

	_current->base.global_lock_count++;

	return key;
}

void z_smp_global_unlock(unsigned int key)
{
	if (_current->base.global_lock_count != 0U) {
		_current->base.global_lock_count--;

		if (!_current->base.global_lock_count) {
			(void)atomic_clear(&global_lock);
		}
	}

	arch_irq_unlock(key);
}
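
/* Illustrative sketch (not a new API, just how the pair above composes):
 * the lock nests per-thread via _current->base.global_lock_count, so a
 * hypothetical caller can use it like irq_lock()/irq_unlock():
 *
 *	unsigned int key = z_smp_global_lock();
 *	... section serialized against all CPUs taking the global lock ...
 *	z_smp_global_unlock(key);
 */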

/* Called from within z_swap(), so assumes lock already held */
void z_smp_release_global_lock(struct k_thread *thread)
{
	if (!thread->base.global_lock_count) {
		(void)atomic_clear(&global_lock);
	}
}

/* Tiny delay that relaxes bus traffic to avoid spamming a shared
 * memory bus while polling an atomic variable.
 */
static inline void local_delay(void)
{
	for (volatile int i = 0; i < 1000; i++) {
	}
}

static void wait_for_start_signal(atomic_t *start_flag)
{
	/* Wait for the signal to begin scheduling */
	while (!atomic_get(start_flag)) {
		local_delay();
	}
}

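/* Entry point executed on a newly started CPU: start_cpu() below hands this
 * function (plus an optional struct cpu_start_cb) to arch_cpu_start(). It
 * signals readiness via ready_flag, waits for cpu_start_flag, then optionally
 * sets up the dummy thread, the timer, and a user-supplied callback before
 * entering the scheduler.
 */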
static inline void smp_init_top(void *arg)
{
	struct cpu_start_cb csc = arg ? *(struct cpu_start_cb *)arg : (struct cpu_start_cb){0};

	/* Let start_cpu() know that this CPU has powered up. */
	(void)atomic_set(&ready_flag, 1);

	/* Wait for the CPU start caller to signal that
	 * we can start initialization.
	 */
	wait_for_start_signal(&cpu_start_flag);

	if ((arg == NULL) || csc.invoke_sched) {
		/* Initialize the dummy thread struct so that
		 * the scheduler can schedule actual threads to run.
		 */
		z_dummy_thread_init(&_thread_dummy);
	}

#ifdef CONFIG_SYS_CLOCK_EXISTS
	if ((arg == NULL) || csc.reinit_timer) {
		smp_timer_init();
	}
#endif /* CONFIG_SYS_CLOCK_EXISTS */

	/* Do additional initialization steps if needed. */
	if (csc.fn != NULL) {
		csc.fn(csc.arg);
	}

	if ((arg != NULL) && !csc.invoke_sched) {
		/* Don't invoke scheduler. */
		return;
	}

	/* Let scheduler decide what thread to run next. */
	z_swap_unlocked();

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

static void start_cpu(int id, struct cpu_start_cb *csc)
{
	/* Clear the ready flag so the newly powered up CPU can
	 * signal that it has powered up.
	 */
	(void)atomic_clear(&ready_flag);

	/* Power up the CPU */
	arch_cpu_start(id, z_interrupt_stacks[id], CONFIG_ISR_STACK_SIZE,
		       smp_init_top, csc);

	/* Wait until the newly powered up CPU signals that
	 * it has powered up.
	 */
	while (!atomic_get(&ready_flag)) {
		local_delay();
	}
}

void k_smp_cpu_start(int id, smp_init_fn fn, void *arg)
{
	k_spinlock_key_t key = k_spin_lock(&cpu_start_lock);

	cpu_start_fn.fn = fn;
	cpu_start_fn.arg = arg;
	cpu_start_fn.invoke_sched = true;

#ifdef CONFIG_SYS_CLOCK_EXISTS
	cpu_start_fn.reinit_timer = true;
#endif /* CONFIG_SYS_CLOCK_EXISTS */

	/* We are only starting one CPU so we do not need to synchronize
	 * across all CPUs using the start_flag. So just set it to 1.
	 */
	(void)atomic_set(&cpu_start_flag, 1); /* async, don't care */

	/* Initialize various CPU structs related to this CPU. */
	z_init_cpu(id);

	/* Start the CPU! */
	start_cpu(id, &cpu_start_fn);

	k_spin_unlock(&cpu_start_lock, key);
}
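
/* Illustrative sketch (hypothetical callback and CPU number, not taken from
 * this file): application or SoC code could bring CPU 1 online and run some
 * per-CPU setup before that CPU enters the scheduler:
 *
 *	static void my_percpu_setup(void *arg)
 *	{
 *		ARG_UNUSED(arg);
 *		... per-CPU hardware/driver setup ...
 *	}
 *
 *	k_smp_cpu_start(1, my_percpu_setup, NULL);
 *
 * Passing fn == NULL skips the extra setup step and the CPU goes straight
 * to the scheduler.
 */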

void k_smp_cpu_resume(int id, smp_init_fn fn, void *arg,
		      bool reinit_timer, bool invoke_sched)
{
	k_spinlock_key_t key = k_spin_lock(&cpu_start_lock);

	cpu_start_fn.fn = fn;
	cpu_start_fn.arg = arg;
	cpu_start_fn.invoke_sched = invoke_sched;

#ifdef CONFIG_SYS_CLOCK_EXISTS
	cpu_start_fn.reinit_timer = reinit_timer;
#else
	ARG_UNUSED(reinit_timer);
#endif /* CONFIG_SYS_CLOCK_EXISTS */

	/* We are only starting one CPU so we do not need to synchronize
	 * across all CPUs using the start_flag. So just set it to 1.
	 */
	(void)atomic_set(&cpu_start_flag, 1);

	/* Start the CPU! */
	start_cpu(id, &cpu_start_fn);

	k_spin_unlock(&cpu_start_lock, key);
}
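
/* Illustrative sketch (hypothetical power-management flow): after CPU 1 was
 * powered down, a resume path could restart it, re-initialize its timer, run
 * a restore callback, and not enter the scheduler from here:
 *
 *	k_smp_cpu_resume(1, my_restore_fn, NULL, true, false);
 *
 * With invoke_sched == false, smp_init_top() simply returns after the
 * callback instead of calling z_swap_unlocked(); my_restore_fn is a
 * placeholder name for whatever the platform's resume hook would be.
 */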

void z_smp_init(void)
{
	/* We are powering up all CPUs and we want to synchronize their
	 * entry into the scheduler. So set the start flag to 0 here.
	 */
	(void)atomic_clear(&cpu_start_flag);

	/* Just start CPUs one by one. */
	unsigned int num_cpus = arch_num_cpus();

	for (int i = 1; i < num_cpus; i++) {
		z_init_cpu(i);
		start_cpu(i, NULL);
	}

	/* Let those CPUs loose so they can start scheduling
	 * threads to run.
	 */
	(void)atomic_set(&cpu_start_flag, 1);
}

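/* True if the current context is allowed to migrate between CPUs: i.e. it is
 * neither running in an ISR nor holding an IRQ lock of its own (the lock
 * taken inside this function is released again before returning).
 */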
bool z_smp_cpu_mobile(void)
{
	unsigned int k = arch_irq_lock();
	bool pinned = arch_is_in_isr() || !arch_irq_unlocked(k);

	arch_irq_unlock(k);
	return !pinned;
}

__attribute_const__ struct k_thread *z_smp_current_get(void)
{
	/*
	 * _current is a field read from _current_cpu, which can race
	 * with preemption before it is read. We must lock local
	 * interrupts when reading it.
	 */
	unsigned int key = arch_irq_lock();
	struct k_thread *t = _current_cpu->current;

	arch_irq_unlock(key);
	return t;
}