blob: 2647395528d5ff84f5bd97f346f3e157a1cbf100 [file] [log] [blame]
Andy Ross364cbae2018-01-29 09:23:49 -08001/*
2 * Copyright (c) 2018 Intel corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7#include <kernel.h>
8#include <kernel_structs.h>
9#include <spinlock.h>
Andy Rossbdcd18a72018-01-17 11:34:50 -080010#include <kswap.h>
Andy Ross245b54e2018-02-08 09:10:46 -080011#include <kernel_internal.h>
Andy Ross364cbae2018-01-29 09:23:49 -080012
Andy Ross15c40072018-04-12 12:50:05 -070013static atomic_t global_lock;
Andy Ross6ed59bc2019-02-14 21:04:19 -080014static atomic_t start_flag;
Andy Ross364cbae2018-01-29 09:23:49 -080015
Patrik Flykt4344e272019-03-08 14:19:05 -070016unsigned int z_smp_global_lock(void)
Andy Ross364cbae2018-01-29 09:23:49 -080017{
Andrew Boie4f77c2a2019-11-07 12:43:29 -080018 unsigned int key = arch_irq_lock();
Andy Ross364cbae2018-01-29 09:23:49 -080019
Andy Ross15c40072018-04-12 12:50:05 -070020 if (!_current->base.global_lock_count) {
21 while (!atomic_cas(&global_lock, 0, 1)) {
22 }
Andy Ross364cbae2018-01-29 09:23:49 -080023 }
24
Andy Ross15c40072018-04-12 12:50:05 -070025 _current->base.global_lock_count++;
Andy Ross364cbae2018-01-29 09:23:49 -080026
Andy Ross15c40072018-04-12 12:50:05 -070027 return key;
Andy Ross364cbae2018-01-29 09:23:49 -080028}
29
Patrik Flykt4344e272019-03-08 14:19:05 -070030void z_smp_global_unlock(unsigned int key)
Andy Ross364cbae2018-01-29 09:23:49 -080031{
Andy Ross15c40072018-04-12 12:50:05 -070032 if (_current->base.global_lock_count) {
33 _current->base.global_lock_count--;
34
35 if (!_current->base.global_lock_count) {
36 atomic_clear(&global_lock);
37 }
Andy Ross364cbae2018-01-29 09:23:49 -080038 }
39
Andrew Boie4f77c2a2019-11-07 12:43:29 -080040 arch_irq_unlock(key);
Andy Ross364cbae2018-01-29 09:23:49 -080041}
Andy Rossbdcd18a72018-01-17 11:34:50 -080042
Patrik Flykt4344e272019-03-08 14:19:05 -070043/* Called from within z_swap(), so assumes lock already held */
44void z_smp_release_global_lock(struct k_thread *thread)
Andy Ross15c40072018-04-12 12:50:05 -070045{
46 if (!thread->base.global_lock_count) {
47 atomic_clear(&global_lock);
48 }
49}
50
Andy Ross42ed12a2019-02-19 16:03:39 -080051#if CONFIG_MP_NUM_CPUS > 1
Guennadi Liakhovetski8d07b772021-04-01 13:46:57 +020052
53void z_smp_thread_init(void *arg, struct k_thread *thread)
Andy Rossbdcd18a72018-01-17 11:34:50 -080054{
Daniel Leungac538802020-07-23 14:33:31 -070055 atomic_t *cpu_start_flag = arg;
Andy Rossbdcd18a72018-01-17 11:34:50 -080056
57 /* Wait for the signal to begin scheduling */
Daniel Leungac538802020-07-23 14:33:31 -070058 while (!atomic_get(cpu_start_flag)) {
Andy Ross6ed59bc2019-02-14 21:04:19 -080059 }
Andy Rossbdcd18a72018-01-17 11:34:50 -080060
Guennadi Liakhovetski8d07b772021-04-01 13:46:57 +020061 z_dummy_thread_init(thread);
62}
63
/* Thin wrapper: yield this CPU into the scheduler via an unlocked
 * context swap (no lock is held or released here).
 */
void z_smp_thread_swap(void)
{
	z_swap_unlocked();
}
68
/* Entry point for a secondary CPU, handed to arch_start_cpu().
 *
 * @arg is the atomic start flag; z_smp_thread_init() spins on it until
 * the boot CPU gives the go-ahead, then this CPU sets up its timer and
 * enters the scheduler, never to return.  The dummy thread lives on
 * this (interrupt) stack only until the first real swap.
 */
static inline FUNC_NORETURN void smp_init_top(void *arg)
{
	struct k_thread dummy_thread;

	z_smp_thread_init(arg, &dummy_thread);
	smp_timer_init();

	/* Pick up the first real thread; execution leaves this frame here */
	z_swap_unlocked();

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
Andy Rossc6d077e2021-08-18 06:28:11 -070080
/* Bring up one secondary CPU by index (presumably the delayed-boot path
 * used when CONFIG_SMP_BOOT_DELAY keeps z_smp_init() from starting the
 * other CPUs — see z_smp_init() below; confirm against callers).
 *
 * Ordering matters: the shared start_flag is cleared BEFORE the CPU is
 * started, so smp_init_top() on the new CPU spins until the final
 * atomic_set() here releases it into the scheduler.
 */
void z_smp_start_cpu(int id)
{
	(void)atomic_clear(&start_flag);
	arch_start_cpu(id, z_interrupt_stacks[id], CONFIG_ISR_STACK_SIZE,
		       smp_init_top, &start_flag);
	(void)atomic_set(&start_flag, 1);
}
88
Guennadi Liakhovetski8d07b772021-04-01 13:46:57 +020089#endif
Andy Rossbdcd18a72018-01-17 11:34:50 -080090
/* Boot-CPU SMP initialization.
 *
 * Clears the shared start flag, launches every other CPU (unless
 * CONFIG_SMP_BOOT_DELAY defers that to z_smp_start_cpu()), and finally
 * raises the flag so all secondary CPUs leave their spin in
 * z_smp_thread_init() together and begin scheduling.
 */
void z_smp_init(void)
{
	(void)atomic_clear(&start_flag);

#if CONFIG_MP_NUM_CPUS > 1 && !defined(CONFIG_SMP_BOOT_DELAY)
	/* CPU 0 is already running; start CPUs 1..N-1 on their own
	 * interrupt stacks, all gated on the same start_flag.
	 */
	for (int i = 1; i < CONFIG_MP_NUM_CPUS; i++) {
		arch_start_cpu(i, z_interrupt_stacks[i], CONFIG_ISR_STACK_SIZE,
			       smp_init_top, &start_flag);
	}
#endif

	(void)atomic_set(&start_flag, 1);
}
Andy Ross42ed12a2019-02-19 16:03:39 -0800104
/* Report whether the current execution context may migrate between
 * CPUs: true only when running in thread context with interrupts
 * enabled (an ISR, or a thread holding an IRQ lock, is pinned).
 *
 * IRQs are briefly masked so the check itself cannot be preempted;
 * the pre-existing lock state is read from the returned key.
 */
bool z_smp_cpu_mobile(void)
{
	unsigned int key = arch_irq_lock();
	bool movable = !arch_is_in_isr() && arch_irq_unlocked(key);

	arch_irq_unlock(key);
	return movable;
}