Andy Ross | 364cbae | 2018-01-29 09:23:49 -0800 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2018 Intel corporation |
| 3 | * |
| 4 | * SPDX-License-Identifier: Apache-2.0 |
| 5 | */ |
| 6 | |
| 7 | #include <kernel.h> |
| 8 | #include <kernel_structs.h> |
| 9 | #include <spinlock.h> |
Andy Ross | bdcd18a7 | 2018-01-17 11:34:50 -0800 | [diff] [blame] | 10 | #include <kswap.h> |
Andy Ross | 245b54e | 2018-02-08 09:10:46 -0800 | [diff] [blame] | 11 | #include <kernel_internal.h> |
Andy Ross | 364cbae | 2018-01-29 09:23:49 -0800 | [diff] [blame] | 12 | |
Andy Ross | 15c4007 | 2018-04-12 12:50:05 -0700 | [diff] [blame] | 13 | static atomic_t global_lock; |
Andy Ross | 6ed59bc | 2019-02-14 21:04:19 -0800 | [diff] [blame] | 14 | static atomic_t start_flag; |
Andy Ross | 364cbae | 2018-01-29 09:23:49 -0800 | [diff] [blame] | 15 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 16 | unsigned int z_smp_global_lock(void) |
Andy Ross | 364cbae | 2018-01-29 09:23:49 -0800 | [diff] [blame] | 17 | { |
Andrew Boie | 4f77c2a | 2019-11-07 12:43:29 -0800 | [diff] [blame] | 18 | unsigned int key = arch_irq_lock(); |
Andy Ross | 364cbae | 2018-01-29 09:23:49 -0800 | [diff] [blame] | 19 | |
Andy Ross | 15c4007 | 2018-04-12 12:50:05 -0700 | [diff] [blame] | 20 | if (!_current->base.global_lock_count) { |
| 21 | while (!atomic_cas(&global_lock, 0, 1)) { |
| 22 | } |
Andy Ross | 364cbae | 2018-01-29 09:23:49 -0800 | [diff] [blame] | 23 | } |
| 24 | |
Andy Ross | 15c4007 | 2018-04-12 12:50:05 -0700 | [diff] [blame] | 25 | _current->base.global_lock_count++; |
Andy Ross | 364cbae | 2018-01-29 09:23:49 -0800 | [diff] [blame] | 26 | |
Andy Ross | 15c4007 | 2018-04-12 12:50:05 -0700 | [diff] [blame] | 27 | return key; |
Andy Ross | 364cbae | 2018-01-29 09:23:49 -0800 | [diff] [blame] | 28 | } |
| 29 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 30 | void z_smp_global_unlock(unsigned int key) |
Andy Ross | 364cbae | 2018-01-29 09:23:49 -0800 | [diff] [blame] | 31 | { |
Andy Ross | 15c4007 | 2018-04-12 12:50:05 -0700 | [diff] [blame] | 32 | if (_current->base.global_lock_count) { |
| 33 | _current->base.global_lock_count--; |
| 34 | |
| 35 | if (!_current->base.global_lock_count) { |
| 36 | atomic_clear(&global_lock); |
| 37 | } |
Andy Ross | 364cbae | 2018-01-29 09:23:49 -0800 | [diff] [blame] | 38 | } |
| 39 | |
Andrew Boie | 4f77c2a | 2019-11-07 12:43:29 -0800 | [diff] [blame] | 40 | arch_irq_unlock(key); |
Andy Ross | 364cbae | 2018-01-29 09:23:49 -0800 | [diff] [blame] | 41 | } |
Andy Ross | bdcd18a7 | 2018-01-17 11:34:50 -0800 | [diff] [blame] | 42 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 43 | /* Called from within z_swap(), so assumes lock already held */ |
| 44 | void z_smp_release_global_lock(struct k_thread *thread) |
Andy Ross | 15c4007 | 2018-04-12 12:50:05 -0700 | [diff] [blame] | 45 | { |
| 46 | if (!thread->base.global_lock_count) { |
| 47 | atomic_clear(&global_lock); |
| 48 | } |
| 49 | } |
| 50 | |
Andy Ross | 42ed12a | 2019-02-19 16:03:39 -0800 | [diff] [blame] | 51 | #if CONFIG_MP_NUM_CPUS > 1 |
Guennadi Liakhovetski | 8d07b77 | 2021-04-01 13:46:57 +0200 | [diff] [blame] | 52 | |
| 53 | void z_smp_thread_init(void *arg, struct k_thread *thread) |
Andy Ross | bdcd18a7 | 2018-01-17 11:34:50 -0800 | [diff] [blame] | 54 | { |
Daniel Leung | ac53880 | 2020-07-23 14:33:31 -0700 | [diff] [blame] | 55 | atomic_t *cpu_start_flag = arg; |
Andy Ross | bdcd18a7 | 2018-01-17 11:34:50 -0800 | [diff] [blame] | 56 | |
| 57 | /* Wait for the signal to begin scheduling */ |
Daniel Leung | ac53880 | 2020-07-23 14:33:31 -0700 | [diff] [blame] | 58 | while (!atomic_get(cpu_start_flag)) { |
Andy Ross | 6ed59bc | 2019-02-14 21:04:19 -0800 | [diff] [blame] | 59 | } |
Andy Ross | bdcd18a7 | 2018-01-17 11:34:50 -0800 | [diff] [blame] | 60 | |
Guennadi Liakhovetski | 8d07b77 | 2021-04-01 13:46:57 +0200 | [diff] [blame] | 61 | z_dummy_thread_init(thread); |
| 62 | } |
| 63 | |
| 64 | void z_smp_thread_swap(void) |
| 65 | { |
| 66 | z_swap_unlocked(); |
| 67 | } |
| 68 | |
/* Entry point for secondary CPUs, invoked by arch_start_cpu().
 *
 * Sequencing matters here: first wait for the start flag and set up the
 * dummy thread (z_smp_thread_init), then bring up this CPU's timer, and
 * only then swap into the scheduler.  The swap never returns — this CPU
 * runs scheduled threads from here on.
 *
 * @param arg Pointer to the start flag, forwarded to z_smp_thread_init()
 */
static inline FUNC_NORETURN void smp_init_top(void *arg)
{
	/* Lives on this CPU's interrupt stack; only needed until the
	 * first real context switch below.
	 */
	struct k_thread dummy_thread;

	z_smp_thread_init(arg, &dummy_thread);
	smp_timer_init();

	z_swap_unlocked();

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
Andy Ross | c6d077e | 2021-08-18 06:28:11 -0700 | [diff] [blame] | 80 | |
| 81 | void z_smp_start_cpu(int id) |
| 82 | { |
| 83 | (void)atomic_clear(&start_flag); |
| 84 | arch_start_cpu(id, z_interrupt_stacks[id], CONFIG_ISR_STACK_SIZE, |
| 85 | smp_init_top, &start_flag); |
| 86 | (void)atomic_set(&start_flag, 1); |
| 87 | } |
| 88 | |
Guennadi Liakhovetski | 8d07b77 | 2021-04-01 13:46:57 +0200 | [diff] [blame] | 89 | #endif |
Andy Ross | bdcd18a7 | 2018-01-17 11:34:50 -0800 | [diff] [blame] | 90 | |
/* Boot-time SMP initialization, run on the primary CPU.
 *
 * Order is the contract: clear the start flag so secondary CPUs block
 * in z_smp_thread_init(), launch every CPU except CPU 0 (unless boot is
 * deliberately delayed via CONFIG_SMP_BOOT_DELAY), then set the flag to
 * release them all at once.
 */
void z_smp_init(void)
{
	(void)atomic_clear(&start_flag);

#if CONFIG_MP_NUM_CPUS > 1 && !defined(CONFIG_SMP_BOOT_DELAY)
	/* CPU 0 is already running; start the rest on their own
	 * per-CPU interrupt stacks.
	 */
	for (int i = 1; i < CONFIG_MP_NUM_CPUS; i++) {
		arch_start_cpu(i, z_interrupt_stacks[i], CONFIG_ISR_STACK_SIZE,
			       smp_init_top, &start_flag);
	}
#endif

	/* Release all waiting secondary CPUs simultaneously */
	(void)atomic_set(&start_flag, 1);
}
Andy Ross | 42ed12a | 2019-02-19 16:03:39 -0800 | [diff] [blame] | 104 | |
Andy Ross | eefd3da | 2020-02-06 13:39:52 -0800 | [diff] [blame] | 105 | bool z_smp_cpu_mobile(void) |
| 106 | { |
| 107 | unsigned int k = arch_irq_lock(); |
| 108 | bool pinned = arch_is_in_isr() || !arch_irq_unlocked(k); |
| 109 | |
| 110 | arch_irq_unlock(k); |
| 111 | return !pinned; |
| 112 | } |