/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/spinlock.h>
#include <wait_q.h>
#include <kthread.h>
#include <priority_q.h>
#include <kswap.h>
#include <ipi.h>
#include <kernel_arch_func.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <stdbool.h>
#include <kernel_internal.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/timing/timing.h>
#include <zephyr/sys/util.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#if defined(CONFIG_SWAP_NONATOMIC) && defined(CONFIG_TIMESLICING)
extern struct k_thread *pending_current;
#endif

struct k_spinlock _sched_spinlock;

/* Storage to "complete" the context switch from an invalid/incomplete thread
 * context (ex: exiting an ISR that aborted _current)
 */
__incoherent struct k_thread _thread_dummy;

static ALWAYS_INLINE void update_cache(int preempt_ok);
static void halt_thread(struct k_thread *thread, uint8_t new_state);
static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q);


BUILD_ASSERT(CONFIG_NUM_COOP_PRIORITIES >= CONFIG_NUM_METAIRQ_PRIORITIES,
	     "You need to provide at least as many CONFIG_NUM_COOP_PRIORITIES as "
	     "CONFIG_NUM_METAIRQ_PRIORITIES as Meta IRQs are just a special class of cooperative "
	     "threads.");

/*
 * Return value same as e.g. memcmp
 * > 0 -> thread 1 priority  > thread 2 priority
 * = 0 -> thread 1 priority == thread 2 priority
 * < 0 -> thread 1 priority  < thread 2 priority
 * Do not rely on the actual value returned aside from the above.
 * (Again, like memcmp.)
 */
int32_t z_sched_prio_cmp(struct k_thread *thread_1,
			 struct k_thread *thread_2)
{
	/* `prio` is <32b, so the below cannot overflow. */
	int32_t b1 = thread_1->base.prio;
	int32_t b2 = thread_2->base.prio;

	if (b1 != b2) {
		return b2 - b1;
	}

#ifdef CONFIG_SCHED_DEADLINE
	/* If we assume all deadlines live within the same "half" of
	 * the 32 bit modulus space (this is a documented API rule),
	 * then the latest deadline in the queue minus the earliest is
	 * guaranteed to be (2's complement) non-negative.  We can
	 * leverage that to compare the values without having to check
	 * the current time.
	 */
	uint32_t d1 = thread_1->base.prio_deadline;
	uint32_t d2 = thread_2->base.prio_deadline;

	if (d1 != d2) {
		/* Sooner deadline means higher effective priority.
		 * Doing the calculation with unsigned types and casting
		 * to signed isn't perfect, but at least reduces this
		 * from UB on overflow to impdef.
		 */
		return (int32_t) (d2 - d1);
	}
#endif /* CONFIG_SCHED_DEADLINE */
	return 0;
}

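/* Illustrative sketch (not code from this file): how a caller might use
 * z_sched_prio_cmp().  In Zephyr a numerically *lower* prio value means
 * a *higher* scheduling priority, which is why the base comparison
 * above returns b2 - b1.  Assuming two valid thread pointers t1 and t2:
 *
 *	if (z_sched_prio_cmp(t1, t2) > 0) {
 *		// t1 has the higher effective priority (and, under
 *		// CONFIG_SCHED_DEADLINE, the sooner deadline on ties)
 *	}
 *
 * The deadline tie-break relies on modular arithmetic: e.g. with
 * d1 = 0xFFFFFFF0 and d2 = 0x00000010, (int32_t)(d2 - d1) == 0x20 > 0,
 * so t1 (whose deadline is "sooner" modulo 2^32) wins, provided both
 * deadlines lie within the same half of the 32-bit wrap window.
 */
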
static ALWAYS_INLINE void *thread_runq(struct k_thread *thread)
{
#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	int cpu, m = thread->base.cpu_mask;

	/* Edge case: it's legal per the API to "make runnable" a
	 * thread with all CPUs masked off (i.e. one that isn't
	 * actually runnable!).  Sort of a wart in the API and maybe
	 * we should address this in docs/assertions instead to avoid
	 * the extra test.
	 */
	cpu = m == 0 ? 0 : u32_count_trailing_zeros(m);

	return &_kernel.cpus[cpu].ready_q.runq;
#else
	ARG_UNUSED(thread);
	return &_kernel.ready_q.runq;
#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
}

static ALWAYS_INLINE void *curr_cpu_runq(void)
{
#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	return &arch_curr_cpu()->ready_q.runq;
#else
	return &_kernel.ready_q.runq;
#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
}

static ALWAYS_INLINE void runq_add(struct k_thread *thread)
{
	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	_priq_run_add(thread_runq(thread), thread);
}

static ALWAYS_INLINE void runq_remove(struct k_thread *thread)
{
	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	_priq_run_remove(thread_runq(thread), thread);
}

static ALWAYS_INLINE struct k_thread *runq_best(void)
{
	return _priq_run_best(curr_cpu_runq());
}

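/* Illustrative sketch (an assumption for exposition, not code from this
 * file): with CONFIG_SCHED_CPU_MASK_PIN_ONLY, a thread's cpu_mask is a
 * one-hot bitmap, so the lowest set bit selects its per-CPU run queue:
 *
 *	// mask 0x4 = BIT(2) -> u32_count_trailing_zeros(0x4) == 2,
 *	// so the thread lives on _kernel.cpus[2].ready_q.runq
 *
 * Without that option there is a single global run queue shared by all
 * CPUs, and thread_runq()/curr_cpu_runq() both collapse to it.
 */
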
/* _current is never in the run queue until context switch on
 * SMP configurations, see z_requeue_current()
 */
static inline bool should_queue_thread(struct k_thread *thread)
{
	return !IS_ENABLED(CONFIG_SMP) || (thread != _current);
}

static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_QUEUED;
	if (should_queue_thread(thread)) {
		runq_add(thread);
	}
#ifdef CONFIG_SMP
	if (thread == _current) {
		/* add current to end of queue means "yield" */
		_current_cpu->swap_ok = true;
	}
#endif /* CONFIG_SMP */
}

static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_QUEUED;
	if (should_queue_thread(thread)) {
		runq_remove(thread);
	}
}

/* Called out of z_swap() when CONFIG_SMP.  The current thread can
 * never live in the run queue until we are inexorably on the context
 * switch path on SMP, otherwise there is a deadlock condition where a
 * set of CPUs pick a cycle of threads to run and wait for them all to
 * context switch forever.
 */
void z_requeue_current(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		runq_add(thread);
	}
	signal_pending_ipi();
}

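/* A minimal sketch of the SMP invariant above (illustrative only): on
 * SMP, _THREAD_QUEUED can be set on _current while it is deliberately
 * absent from the run queue; z_requeue_current() re-adds it only at the
 * last moment on the context switch path:
 *
 *	queue_thread(_current);   // sets _THREAD_QUEUED, skips runq_add()
 *	...                       // z_swap() picks a successor
 *	z_requeue_current(old);   // now the old thread really enters the runq
 *
 * This ordering is what prevents two CPUs from each "picking" the
 * other's still-running thread and spinning forever.
 */
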
/* Return true if the thread is aborting, else false */
static inline bool is_aborting(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_ABORTING) != 0U;
}

/* Return true if the thread is aborting or suspending, else false */
static inline bool is_halting(struct k_thread *thread)
{
	return (thread->base.thread_state &
		(_THREAD_ABORTING | _THREAD_SUSPENDING)) != 0U;
}

/* Clear the halting bits (_THREAD_ABORTING and _THREAD_SUSPENDING) */
static inline void clear_halting(struct k_thread *thread)
{
	barrier_dmem_fence_full(); /* Other cpus spin on this locklessly! */
	thread->base.thread_state &= ~(_THREAD_ABORTING | _THREAD_SUSPENDING);
}

static ALWAYS_INLINE struct k_thread *next_up(void)
{
#ifdef CONFIG_SMP
	if (is_halting(_current)) {
		halt_thread(_current, is_aborting(_current) ?
				      _THREAD_DEAD : _THREAD_SUSPENDED);
	}
#endif /* CONFIG_SMP */

	struct k_thread *thread = runq_best();

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && \
	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
	/* MetaIRQs must always attempt to return to the cooperative
	 * thread they preempted, not to whatever happens to be highest
	 * priority now.  The cooperative thread was promised it
	 * wouldn't be preempted (by non-metairq threads)!
	 */
	struct k_thread *mirqp = _current_cpu->metairq_preempted;

	if (mirqp != NULL && (thread == NULL || !thread_is_metairq(thread))) {
		if (!z_is_thread_prevented_from_running(mirqp)) {
			thread = mirqp;
		} else {
			_current_cpu->metairq_preempted = NULL;
		}
	}
#endif
/* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
 * CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
 */

#ifndef CONFIG_SMP
	/* In uniprocessor mode, we can leave the current thread in
	 * the queue (actually we have to, otherwise the assembly
	 * context switch code for all architectures would be
	 * responsible for putting it back in z_swap and ISR return!),
	 * which makes this choice simple.
	 */
	return (thread != NULL) ? thread : _current_cpu->idle_thread;
#else
	/* Under SMP, the "cache" mechanism for selecting the next
	 * thread doesn't work, so we have more work to do to test
	 * _current against the best choice from the queue.  Here, the
	 * thread selected above represents "the best thread that is
	 * not current".
	 *
	 * Subtle note on "queued": in SMP mode, _current does not
	 * live in the queue, so this isn't exactly the same thing as
	 * "ready", it means "is _current already added back to the
	 * queue such that we don't want to re-add it".
	 */
	bool queued = z_is_thread_queued(_current);
	bool active = !z_is_thread_prevented_from_running(_current);

	if (thread == NULL) {
		thread = _current_cpu->idle_thread;
	}

	if (active) {
		int32_t cmp = z_sched_prio_cmp(_current, thread);

		/* Ties only switch if state says we yielded */
		if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) {
			thread = _current;
		}

		if (!should_preempt(thread, _current_cpu->swap_ok)) {
			thread = _current;
		}
	}

	/* Put _current back into the queue */
	if ((thread != _current) && active &&
	    !z_is_idle_thread_object(_current) && !queued) {
		queue_thread(_current);
	}

	/* Take the new _current out of the queue */
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}

	_current_cpu->swap_ok = false;
	return thread;
#endif /* CONFIG_SMP */
}

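/* Worked example of the SMP arbitration above (illustrative values):
 * suppose _current has prio 5, is still active, and runq_best() returns
 * a thread with prio 7 (numerically higher = lower priority).  Then
 * z_sched_prio_cmp(_current, best) > 0, so _current is kept.  If both
 * were prio 5, _current is kept only while swap_ok is false, i.e. while
 * it has not yielded; after a yield the tie goes to the queued peer.
 */
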
void move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}
	queue_thread(thread);
	update_cache(thread == _current);
}

/* Track cooperative threads preempted by metairqs so we can return to
 * them specifically.  Called at the moment a new thread has been
 * selected to run.
 */
static void update_metairq_preempt(struct k_thread *thread)
{
#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && \
	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
	if (thread_is_metairq(thread) && !thread_is_metairq(_current) &&
	    !thread_is_preemptible(_current)) {
		/* Record new preemption */
		_current_cpu->metairq_preempted = _current;
	} else if (!thread_is_metairq(thread) && !z_is_idle_thread_object(thread)) {
		/* Returning from existing preemption */
		_current_cpu->metairq_preempted = NULL;
	}
#else
	ARG_UNUSED(thread);
#endif
/* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
 * CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
 */
}

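/* Timeline sketch of the meta-IRQ bookkeeping (illustrative, assuming
 * CONFIG_NUM_METAIRQ_PRIORITIES=1 and a cooperative thread C):
 *
 *	C runs (cooperative)  -> metairq M becomes ready
 *	M selected            -> metairq_preempted = C   (recorded here)
 *	M pends/exits         -> next_up() sees mirqp == C and returns it
 *	C resumes             -> metairq_preempted = NULL (cleared here)
 *
 * This is how the "no preemption by non-metairq threads" promise to
 * cooperative threads survives a meta-IRQ interruption.
 */
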
static ALWAYS_INLINE void update_cache(int preempt_ok)
{
#ifndef CONFIG_SMP
	struct k_thread *thread = next_up();

	if (should_preempt(thread, preempt_ok)) {
#ifdef CONFIG_TIMESLICING
		if (thread != _current) {
			z_reset_time_slice(thread);
		}
#endif /* CONFIG_TIMESLICING */
		update_metairq_preempt(thread);
		_kernel.ready_q.cache = thread;
	} else {
		_kernel.ready_q.cache = _current;
	}

#else
	/* The way this works is that the CPU record keeps its
	 * "cooperative swapping is OK" flag until the next reschedule
	 * call or context switch.  It doesn't need to be tracked per
	 * thread because if the thread gets preempted for whatever
	 * reason the scheduler will make the same decision anyway.
	 */
	_current_cpu->swap_ok = preempt_ok;
#endif /* CONFIG_SMP */
}

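/* A minimal sketch of what the uniprocessor "cache" buys us
 * (illustrative): every scheduling point funnels through
 *
 *	queue_thread(t);       // or dequeue_thread()/unready_thread()
 *	update_cache(...);     // recompute _kernel.ready_q.cache once
 *
 * so the context-switch fast path can just load the cached pointer
 * instead of walking the priority queue.  On SMP the cache is useless
 * (another CPU may consume the cached thread first), hence the
 * swap_ok-only branch above.
 */
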
static struct _cpu *thread_active_elsewhere(struct k_thread *thread)
{
	/* Returns pointer to _cpu if the thread is currently running on
	 * another CPU.  There are more scalable designs to answer this
	 * question in constant time, but this is fine for now.
	 */
#ifdef CONFIG_SMP
	int currcpu = _current_cpu->id;

	unsigned int num_cpus = arch_num_cpus();

	for (int i = 0; i < num_cpus; i++) {
		if ((i != currcpu) &&
		    (_kernel.cpus[i].current == thread)) {
			return &_kernel.cpus[i];
		}
	}
#endif /* CONFIG_SMP */
	ARG_UNUSED(thread);
	return NULL;
}

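/* Usage sketch (illustrative): callers treat the result as both a
 * boolean and a target, e.g.
 *
 *	struct _cpu *cpu = thread_active_elsewhere(thread);
 *
 *	if (cpu != NULL) {
 *		// thread is live on cpu->id: poke it with an IPI
 *		// rather than mutating its state from here
 *	}
 *
 * On uniprocessor builds the function is constant-false, which lets the
 * SMP-only paths below compile away.
 */
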
static void ready_thread(struct k_thread *thread)
{
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(thread));
#endif /* CONFIG_KERNEL_COHERENCE */

	/* If the thread is already queued, do not try to add it to the
	 * run queue again.
	 */
	if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) {
		SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_ready, thread);

		queue_thread(thread);
		update_cache(0);

		flag_ipi(ipi_mask_create(thread));
	}
}

void z_ready_thread(struct k_thread *thread)
{
	K_SPINLOCK(&_sched_spinlock) {
		if (thread_active_elsewhere(thread) == NULL) {
			ready_thread(thread);
		}
	}
}

void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	K_SPINLOCK(&_sched_spinlock) {
		move_thread_to_end_of_prio_q(thread);
	}
}

void z_sched_start(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

	if (z_has_thread_started(thread)) {
		k_spin_unlock(&_sched_spinlock, key);
		return;
	}

	z_mark_thread_as_started(thread);
	ready_thread(thread);
	z_reschedule(&_sched_spinlock, key);
}

/* Spins in ISR context, waiting for a thread known to be running on
 * another CPU to catch the IPI we sent and halt.  Note that we check
 * for ourselves being asynchronously halted first to prevent simple
 * deadlocks (but not complex ones involving cycles of 3+ threads!).
 * Acts to release the provided lock before returning.
 */
static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
{
	if (is_halting(_current)) {
		halt_thread(_current,
			    is_aborting(_current) ? _THREAD_DEAD : _THREAD_SUSPENDED);
	}
	k_spin_unlock(&_sched_spinlock, key);
	while (is_halting(thread)) {
		unsigned int k = arch_irq_lock();

		arch_spin_relax(); /* Requires interrupts be masked */
		arch_irq_unlock(k);
	}
}

/* Shared handler for k_thread_{suspend,abort}().  Called with the
 * scheduler lock held and the corresponding key passed in.  The lock
 * may be released and reacquired internally, and is always released
 * before a possible return (which may itself come after a context
 * switch; aborting _current, obviously, does not return at all).
 */
static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
			  bool terminate)
{
	_wait_q_t *wq = &thread->join_queue;
#ifdef CONFIG_SMP
	wq = terminate ? wq : &thread->halt_queue;
#endif

	/* If the target is a thread running on another CPU, flag and
	 * poke (note that we might spin to wait, so a true
	 * synchronous IPI is needed here, not deferred!), it will
	 * halt itself in the IPI.  Otherwise it's unscheduled, so we
	 * can clean it up directly.
	 */

	struct _cpu *cpu = thread_active_elsewhere(thread);

	if (cpu != NULL) {
		thread->base.thread_state |= (terminate ? _THREAD_ABORTING
					      : _THREAD_SUSPENDING);
#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
#ifdef CONFIG_ARCH_HAS_DIRECTED_IPIS
		arch_sched_directed_ipi(IPI_CPU_MASK(cpu->id));
#else
		arch_sched_broadcast_ipi();
#endif
#endif
		if (arch_is_in_isr()) {
			thread_halt_spin(thread, key);
		} else {
			add_to_waitq_locked(_current, wq);
			z_swap(&_sched_spinlock, key);
		}
	} else {
		halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
		if ((thread == _current) && !arch_is_in_isr()) {
			z_swap(&_sched_spinlock, key);
			__ASSERT(!terminate, "aborted _current back from dead");
		} else {
			k_spin_unlock(&_sched_spinlock, key);
		}
	}
	/* NOTE: the scheduler lock has been released.  Don't put
	 * logic here, it's likely to be racy/deadlocky even if you
	 * re-take the lock!
	 */
}


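/* Decision table for z_thread_halt() (summarizing the branches above,
 * illustrative):
 *
 *	target running elsewhere + in ISR   -> IPI, spin until halted
 *	target running elsewhere + thread   -> IPI, pend on wq, z_swap()
 *	target unscheduled (or local)       -> halt_thread() directly,
 *	                                       z_swap() only if we just
 *	                                       halted _current ourselves
 *
 * In every path the scheduler lock is released before control returns
 * to the caller.
 */
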
void z_impl_k_thread_suspend(k_tid_t thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, suspend, thread);

	(void)z_abort_thread_timeout(thread);

	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

	if ((thread->base.thread_state & _THREAD_SUSPENDED) != 0U) {

		/* The target thread is already suspended.  Nothing to do. */

		k_spin_unlock(&_sched_spinlock, key);
		return;
	}

	z_thread_halt(thread, key, false);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, suspend, thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_suspend(k_tid_t thread)
{
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_suspend(thread);
}
#include <zephyr/syscalls/k_thread_suspend_mrsh.c>
#endif /* CONFIG_USERSPACE */

void z_impl_k_thread_resume(k_tid_t thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, resume, thread);

	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

	/* Do not try to resume a thread that was not suspended */
	if (!z_is_thread_suspended(thread)) {
		k_spin_unlock(&_sched_spinlock, key);
		return;
	}

	z_mark_thread_as_not_suspended(thread);
	ready_thread(thread);

	z_reschedule(&_sched_spinlock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, resume, thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_resume(k_tid_t thread)
{
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_resume(thread);
}
#include <zephyr/syscalls/k_thread_resume_mrsh.c>
#endif /* CONFIG_USERSPACE */

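/* Application-level usage sketch of the pair implemented above
 * (illustrative; "worker" is an assumed k_tid_t from k_thread_create):
 *
 *	k_thread_suspend(worker);   // worker stops running, keeps state
 *	...                         // reconfigure shared data safely
 *	k_thread_resume(worker);    // worker becomes ready again
 *
 * Suspending is idempotent (the early-return above) and resuming a
 * thread that is not suspended is a no-op, so the pair is safe to call
 * without tracking the target's state externally.
 */
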
static _wait_q_t *pended_on_thread(struct k_thread *thread)
{
	__ASSERT_NO_MSG(thread->base.pended_on);

	return thread->base.pended_on;
}

static void unready_thread(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}
	update_cache(thread == _current);
}

/* _sched_spinlock must be held */
static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q)
{
	unready_thread(thread);
	z_mark_thread_as_pending(thread);

	SYS_PORT_TRACING_FUNC(k_thread, sched_pend, thread);

	if (wait_q != NULL) {
		thread->base.pended_on = wait_q;
		_priq_wait_add(&wait_q->waitq, thread);
	}
}

static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
{
	if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		z_add_thread_timeout(thread, timeout);
	}
}

static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q,
			k_timeout_t timeout)
{
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(wait_q == NULL || arch_mem_coherent(wait_q));
#endif /* CONFIG_KERNEL_COHERENCE */
	add_to_waitq_locked(thread, wait_q);
	add_thread_timeout(thread, timeout);
}

void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
		   k_timeout_t timeout)
{
	__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
	K_SPINLOCK(&_sched_spinlock) {
		pend_locked(thread, wait_q, timeout);
	}
}

static inline void unpend_thread_no_timeout(struct k_thread *thread)
{
	_priq_wait_remove(&pended_on_thread(thread)->waitq, thread);
	z_mark_thread_as_not_pending(thread);
	thread->base.pended_on = NULL;
}

void z_unpend_thread_no_timeout(struct k_thread *thread)
{
	K_SPINLOCK(&_sched_spinlock) {
		if (thread->base.pended_on != NULL) {
			unpend_thread_no_timeout(thread);
		}
	}
}

void z_sched_wake_thread(struct k_thread *thread, bool is_timeout)
{
	K_SPINLOCK(&_sched_spinlock) {
		bool killed = (thread->base.thread_state &
			       (_THREAD_DEAD | _THREAD_ABORTING));

#ifdef CONFIG_EVENTS
		bool do_nothing = thread->no_wake_on_timeout && is_timeout;

		thread->no_wake_on_timeout = false;

		if (do_nothing) {
			continue;
		}
#endif /* CONFIG_EVENTS */

		if (!killed) {
			/* The thread is not being killed */
			if (thread->base.pended_on != NULL) {
				unpend_thread_no_timeout(thread);
			}
			z_mark_thread_as_started(thread);
			if (is_timeout) {
				z_mark_thread_as_not_suspended(thread);
			}
			ready_thread(thread);
		}
	}

}

#ifdef CONFIG_SYS_CLOCK_EXISTS
/* Timeout handler for *_thread_timeout() APIs */
void z_thread_timeout(struct _timeout *timeout)
{
	struct k_thread *thread = CONTAINER_OF(timeout,
					       struct k_thread, base.timeout);

	z_sched_wake_thread(thread, true);
}
#endif /* CONFIG_SYS_CLOCK_EXISTS */

int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
		_wait_q_t *wait_q, k_timeout_t timeout)
{
#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
	pending_current = _current;
#endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
	__ASSERT_NO_MSG(sizeof(_sched_spinlock) == 0 || lock != &_sched_spinlock);

	/* We do a "lock swap" prior to calling z_swap(), such that
	 * the caller's lock gets released as desired.  But we ensure
	 * that we hold the scheduler lock and leave local interrupts
	 * masked until we reach the context switch.  z_swap() itself
	 * has similar code; the duplication is because it's a legacy
	 * API that doesn't expect to be called with scheduler lock
	 * held.
	 */
	(void) k_spin_lock(&_sched_spinlock);
	pend_locked(_current, wait_q, timeout);
	k_spin_release(lock);
	return z_swap(&_sched_spinlock, key);
}

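/* The "lock swap" above is the standard pattern for kernel objects
 * built on z_pend_curr() (illustrative sketch with an assumed object
 * lock):
 *
 *	k_spinlock_key_t key = k_spin_lock(&obj->lock);
 *	...                         // object found busy, must block
 *	return z_pend_curr(&obj->lock, key, &obj->wait_q, timeout);
 *
 * The caller's lock is released only after _sched_spinlock is taken,
 * so no wakeup can slip through between "decide to sleep" and
 * "actually pended".
 */
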
struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
{
	struct k_thread *thread = NULL;

	K_SPINLOCK(&_sched_spinlock) {
		thread = _priq_wait_best(&wait_q->waitq);

		if (thread != NULL) {
			unpend_thread_no_timeout(thread);
		}
	}

	return thread;
}

struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
{
	struct k_thread *thread = NULL;

	K_SPINLOCK(&_sched_spinlock) {
		thread = _priq_wait_best(&wait_q->waitq);

		if (unlikely(thread != NULL)) {
			unpend_thread_no_timeout(thread);
			(void)z_abort_thread_timeout(thread);
		}
	}

	return thread;
}

void z_unpend_thread(struct k_thread *thread)
{
	z_unpend_thread_no_timeout(thread);
	(void)z_abort_thread_timeout(thread);
}

/* Priority set utility that does no rescheduling, it just changes the
 * run queue state, returning true if a reschedule is needed later.
 */
bool z_thread_prio_set(struct k_thread *thread, int prio)
{
	bool need_sched = false;
	int old_prio = thread->base.prio;

	K_SPINLOCK(&_sched_spinlock) {
		need_sched = z_is_thread_ready(thread);

		if (need_sched) {
			if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) {
				dequeue_thread(thread);
				thread->base.prio = prio;
				queue_thread(thread);

				if (old_prio > prio) {
					flag_ipi(ipi_mask_create(thread));
				}
			} else {
				/*
				 * This is a running thread on SMP.  Update its
				 * priority, but do not requeue it.  An IPI is
				 * needed if the priority is both being lowered
				 * and it is running on another CPU.
				 */

				thread->base.prio = prio;

				struct _cpu *cpu;

				cpu = thread_active_elsewhere(thread);
				if ((cpu != NULL) && (old_prio < prio)) {
					flag_ipi(IPI_CPU_MASK(cpu->id));
				}
			}

			update_cache(1);
		} else {
			thread->base.prio = prio;
		}
	}

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_priority_set, thread, prio);

	return need_sched;
}

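/* Typical caller pattern (illustrative, an assumption about the public
 * wrapper rather than code from this file): k_thread_priority_set()
 * uses this helper and performs the deferred reschedule when it
 * returns true, roughly:
 *
 *	if (z_thread_prio_set(thread, prio)) {
 *		z_reschedule_unlocked();   // pick up the new ordering
 *	}
 *
 * Remember that lower numeric prio means higher priority, so the
 * "old_prio > prio" test above detects a priority *raise* (which may
 * require waking another CPU via IPI).
 */
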
Anas Nashif | 3f4f3f6 | 2021-03-29 17:13:47 -0400 | [diff] [blame] | 778 | static inline bool resched(uint32_t key) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 779 | { |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 780 | #ifdef CONFIG_SMP |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 781 | _current_cpu->swap_ok = 0; |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 782 | #endif /* CONFIG_SMP */ |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 783 | |
Andrew Boie | 4f77c2a | 2019-11-07 12:43:29 -0800 | [diff] [blame] | 784 | return arch_irq_unlocked(key) && !arch_is_in_isr(); |
Andy Ross | ec554f4 | 2018-07-24 13:37:59 -0700 | [diff] [blame] | 785 | } |
| 786 | |
Anas Nashif | 379b93f | 2020-08-10 15:47:02 -0400 | [diff] [blame] | 787 | /* |
| 788 | * Check if the next ready thread is the same as the current thread |
| 789 | * and save the trip if true. |
| 790 | */ |
| 791 | static inline bool need_swap(void) |
| 792 | { |
| 793 | /* the SMP case will be handled in C based z_swap() */ |
| 794 | #ifdef CONFIG_SMP |
| 795 | return true; |
| 796 | #else |
| 797 | struct k_thread *new_thread; |
| 798 | |
| 799 | /* Check if the next ready thread is the same as the current thread */ |
Andy Ross | 6b84ab3 | 2021-02-18 10:15:23 -0800 | [diff] [blame] | 800 | new_thread = _kernel.ready_q.cache; |
Anas Nashif | 379b93f | 2020-08-10 15:47:02 -0400 | [diff] [blame] | 801 | return new_thread != _current; |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 802 | #endif /* CONFIG_SMP */ |
Anas Nashif | 379b93f | 2020-08-10 15:47:02 -0400 | [diff] [blame] | 803 | } |
| 804 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 805 | void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key) |
Andy Ross | ec554f4 | 2018-07-24 13:37:59 -0700 | [diff] [blame] | 806 | { |
Anas Nashif | 379b93f | 2020-08-10 15:47:02 -0400 | [diff] [blame] | 807 | if (resched(key.key) && need_swap()) { |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 808 | z_swap(lock, key); |
Andy Ross | ec554f4 | 2018-07-24 13:37:59 -0700 | [diff] [blame] | 809 | } else { |
| 810 | k_spin_unlock(lock, key); |
Andy Ross | b4e9ef0 | 2022-04-06 10:10:17 -0700 | [diff] [blame] | 811 | signal_pending_ipi(); |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 812 | } |
Andy Ross | ec554f4 | 2018-07-24 13:37:59 -0700 | [diff] [blame] | 813 | } |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 814 | |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 815 | void z_reschedule_irqlock(uint32_t key) |
Andy Ross | ec554f4 | 2018-07-24 13:37:59 -0700 | [diff] [blame] | 816 | { |
Gaetan Perrot | 68581ca | 2023-12-21 11:01:54 +0900 | [diff] [blame] | 817 | if (resched(key) && need_swap()) { |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 818 | z_swap_irqlock(key); |
Andy Ross | ec554f4 | 2018-07-24 13:37:59 -0700 | [diff] [blame] | 819 | } else { |
| 820 | irq_unlock(key); |
Andy Ross | b4e9ef0 | 2022-04-06 10:10:17 -0700 | [diff] [blame] | 821 | signal_pending_ipi(); |
Andy Ross | ec554f4 | 2018-07-24 13:37:59 -0700 | [diff] [blame] | 822 | } |
Andy Ross | 8606fab | 2018-03-26 10:54:40 -0700 | [diff] [blame] | 823 | } |
| 824 | |
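/*
 * Illustrative sketch, not compiled: a hypothetical kernel-side
 * primitive showing the intended z_reschedule() pattern -- make a
 * thread ready while holding _sched_spinlock, then hand the lock and
 * key to z_reschedule() so the wakeup doubles as a preemption point.
 */
#if 0
static void my_object_signal(struct k_thread *waiter) /* hypothetical */
{
	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

	z_ready_thread(waiter);

	/* Swaps if a higher-priority thread became runnable; otherwise
	 * just unlocks and flushes any pending IPIs.
	 */
	z_reschedule(&_sched_spinlock, key);
}
#endif
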
Benjamin Walsh | d7ad176 | 2016-11-10 14:46:58 -0500 | [diff] [blame] | 825 | void k_sched_lock(void) |
| 826 | { |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 827 | K_SPINLOCK(&_sched_spinlock) { |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 828 | SYS_PORT_TRACING_FUNC(k_thread, sched_lock); |
| 829 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 830 | z_sched_lock(); |
Andy Ross | 1856e22 | 2018-05-21 11:48:35 -0700 | [diff] [blame] | 831 | } |
Benjamin Walsh | d7ad176 | 2016-11-10 14:46:58 -0500 | [diff] [blame] | 832 | } |
| 833 | |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 834 | void k_sched_unlock(void) |
| 835 | { |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 836 | K_SPINLOCK(&_sched_spinlock) { |
Anas Nashif | bbbc38b | 2021-03-29 10:03:49 -0400 | [diff] [blame] | 837 | __ASSERT(_current->base.sched_locked != 0U, ""); |
Andy Ross | eefd3da | 2020-02-06 13:39:52 -0800 | [diff] [blame] | 838 | __ASSERT(!arch_is_in_isr(), ""); |
| 839 | |
Andy Ross | 1856e22 | 2018-05-21 11:48:35 -0700 | [diff] [blame] | 840 | ++_current->base.sched_locked; |
Yasushi SHOJI | 20d0724 | 2019-07-31 11:19:08 +0900 | [diff] [blame] | 841 | update_cache(0); |
Andy Ross | 1856e22 | 2018-05-21 11:48:35 -0700 | [diff] [blame] | 842 | } |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 843 | |
Anas Nashif | 2c5d404 | 2019-12-02 10:24:08 -0500 | [diff] [blame] | 844 | LOG_DBG("scheduler unlocked (%p:%d)", |
Benjamin Walsh | a4e033f | 2016-11-18 16:08:24 -0500 | [diff] [blame] | 845 | _current, _current->base.sched_locked); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 846 | |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 847 | SYS_PORT_TRACING_FUNC(k_thread, sched_unlock); |
| 848 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 849 | z_reschedule_unlocked(); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 850 | } |
| 851 | |
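/*
 * Usage sketch, not compiled: application code using the scheduler
 * lock as a lightweight critical section against other threads (the
 * names here are hypothetical). ISRs still run while it is held.
 */
#if 0
static int shared_counter; /* hypothetical shared state */

void update_shared_state(void)
{
	k_sched_lock();
	/* The current thread cannot be preempted by another thread
	 * until the matching unlock.
	 */
	shared_counter++;
	k_sched_unlock(); /* reschedule point: may swap immediately */
}
#endif
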
Andy Ross | 6b84ab3 | 2021-02-18 10:15:23 -0800 | [diff] [blame] | 852 | struct k_thread *z_swap_next_thread(void) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 853 | { |
Andy Ross | 6b84ab3 | 2021-02-18 10:15:23 -0800 | [diff] [blame] | 854 | #ifdef CONFIG_SMP |
Andy Ross | b4e9ef0 | 2022-04-06 10:10:17 -0700 | [diff] [blame] | 855 | struct k_thread *ret = next_up(); |
| 856 | |
| 857 | if (ret == _current) { |
| 858 | /* When not swapping, have to signal IPIs here. In |
| 859 | * the context switch case it must happen later, after |
| 860 | * _current gets requeued. |
| 861 | */ |
| 862 | signal_pending_ipi(); |
| 863 | } |
| 864 | return ret; |
Andy Ross | 6b84ab3 | 2021-02-18 10:15:23 -0800 | [diff] [blame] | 865 | #else |
| 866 | return _kernel.ready_q.cache; |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 867 | #endif /* CONFIG_SMP */ |
Andy Ross | 6b84ab3 | 2021-02-18 10:15:23 -0800 | [diff] [blame] | 868 | } |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 869 | |
Jeremy Bettis | 1e0a36c | 2021-12-06 10:56:33 -0700 | [diff] [blame] | 870 | #ifdef CONFIG_USE_SWITCH |
Andy Ross | b18685b | 2019-02-19 17:24:30 -0800 | [diff] [blame] | 871 | /* Just a wrapper around _current = xxx with tracing */ |
| 872 | static inline void set_current(struct k_thread *new_thread) |
| 873 | { |
Daniel Leung | 11e6b43 | 2020-08-27 16:12:01 -0700 | [diff] [blame] | 874 | z_thread_mark_switched_out(); |
Andy Ross | eefd3da | 2020-02-06 13:39:52 -0800 | [diff] [blame] | 875 | _current_cpu->current = new_thread; |
Andy Ross | b18685b | 2019-02-19 17:24:30 -0800 | [diff] [blame] | 876 | } |
| 877 | |
Nicolas Pitre | c9e3e0d | 2022-03-15 22:36:20 -0400 | [diff] [blame] | 878 | /** |
| 879 | * @brief Determine next thread to execute upon completion of an interrupt |
| 880 | * |
| 881 | * Thread preemption is performed by context switching after the completion |
| 882 | * of a non-recursed interrupt. This function determines which thread to |
| 883 | * switch to if any. This function accepts as @p interrupted either: |
| 884 | * |
 | 885 | * - The handle for the interrupted thread, in which case the thread's
 | 886 | * context must already be fully saved and ready to be picked up by a
 | 887 | * different CPU.
 | 888 | *
 | 889 | * - NULL if more work is required to fully save the thread's state after
 | 890 | * it is known that a new thread is to be scheduled. In that case the
 | 891 | * caller must store the handle of the thread being switched out in that
 | 892 | * thread's "switch_handle" field once its context has fully been saved,
 | 893 | * following the same requirements as with the @ref arch_switch() function.
| 894 | * |
| 895 | * If a new thread needs to be scheduled then its handle is returned. |
| 896 | * Otherwise the same value provided as @p interrupted is returned back. |
| 897 | * Those handles are the same opaque types used by the @ref arch_switch() |
| 898 | * function. |
| 899 | * |
| 900 | * @warning |
Anas Nashif | ca09a4b | 2024-09-13 06:41:57 -0400 | [diff] [blame] | 901 | * The _current value may have changed after this call and not refer |
Nicolas Pitre | c9e3e0d | 2022-03-15 22:36:20 -0400 | [diff] [blame] | 902 | * to the interrupted thread anymore. It might be necessary to make a local |
| 903 | * copy before calling this function. |
| 904 | * |
| 905 | * @param interrupted Handle for the thread that was interrupted or NULL. |
| 906 | * @retval Handle for the next thread to execute, or @p interrupted when |
| 907 | * no new thread is to be scheduled. |
| 908 | */ |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 909 | void *z_get_next_switch_handle(void *interrupted) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 910 | { |
Andrew Boie | ae0d1b2 | 2019-03-29 16:25:27 -0700 | [diff] [blame] | 911 | z_check_stack_sentinel(); |
| 912 | |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 913 | #ifdef CONFIG_SMP |
Andy Ross | dd43221 | 2021-02-05 08:15:02 -0800 | [diff] [blame] | 914 | void *ret = NULL; |
| 915 | |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 916 | K_SPINLOCK(&_sched_spinlock) { |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 917 | struct k_thread *old_thread = _current, *new_thread; |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 918 | |
Andy Ross | 4ff4571 | 2021-02-08 08:28:54 -0800 | [diff] [blame] | 919 | if (IS_ENABLED(CONFIG_SMP)) { |
| 920 | old_thread->switch_handle = NULL; |
| 921 | } |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 922 | new_thread = next_up(); |
| 923 | |
Andy Ross | 40d12c1 | 2021-09-27 08:22:43 -0700 | [diff] [blame] | 924 | z_sched_usage_switch(new_thread); |
| 925 | |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 926 | if (old_thread != new_thread) { |
Peter Mitsis | ada3c90 | 2024-04-23 13:53:40 -0400 | [diff] [blame] | 927 | uint8_t cpu_id; |
| 928 | |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 929 | update_metairq_preempt(new_thread); |
Andy Ross | b89e427 | 2023-05-26 09:12:51 -0700 | [diff] [blame] | 930 | z_sched_switch_spin(new_thread); |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 931 | arch_cohere_stacks(old_thread, interrupted, new_thread); |
Andy Ross | 11a050b | 2019-11-13 09:41:52 -0800 | [diff] [blame] | 932 | |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 933 | _current_cpu->swap_ok = 0; |
Peter Mitsis | ada3c90 | 2024-04-23 13:53:40 -0400 | [diff] [blame] | 934 | cpu_id = arch_curr_cpu()->id; |
| 935 | new_thread->base.cpu = cpu_id; |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 936 | set_current(new_thread); |
| 937 | |
Andy Ross | 3e69689 | 2021-11-30 18:26:26 -0800 | [diff] [blame] | 938 | #ifdef CONFIG_TIMESLICING |
| 939 | z_reset_time_slice(new_thread); |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 940 | #endif /* CONFIG_TIMESLICING */ |
Andy Ross | 3e69689 | 2021-11-30 18:26:26 -0800 | [diff] [blame] | 941 | |
Danny Oerndrup | c9d7840 | 2019-12-13 11:24:56 +0100 | [diff] [blame] | 942 | #ifdef CONFIG_SPIN_VALIDATE |
Andy Ross | 8c1bdda | 2019-02-20 10:07:31 -0800 | [diff] [blame] | 943 | /* Changed _current! Update the spinlock |
Anas Nashif | 6df4405 | 2021-04-30 09:58:20 -0400 | [diff] [blame] | 944 | * bookkeeping so the validation doesn't get |
Andy Ross | 8c1bdda | 2019-02-20 10:07:31 -0800 | [diff] [blame] | 945 | * confused when the "wrong" thread tries to |
| 946 | * release the lock. |
| 947 | */ |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 948 | z_spin_lock_set_owner(&_sched_spinlock); |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 949 | #endif /* CONFIG_SPIN_VALIDATE */ |
Andy Ross | 4ff4571 | 2021-02-08 08:28:54 -0800 | [diff] [blame] | 950 | |
| 951 | /* A queued (runnable) old/current thread |
| 952 | * needs to be added back to the run queue |
| 953 | * here, and atomically with its switch handle |
| 954 | * being set below. This is safe now, as we |
| 955 | * will not return into it. |
| 956 | */ |
| 957 | if (z_is_thread_queued(old_thread)) { |
Peter Mitsis | ada3c90 | 2024-04-23 13:53:40 -0400 | [diff] [blame] | 958 | #ifdef CONFIG_SCHED_IPI_CASCADE |
| 959 | if ((new_thread->base.cpu_mask != -1) && |
| 960 | (old_thread->base.cpu_mask != BIT(cpu_id))) { |
| 961 | flag_ipi(ipi_mask_create(old_thread)); |
| 962 | } |
| 963 | #endif |
Andy Ross | 387fdd2 | 2021-09-23 18:44:40 -0700 | [diff] [blame] | 964 | runq_add(old_thread); |
Andy Ross | 4ff4571 | 2021-02-08 08:28:54 -0800 | [diff] [blame] | 965 | } |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 966 | } |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 967 | old_thread->switch_handle = interrupted; |
Andy Ross | dd43221 | 2021-02-05 08:15:02 -0800 | [diff] [blame] | 968 | ret = new_thread->switch_handle; |
Andy Ross | 4ff4571 | 2021-02-08 08:28:54 -0800 | [diff] [blame] | 969 | if (IS_ENABLED(CONFIG_SMP)) { |
| 970 | /* Active threads MUST have a null here */ |
| 971 | new_thread->switch_handle = NULL; |
| 972 | } |
Benjamin Walsh | b8c2160 | 2016-12-23 19:34:41 -0500 | [diff] [blame] | 973 | } |
Andy Ross | b4e9ef0 | 2022-04-06 10:10:17 -0700 | [diff] [blame] | 974 | signal_pending_ipi(); |
Andy Ross | dd43221 | 2021-02-05 08:15:02 -0800 | [diff] [blame] | 975 | return ret; |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 976 | #else |
Andy Ross | 40d12c1 | 2021-09-27 08:22:43 -0700 | [diff] [blame] | 977 | z_sched_usage_switch(_kernel.ready_q.cache); |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 978 | _current->switch_handle = interrupted; |
Andy Ross | 6b84ab3 | 2021-02-18 10:15:23 -0800 | [diff] [blame] | 979 | set_current(_kernel.ready_q.cache); |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 980 | return _current->switch_handle; |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 981 | #endif /* CONFIG_SMP */ |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 982 | } |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 983 | #endif /* CONFIG_USE_SWITCH */ |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 984 | |
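/*
 * Illustrative sketch, not compiled: roughly how an architecture's
 * interrupt-exit path might consume z_get_next_switch_handle(). Both
 * function names below are hypothetical, not a real arch port.
 */
#if 0
void hypothetical_arch_isr_exit(void *interrupted)
{
	void *next = z_get_next_switch_handle(interrupted);

	if (next != interrupted) {
		/* A different thread was chosen: restore its context.
		 * Note that _current may no longer refer to the
		 * interrupted thread at this point.
		 */
		hypothetical_restore_context(next);
	}
	/* Otherwise, return into the interrupted thread as usual. */
}
#endif
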
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 985 | int z_unpend_all(_wait_q_t *wait_q) |
Andy Ross | 4ca0e07 | 2018-05-10 09:45:42 -0700 | [diff] [blame] | 986 | { |
Andy Ross | ccf3bf7 | 2018-05-10 11:10:34 -0700 | [diff] [blame] | 987 | int need_sched = 0; |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 988 | struct k_thread *thread; |
Andy Ross | 4ca0e07 | 2018-05-10 09:45:42 -0700 | [diff] [blame] | 989 | |
Hess Nathan | 20b5542 | 2024-05-02 14:02:20 +0200 | [diff] [blame] | 990 | for (thread = z_waitq_head(wait_q); thread != NULL; thread = z_waitq_head(wait_q)) { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 991 | z_unpend_thread(thread); |
| 992 | z_ready_thread(thread); |
Andy Ross | 4ca0e07 | 2018-05-10 09:45:42 -0700 | [diff] [blame] | 993 | need_sched = 1; |
| 994 | } |
Andy Ross | ccf3bf7 | 2018-05-10 11:10:34 -0700 | [diff] [blame] | 995 | |
| 996 | return need_sched; |
Andy Ross | 4ca0e07 | 2018-05-10 09:45:42 -0700 | [diff] [blame] | 997 | } |
| 998 | |
Anas Nashif | 477a04a | 2024-02-28 08:15:15 -0500 | [diff] [blame] | 999 | void init_ready_q(struct _ready_q *ready_q) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1000 | { |
Andy Ross | b155d06 | 2021-09-24 13:49:14 -0700 | [diff] [blame] | 1001 | #if defined(CONFIG_SCHED_SCALABLE) |
Anas Nashif | 477a04a | 2024-02-28 08:15:15 -0500 | [diff] [blame] | 1002 | ready_q->runq = (struct _priq_rb) { |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1003 | .tree = { |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1004 | .lessthan_fn = z_priq_rb_lessthan, |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1005 | } |
| 1006 | }; |
Andy Ross | b155d06 | 2021-09-24 13:49:14 -0700 | [diff] [blame] | 1007 | #elif defined(CONFIG_SCHED_MULTIQ) |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1008 | for (int i = 0; i < ARRAY_SIZE(_kernel.ready_q.runq.queues); i++) { |
Anas Nashif | 477a04a | 2024-02-28 08:15:15 -0500 | [diff] [blame] | 1009 | sys_dlist_init(&ready_q->runq.queues[i]); |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1010 | } |
Andy Ross | b155d06 | 2021-09-24 13:49:14 -0700 | [diff] [blame] | 1011 | #else |
Anas Nashif | 477a04a | 2024-02-28 08:15:15 -0500 | [diff] [blame] | 1012 | sys_dlist_init(&ready_q->runq); |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1013 | #endif |
Andy Ross | b155d06 | 2021-09-24 13:49:14 -0700 | [diff] [blame] | 1014 | } |
| 1015 | |
| 1016 | void z_sched_init(void) |
| 1017 | { |
Andy Ross | b11e796 | 2021-09-24 10:57:39 -0700 | [diff] [blame] | 1018 | #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY |
Nicolas Pitre | 907eea0 | 2023-03-16 17:54:25 -0400 | [diff] [blame] | 1019 | for (int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) { |
Andy Ross | b11e796 | 2021-09-24 10:57:39 -0700 | [diff] [blame] | 1020 | init_ready_q(&_kernel.cpus[i].ready_q); |
| 1021 | } |
| 1022 | #else |
Andy Ross | b155d06 | 2021-09-24 13:49:14 -0700 | [diff] [blame] | 1023 | init_ready_q(&_kernel.ready_q); |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 1024 | #endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */ |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1025 | } |
| 1026 | |
Anas Nashif | 25c87db | 2021-03-29 10:54:23 -0400 | [diff] [blame] | 1027 | void z_impl_k_thread_priority_set(k_tid_t thread, int prio) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1028 | { |
Benjamin Walsh | 3cc2ba9 | 2016-11-08 15:44:05 -0500 | [diff] [blame] | 1029 | /* |
| 1030 | * Use NULL, since we cannot know what the entry point is (we do not |
| 1031 | * keep track of it) and idle cannot change its priority. |
| 1032 | */ |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1033 | Z_ASSERT_VALID_PRIO(prio, NULL); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1034 | |
Anas Nashif | 868f099 | 2024-02-24 11:37:56 -0500 | [diff] [blame] | 1035 | bool need_sched = z_thread_prio_set((struct k_thread *)thread, prio); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1036 | |
Peter Mitsis | 9ff5221 | 2024-03-01 14:44:26 -0500 | [diff] [blame] | 1037 | if ((need_sched) && (IS_ENABLED(CONFIG_SMP) || |
| 1038 | (_current->base.sched_locked == 0U))) { |
Anas Nashif | 5e591c3 | 2024-02-24 10:37:06 -0500 | [diff] [blame] | 1039 | z_reschedule_unlocked(); |
| 1040 | } |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1041 | } |
| 1042 | |
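/*
 * Usage sketch, not compiled: remember that Zephyr priorities are
 * numerically inverted, so passing a smaller value raises a thread's
 * priority. The thread handle here is hypothetical.
 */
#if 0
void boost_worker(k_tid_t worker)
{
	k_thread_priority_set(worker, K_PRIO_PREEMPT(1));
}
#endif
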
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1043 | #ifdef CONFIG_USERSPACE |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1044 | static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio) |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1045 | { |
Anas Nashif | a08bfeb | 2023-09-27 11:20:28 +0000 | [diff] [blame] | 1046 | K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD)); |
| 1047 | K_OOPS(K_SYSCALL_VERIFY_MSG(_is_valid_prio(prio, NULL), |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1048 | "invalid thread priority %d", prio)); |
Anas Nashif | 5e591c3 | 2024-02-24 10:37:06 -0500 | [diff] [blame] | 1049 | #ifndef CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY |
Anas Nashif | a08bfeb | 2023-09-27 11:20:28 +0000 | [diff] [blame] | 1050 | K_OOPS(K_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio, |
Andrew Boie | 8345e5e | 2018-05-04 15:57:57 -0700 | [diff] [blame] | 1051 | "thread priority may only be downgraded (%d < %d)", |
| 1052 | prio, thread->base.prio)); |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 1053 | #endif /* CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY */ |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1054 | z_impl_k_thread_priority_set(thread, prio); |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1055 | } |
Yong Cong Sin | bbe5e1e | 2024-01-24 17:35:04 +0800 | [diff] [blame] | 1056 | #include <zephyr/syscalls/k_thread_priority_set_mrsh.c> |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 1057 | #endif /* CONFIG_USERSPACE */ |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1058 | |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1059 | #ifdef CONFIG_SCHED_DEADLINE |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1060 | void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline) |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1061 | { |
| 1063 | deadline = CLAMP(deadline, 0, INT_MAX); |
| 1064 | |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1065 | struct k_thread *thread = tid; |
Andy Ross | f2280d1 | 2024-03-08 08:42:08 -0800 | [diff] [blame] | 1066 | int32_t newdl = k_cycle_get_32() + deadline; |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1067 | |
Andy Ross | f2280d1 | 2024-03-08 08:42:08 -0800 | [diff] [blame] | 1068 | /* The prio_deadline field changes the sorting order, so can't |
| 1069 | * change it while the thread is in the run queue (dlists |
| 1070 | * actually are benign as long as we requeue it before we |
| 1071 | * release the lock, but an rbtree will blow up if we break |
| 1072 | * sorting!) |
| 1073 | */ |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1074 | K_SPINLOCK(&_sched_spinlock) { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1075 | if (z_is_thread_queued(thread)) { |
Andy Ross | c230fb3 | 2021-09-23 16:41:30 -0700 | [diff] [blame] | 1076 | dequeue_thread(thread); |
Andy Ross | f2280d1 | 2024-03-08 08:42:08 -0800 | [diff] [blame] | 1077 | thread->base.prio_deadline = newdl; |
Andy Ross | c230fb3 | 2021-09-23 16:41:30 -0700 | [diff] [blame] | 1078 | queue_thread(thread); |
Andy Ross | f2280d1 | 2024-03-08 08:42:08 -0800 | [diff] [blame] | 1079 | } else { |
| 1080 | thread->base.prio_deadline = newdl; |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1081 | } |
| 1082 | } |
| 1083 | } |
| 1084 | |
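/*
 * Usage sketch, not compiled: deadlines are given relative to "now",
 * in hardware cycles. This assumes the k_ms_to_cyc_ceil32() time
 * conversion helper; the thread handle is hypothetical.
 */
#if 0
void start_edf_job(k_tid_t job)
{
	/* Among threads of equal priority, EDF ordering now favors
	 * whichever has the earliest absolute deadline.
	 */
	k_thread_deadline_set(job, k_ms_to_cyc_ceil32(10));
}
#endif
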
| 1085 | #ifdef CONFIG_USERSPACE |
Andy Ross | 075c94f | 2019-08-13 11:34:34 -0700 | [diff] [blame] | 1086 | static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline) |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1087 | { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1088 | struct k_thread *thread = tid; |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1089 | |
Anas Nashif | a08bfeb | 2023-09-27 11:20:28 +0000 | [diff] [blame] | 1090 | K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD)); |
| 1091 | K_OOPS(K_SYSCALL_VERIFY_MSG(deadline > 0, |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1092 | "invalid thread deadline %d", |
| 1093 | (int)deadline)); |
| 1094 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1095 | z_impl_k_thread_deadline_set((k_tid_t)thread, deadline); |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1096 | } |
Yong Cong Sin | bbe5e1e | 2024-01-24 17:35:04 +0800 | [diff] [blame] | 1097 | #include <zephyr/syscalls/k_thread_deadline_set_mrsh.c> |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 1098 | #endif /* CONFIG_USERSPACE */ |
| 1099 | #endif /* CONFIG_SCHED_DEADLINE */ |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1100 | |
Jordan Yates | 1ef647f | 2022-03-26 09:55:23 +1000 | [diff] [blame] | 1101 | bool k_can_yield(void) |
| 1102 | { |
| 1103 | return !(k_is_pre_kernel() || k_is_in_isr() || |
| 1104 | z_is_idle_thread_object(_current)); |
| 1105 | } |
| 1106 | |
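/*
 * Usage sketch, not compiled: a long-running loop that yields only
 * when doing so is legal and meaningful (not pre-kernel, not in an
 * ISR, not the idle thread). The work items are hypothetical.
 */
#if 0
void crunch_items(int n)
{
	for (int i = 0; i < n; i++) {
		process_item(i); /* hypothetical unit of work */

		if (k_can_yield()) {
			k_yield();
		}
	}
}
#endif
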
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1107 | void z_impl_k_yield(void) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1108 | { |
Andrew Boie | 4f77c2a | 2019-11-07 12:43:29 -0800 | [diff] [blame] | 1109 | __ASSERT(!arch_is_in_isr(), ""); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1110 | |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1111 | SYS_PORT_TRACING_FUNC(k_thread, yield); |
| 1112 | |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1113 | k_spinlock_key_t key = k_spin_lock(&_sched_spinlock); |
James Harris | 6543e06 | 2021-03-01 10:14:13 -0800 | [diff] [blame] | 1114 | |
Andy Ross | 851d14a | 2021-05-13 15:46:43 -0700 | [diff] [blame] | 1115 | if (!IS_ENABLED(CONFIG_SMP) || |
| 1116 | z_is_thread_queued(_current)) { |
Andy Ross | c230fb3 | 2021-09-23 16:41:30 -0700 | [diff] [blame] | 1117 | dequeue_thread(_current); |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1118 | } |
Andy Ross | c230fb3 | 2021-09-23 16:41:30 -0700 | [diff] [blame] | 1119 | queue_thread(_current); |
Andy Ross | 851d14a | 2021-05-13 15:46:43 -0700 | [diff] [blame] | 1120 | update_cache(1); |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1121 | z_swap(&_sched_spinlock, key); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1122 | } |
| 1123 | |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1124 | #ifdef CONFIG_USERSPACE |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1125 | static inline void z_vrfy_k_yield(void) |
| 1126 | { |
| 1127 | z_impl_k_yield(); |
| 1128 | } |
Yong Cong Sin | bbe5e1e | 2024-01-24 17:35:04 +0800 | [diff] [blame] | 1129 | #include <zephyr/syscalls/k_yield_mrsh.c> |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 1130 | #endif /* CONFIG_USERSPACE */ |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1131 | |
Flavio Ceolin | 7a815d5 | 2020-10-19 21:37:22 -0700 | [diff] [blame] | 1132 | static int32_t z_tick_sleep(k_ticks_t ticks) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1133 | { |
Flavio Ceolin | 9a16097 | 2020-11-16 10:40:46 -0800 | [diff] [blame] | 1134 | uint32_t expected_wakeup_ticks; |
Carles Cufi | 9849df8 | 2016-12-02 15:31:08 +0100 | [diff] [blame] | 1135 | |
Andrew Boie | 4f77c2a | 2019-11-07 12:43:29 -0800 | [diff] [blame] | 1136 | __ASSERT(!arch_is_in_isr(), ""); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1137 | |
Gerard Marull-Paretas | 737d799 | 2022-11-23 13:42:04 +0100 | [diff] [blame] | 1138 | LOG_DBG("thread %p for %lu ticks", _current, (unsigned long)ticks); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1139 | |
Benjamin Walsh | 5596f78 | 2016-12-09 19:57:17 -0500 | [diff] [blame] | 1140 | /* a wait of zero ticks is treated as a 'yield' */
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1141 | if (ticks == 0) { |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1142 | k_yield(); |
Piotr Zięcik | 7700eb2 | 2018-10-25 17:45:08 +0200 | [diff] [blame] | 1143 | return 0; |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1144 | } |
| 1145 | |
Lauren Murphy | 4c85b46 | 2021-05-25 17:49:28 -0500 | [diff] [blame] | 1146 | if (Z_TICK_ABS(ticks) <= 0) { |
| 1147 | expected_wakeup_ticks = ticks + sys_clock_tick_get_32(); |
| 1148 | } else { |
| 1149 | expected_wakeup_ticks = Z_TICK_ABS(ticks); |
| 1150 | } |
Andy Ross | d27d4e6 | 2019-02-05 15:36:01 -0800 | [diff] [blame] | 1151 | |
Gerson Fernando Budke | b8188e5 | 2023-10-16 20:15:31 +0200 | [diff] [blame] | 1152 | k_timeout_t timeout = Z_TIMEOUT_TICKS(ticks); |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1153 | k_spinlock_key_t key = k_spin_lock(&_sched_spinlock); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1154 | |
Andy Ross | dff6b71 | 2019-02-25 21:17:29 -0800 | [diff] [blame] | 1155 | #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC) |
| 1156 | pending_current = _current; |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 1157 | #endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */ |
Andrew Boie | a8775ab | 2020-09-05 12:53:42 -0700 | [diff] [blame] | 1158 | unready_thread(_current); |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1159 | z_add_thread_timeout(_current, timeout); |
Andy Ross | 4521e0c | 2019-03-22 10:30:19 -0700 | [diff] [blame] | 1160 | z_mark_thread_as_suspended(_current); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1161 | |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1162 | (void)z_swap(&_sched_spinlock, key); |
Piotr Zięcik | 7700eb2 | 2018-10-25 17:45:08 +0200 | [diff] [blame] | 1163 | |
Andy Ross | 4521e0c | 2019-03-22 10:30:19 -0700 | [diff] [blame] | 1164 | __ASSERT(!z_is_thread_state_set(_current, _THREAD_SUSPENDED), ""); |
| 1165 | |
Anas Nashif | 5c90ceb | 2021-03-13 08:19:53 -0500 | [diff] [blame] | 1166 | ticks = (k_ticks_t)expected_wakeup_ticks - sys_clock_tick_get_32(); |
Piotr Zięcik | 7700eb2 | 2018-10-25 17:45:08 +0200 | [diff] [blame] | 1167 | if (ticks > 0) { |
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1168 | return ticks; |
Piotr Zięcik | 7700eb2 | 2018-10-25 17:45:08 +0200 | [diff] [blame] | 1169 | } |
Piotr Zięcik | 7700eb2 | 2018-10-25 17:45:08 +0200 | [diff] [blame] | 1170 | |
| 1171 | return 0; |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1172 | } |
| 1173 | |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1174 | int32_t z_impl_k_sleep(k_timeout_t timeout) |
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1175 | { |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1176 | k_ticks_t ticks; |
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1177 | |
Peter Bigot | 8162e58 | 2019-12-12 16:07:07 -0600 | [diff] [blame] | 1178 | __ASSERT(!arch_is_in_isr(), ""); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1179 | |
| 1180 | SYS_PORT_TRACING_FUNC_ENTER(k_thread, sleep, timeout); |
Peter Bigot | 8162e58 | 2019-12-12 16:07:07 -0600 | [diff] [blame] | 1181 | |
Anas Nashif | d2c7179 | 2020-10-17 07:52:17 -0400 | [diff] [blame] | 1182 | /* in case of K_FOREVER, we suspend */ |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1183 | if (K_TIMEOUT_EQ(timeout, K_FOREVER)) { |
Andrew Boie | d2b8922 | 2019-11-08 10:44:22 -0800 | [diff] [blame] | 1185 | k_thread_suspend(_current); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1186 | SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, (int32_t) K_TICKS_FOREVER); |
| 1187 | |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1188 | return (int32_t) K_TICKS_FOREVER; |
Andrew Boie | d2b8922 | 2019-11-08 10:44:22 -0800 | [diff] [blame] | 1189 | } |
| 1190 | |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1191 | ticks = timeout.ticks; |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1192 | |
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1193 | ticks = z_tick_sleep(ticks); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1194 | |
Peter Mitsis | a3e5af9 | 2023-12-05 13:40:19 -0500 | [diff] [blame] | 1195 | int32_t ret = k_ticks_to_ms_ceil64(ticks); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1196 | |
| 1197 | SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, ret); |
| 1198 | |
| 1199 | return ret; |
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1200 | } |
| 1201 | |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1202 | #ifdef CONFIG_USERSPACE |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1203 | static inline int32_t z_vrfy_k_sleep(k_timeout_t timeout) |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1204 | { |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1205 | return z_impl_k_sleep(timeout); |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1206 | } |
Yong Cong Sin | bbe5e1e | 2024-01-24 17:35:04 +0800 | [diff] [blame] | 1207 | #include <zephyr/syscalls/k_sleep_mrsh.c> |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 1208 | #endif /* CONFIG_USERSPACE */ |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1209 | |
Hess Nathan | 980d3f4 | 2024-06-25 09:13:15 +0200 | [diff] [blame] | 1210 | int32_t z_impl_k_usleep(int32_t us) |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1211 | { |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1212 | int32_t ticks; |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1213 | |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1214 | SYS_PORT_TRACING_FUNC_ENTER(k_thread, usleep, us); |
| 1215 | |
Andy Ross | 8892406 | 2019-10-03 11:43:10 -0700 | [diff] [blame] | 1216 | ticks = k_us_to_ticks_ceil64(us); |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1217 | ticks = z_tick_sleep(ticks); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1218 | |
Peter Mitsis | a3e5af9 | 2023-12-05 13:40:19 -0500 | [diff] [blame] | 1219 | int32_t ret = k_ticks_to_us_ceil64(ticks); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1220 | |
Peter Mitsis | a3e5af9 | 2023-12-05 13:40:19 -0500 | [diff] [blame] | 1221 | SYS_PORT_TRACING_FUNC_EXIT(k_thread, usleep, us, ret); |
| 1222 | |
| 1223 | return ret; |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1224 | } |
| 1225 | |
| 1226 | #ifdef CONFIG_USERSPACE |
Hess Nathan | 980d3f4 | 2024-06-25 09:13:15 +0200 | [diff] [blame] | 1227 | static inline int32_t z_vrfy_k_usleep(int32_t us) |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1228 | { |
| 1229 | return z_impl_k_usleep(us); |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1230 | } |
Yong Cong Sin | bbe5e1e | 2024-01-24 17:35:04 +0800 | [diff] [blame] | 1231 | #include <zephyr/syscalls/k_usleep_mrsh.c> |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 1232 | #endif /* CONFIG_USERSPACE */ |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1233 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1234 | void z_impl_k_wakeup(k_tid_t thread) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1235 | { |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1236 | SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread); |
| 1237 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1238 | if (z_is_thread_pending(thread)) { |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1239 | return; |
| 1240 | } |
| 1241 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1242 | if (z_abort_thread_timeout(thread) < 0) { |
Andrew Boie | d2b8922 | 2019-11-08 10:44:22 -0800 | [diff] [blame] | 1243 | /* Might have just been sleeping forever */ |
| 1244 | if (thread->base.thread_state != _THREAD_SUSPENDED) { |
| 1245 | return; |
| 1246 | } |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1247 | } |
| 1248 | |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1249 | k_spinlock_key_t key = k_spin_lock(&_sched_spinlock); |
Peter Mitsis | 51ae993 | 2024-02-20 11:50:54 -0500 | [diff] [blame] | 1250 | |
Andy Ross | 4521e0c | 2019-03-22 10:30:19 -0700 | [diff] [blame] | 1251 | z_mark_thread_as_not_suspended(thread); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1252 | |
Peter Mitsis | 9ff5221 | 2024-03-01 14:44:26 -0500 | [diff] [blame] | 1253 | if (thread_active_elsewhere(thread) == NULL) { |
Peter Mitsis | 51ae993 | 2024-02-20 11:50:54 -0500 | [diff] [blame] | 1254 | ready_thread(thread); |
| 1255 | } |
Andy Ross | 5737b5c | 2020-02-04 13:52:09 -0800 | [diff] [blame] | 1256 | |
Peter Mitsis | 51ae993 | 2024-02-20 11:50:54 -0500 | [diff] [blame] | 1257 | if (arch_is_in_isr()) { |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1258 | k_spin_unlock(&_sched_spinlock, key); |
Peter Mitsis | 51ae993 | 2024-02-20 11:50:54 -0500 | [diff] [blame] | 1259 | } else { |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1260 | z_reschedule(&_sched_spinlock, key); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1261 | } |
| 1262 | } |
| 1263 | |
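/*
 * Usage sketch, not compiled: pairing k_sleep() with k_wakeup(). The
 * sleeper's return value is the unslept remainder in milliseconds,
 * nonzero when it was woken early. The thread id is hypothetical.
 */
#if 0
void sleeper(void)
{
	int32_t left = k_sleep(K_MSEC(1000));

	if (left > 0) {
		/* woken early by k_wakeup(); 'left' ms remained */
	}
}

void waker(k_tid_t sleeper_tid)
{
	k_wakeup(sleeper_tid);
}
#endif
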
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1264 | #ifdef CONFIG_USERSPACE |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1265 | static inline void z_vrfy_k_wakeup(k_tid_t thread) |
| 1266 | { |
Anas Nashif | a08bfeb | 2023-09-27 11:20:28 +0000 | [diff] [blame] | 1267 | K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD)); |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1268 | z_impl_k_wakeup(thread); |
| 1269 | } |
Yong Cong Sin | bbe5e1e | 2024-01-24 17:35:04 +0800 | [diff] [blame] | 1270 | #include <zephyr/syscalls/k_wakeup_mrsh.c> |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 1271 | #endif /* CONFIG_USERSPACE */ |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1272 | |
Daniel Leung | 0a50ff3 | 2023-09-25 11:56:10 -0700 | [diff] [blame] | 1273 | k_tid_t z_impl_k_sched_current_thread_query(void) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1274 | { |
Andy Ross | eefd3da | 2020-02-06 13:39:52 -0800 | [diff] [blame] | 1275 | #ifdef CONFIG_SMP |
| 1276 | /* In SMP, _current is a field read from _current_cpu, which |
| 1277 | * can race with preemption before it is read. We must lock |
| 1278 | * local interrupts when reading it. |
| 1279 | */ |
| 1280 | unsigned int k = arch_irq_lock(); |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 1281 | #endif /* CONFIG_SMP */ |
Andy Ross | eefd3da | 2020-02-06 13:39:52 -0800 | [diff] [blame] | 1282 | |
| 1283 | k_tid_t ret = _current_cpu->current; |
| 1284 | |
| 1285 | #ifdef CONFIG_SMP |
| 1286 | arch_irq_unlock(k); |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 1287 | #endif /* CONFIG_SMP */ |
Andy Ross | eefd3da | 2020-02-06 13:39:52 -0800 | [diff] [blame] | 1288 | return ret; |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1289 | } |
| 1290 | |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1291 | #ifdef CONFIG_USERSPACE |
Daniel Leung | 0a50ff3 | 2023-09-25 11:56:10 -0700 | [diff] [blame] | 1292 | static inline k_tid_t z_vrfy_k_sched_current_thread_query(void) |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1293 | { |
Daniel Leung | 0a50ff3 | 2023-09-25 11:56:10 -0700 | [diff] [blame] | 1294 | return z_impl_k_sched_current_thread_query(); |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1295 | } |
Yong Cong Sin | bbe5e1e | 2024-01-24 17:35:04 +0800 | [diff] [blame] | 1296 | #include <zephyr/syscalls/k_sched_current_thread_query_mrsh.c> |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 1297 | #endif /* CONFIG_USERSPACE */ |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1298 | |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1299 | static inline void unpend_all(_wait_q_t *wait_q) |
| 1300 | { |
| 1301 | struct k_thread *thread; |
| 1302 | |
Hess Nathan | 20b5542 | 2024-05-02 14:02:20 +0200 | [diff] [blame] | 1303 | for (thread = z_waitq_head(wait_q); thread != NULL; thread = z_waitq_head(wait_q)) { |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1304 | unpend_thread_no_timeout(thread); |
| 1305 | (void)z_abort_thread_timeout(thread); |
| 1306 | arch_thread_return_value_set(thread, 0); |
| 1307 | ready_thread(thread); |
| 1308 | } |
| 1309 | } |
| 1310 | |
Anas Nashif | a6ce422 | 2024-02-22 14:10:17 -0500 | [diff] [blame] | 1311 | #ifdef CONFIG_THREAD_ABORT_HOOK |
| 1312 | extern void thread_abort_hook(struct k_thread *thread); |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 1313 | #endif /* CONFIG_THREAD_ABORT_HOOK */ |
Chen Peng1 | 0f63d11 | 2021-09-06 13:59:40 +0800 | [diff] [blame] | 1314 | |
Peter Mitsis | e1db1ce | 2023-08-14 14:06:52 -0400 | [diff] [blame] | 1315 | /** |
| 1316 | * @brief Dequeues the specified thread |
| 1317 | * |
| 1318 | * Dequeues the specified thread and move it into the specified new state. |
| 1319 | * |
| 1320 | * @param thread Identify the thread to halt |
Peter Mitsis | e7986eb | 2023-08-14 16:41:05 -0400 | [diff] [blame] | 1321 | * @param new_state New thread state (_THREAD_DEAD or _THREAD_SUSPENDED) |
Peter Mitsis | e1db1ce | 2023-08-14 14:06:52 -0400 | [diff] [blame] | 1322 | */ |
| 1323 | static void halt_thread(struct k_thread *thread, uint8_t new_state) |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1324 | { |
Andy Ross | f0fd54c | 2024-03-26 08:38:01 -0400 | [diff] [blame] | 1325 | bool dummify = false; |
| 1326 | |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1327 | /* We hold the lock, and the thread is known not to be running |
| 1328 | * anywhere. |
| 1329 | */ |
Peter Mitsis | e1db1ce | 2023-08-14 14:06:52 -0400 | [diff] [blame] | 1330 | if ((thread->base.thread_state & new_state) == 0U) { |
| 1331 | thread->base.thread_state |= new_state; |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1332 | if (z_is_thread_queued(thread)) { |
Andy Ross | c230fb3 | 2021-09-23 16:41:30 -0700 | [diff] [blame] | 1333 | dequeue_thread(thread); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1334 | } |
Peter Mitsis | e7986eb | 2023-08-14 16:41:05 -0400 | [diff] [blame] | 1335 | |
| 1336 | if (new_state == _THREAD_DEAD) { |
| 1337 | if (thread->base.pended_on != NULL) { |
| 1338 | unpend_thread_no_timeout(thread); |
| 1339 | } |
| 1340 | (void)z_abort_thread_timeout(thread); |
| 1341 | unpend_all(&thread->join_queue); |
Andy Ross | f0fd54c | 2024-03-26 08:38:01 -0400 | [diff] [blame] | 1342 | |
| 1343 | /* Edge case: aborting _current from within an |
| 1344 | * ISR that preempted it requires clearing the |
| 1345 | * _current pointer so the upcoming context |
| 1346 | * switch doesn't clobber the now-freed |
 | 1347 | * memory.
| 1348 | */ |
| 1349 | if (thread == _current && arch_is_in_isr()) { |
| 1350 | dummify = true; |
| 1351 | } |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1352 | } |
Peter Mitsis | e7986eb | 2023-08-14 16:41:05 -0400 | [diff] [blame] | 1353 | #ifdef CONFIG_SMP |
| 1354 | unpend_all(&thread->halt_queue); |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 1355 | #endif /* CONFIG_SMP */ |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1356 | update_cache(1); |
| 1357 | |
Peter Mitsis | e7986eb | 2023-08-14 16:41:05 -0400 | [diff] [blame] | 1358 | if (new_state == _THREAD_SUSPENDED) { |
Andy Ross | 47ab663 | 2024-04-19 15:08:55 -0700 | [diff] [blame] | 1359 | clear_halting(thread); |
Peter Mitsis | e7986eb | 2023-08-14 16:41:05 -0400 | [diff] [blame] | 1360 | return; |
| 1361 | } |
| 1362 | |
Grant Ramsay | 45701e6 | 2023-08-14 09:41:52 +1200 | [diff] [blame] | 1363 | #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) |
| 1364 | arch_float_disable(thread); |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 1365 | #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */ |
Grant Ramsay | 45701e6 | 2023-08-14 09:41:52 +1200 | [diff] [blame] | 1366 | |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1367 | SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread); |
| 1368 | |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1369 | z_thread_monitor_exit(thread); |
Anas Nashif | a6ce422 | 2024-02-22 14:10:17 -0500 | [diff] [blame] | 1370 | #ifdef CONFIG_THREAD_ABORT_HOOK |
| 1371 | thread_abort_hook(thread); |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 1372 | #endif /* CONFIG_THREAD_ABORT_HOOK */ |
Chen Peng1 | 0f63d11 | 2021-09-06 13:59:40 +0800 | [diff] [blame] | 1373 | |
Peter Mitsis | 6df8efe | 2023-05-11 14:06:46 -0400 | [diff] [blame] | 1374 | #ifdef CONFIG_OBJ_CORE_THREAD |
Peter Mitsis | e6f1090 | 2023-06-01 12:16:40 -0400 | [diff] [blame] | 1375 | #ifdef CONFIG_OBJ_CORE_STATS_THREAD |
| 1376 | k_obj_core_stats_deregister(K_OBJ_CORE(thread)); |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 1377 | #endif /* CONFIG_OBJ_CORE_STATS_THREAD */ |
Peter Mitsis | 6df8efe | 2023-05-11 14:06:46 -0400 | [diff] [blame] | 1378 | k_obj_core_unlink(K_OBJ_CORE(thread)); |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 1379 | #endif /* CONFIG_OBJ_CORE_THREAD */ |
Peter Mitsis | 6df8efe | 2023-05-11 14:06:46 -0400 | [diff] [blame] | 1380 | |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1381 | #ifdef CONFIG_USERSPACE |
| 1382 | z_mem_domain_exit_thread(thread); |
Anas Nashif | 70cf96b | 2023-09-27 10:45:48 +0000 | [diff] [blame] | 1383 | k_thread_perms_all_clear(thread); |
Anas Nashif | 7a18c2b | 2023-09-27 10:45:18 +0000 | [diff] [blame] | 1384 | k_object_uninit(thread->stack_obj); |
| 1385 | k_object_uninit(thread); |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 1386 | #endif /* CONFIG_USERSPACE */ |
Daniel Leung | 378131c | 2024-03-26 11:54:31 -0700 | [diff] [blame] | 1387 | |
| 1388 | #ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP |
| 1389 | k_thread_abort_cleanup(thread); |
| 1390 | #endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */ |
Andy Ross | f0fd54c | 2024-03-26 08:38:01 -0400 | [diff] [blame] | 1391 | |
| 1392 | /* Do this "set _current to dummy" step last so that |
| 1393 | * subsystems above can rely on _current being |
| 1394 | * unchanged. Disabled for posix as that arch |
| 1395 | * continues to use the _current pointer in its swap |
Andy Ross | dec022a | 2024-04-29 12:50:41 -0700 | [diff] [blame] | 1396 | * code. Note that we must leave a non-null switch |
| 1397 | * handle for any threads spinning in join() (this can |
| 1398 | * never be used, as our thread is flagged dead, but |
 | 1399 | * it must not be NULL, otherwise join can deadlock).
Andy Ross | f0fd54c | 2024-03-26 08:38:01 -0400 | [diff] [blame] | 1400 | */ |
| 1401 | if (dummify && !IS_ENABLED(CONFIG_ARCH_POSIX)) { |
Andy Ross | dec022a | 2024-04-29 12:50:41 -0700 | [diff] [blame] | 1402 | #ifdef CONFIG_USE_SWITCH |
| 1403 | _current->switch_handle = _current; |
| 1404 | #endif |
Andy Ross | fd340eb | 2024-04-19 15:03:09 -0700 | [diff] [blame] | 1405 | z_dummy_thread_init(&_thread_dummy); |
Andy Ross | dec022a | 2024-04-29 12:50:41 -0700 | [diff] [blame] | 1406 | |
Andy Ross | f0fd54c | 2024-03-26 08:38:01 -0400 | [diff] [blame] | 1407 | } |
Andy Ross | 47ab663 | 2024-04-19 15:08:55 -0700 | [diff] [blame] | 1408 | |
| 1409 | /* Finally update the halting thread state, on which |
| 1410 | * other CPUs might be spinning (see |
| 1411 | * thread_halt_spin()). |
| 1412 | */ |
| 1413 | clear_halting(thread); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1414 | } |
| 1415 | } |
| 1416 | |
| 1417 | void z_thread_abort(struct k_thread *thread) |
| 1418 | { |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1419 | k_spinlock_key_t key = k_spin_lock(&_sched_spinlock); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1420 | |
Anas Nashif | 8791012 | 2024-02-22 22:24:36 -0500 | [diff] [blame] | 1421 | if (z_is_thread_essential(thread)) { |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1422 | k_spin_unlock(&_sched_spinlock, key); |
Andy Ross | fb61359 | 2022-05-19 12:55:28 -0700 | [diff] [blame] | 1423 | __ASSERT(false, "aborting essential thread %p", thread); |
| 1424 | k_panic(); |
| 1425 | return; |
| 1426 | } |
| 1427 | |
Anas Nashif | 3f4f3f6 | 2021-03-29 17:13:47 -0400 | [diff] [blame] | 1428 | if ((thread->base.thread_state & _THREAD_DEAD) != 0U) { |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1429 | k_spin_unlock(&_sched_spinlock, key); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1430 | return; |
| 1431 | } |
| 1432 | |
Peter Mitsis | e7986eb | 2023-08-14 16:41:05 -0400 | [diff] [blame] | 1433 | z_thread_halt(thread, key, true); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1434 | } |
| 1435 | |
| 1436 | #if !defined(CONFIG_ARCH_HAS_THREAD_ABORT) |
Hess Nathan | 980d3f4 | 2024-06-25 09:13:15 +0200 | [diff] [blame] | 1437 | void z_impl_k_thread_abort(k_tid_t thread) |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1438 | { |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1439 | SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread); |
| 1440 | |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1441 | z_thread_abort(thread); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1442 | |
Andy Ross | dec022a | 2024-04-29 12:50:41 -0700 | [diff] [blame] | 1443 | __ASSERT_NO_MSG((thread->base.thread_state & _THREAD_DEAD) != 0); |
| 1444 | |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1445 | SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1446 | } |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 1447 | #endif /* !CONFIG_ARCH_HAS_THREAD_ABORT */ |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1448 | |
| 1449 | int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout) |
| 1450 | { |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1451 | k_spinlock_key_t key = k_spin_lock(&_sched_spinlock); |
Hess Nathan | 7659cfd | 2024-04-29 16:31:47 +0200 | [diff] [blame] | 1452 | int ret; |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1453 | |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1454 | SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, join, thread, timeout); |
| 1455 | |
Anas Nashif | 3f4f3f6 | 2021-03-29 17:13:47 -0400 | [diff] [blame] | 1456 | if ((thread->base.thread_state & _THREAD_DEAD) != 0U) { |
Andy Ross | a08e23f | 2023-05-26 09:39:16 -0700 | [diff] [blame] | 1457 | z_sched_switch_spin(thread); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1458 | ret = 0; |
| 1459 | } else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { |
| 1460 | ret = -EBUSY; |
Anas Nashif | 3f4f3f6 | 2021-03-29 17:13:47 -0400 | [diff] [blame] | 1461 | } else if ((thread == _current) || |
| 1462 | (thread->base.pended_on == &_current->join_queue)) { |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1463 | ret = -EDEADLK; |
| 1464 | } else { |
| 1465 | __ASSERT(!arch_is_in_isr(), "cannot join in ISR"); |
| 1466 | add_to_waitq_locked(_current, &thread->join_queue); |
| 1467 | add_thread_timeout(_current, timeout); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1468 | |
| 1469 | SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout); |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1470 | ret = z_swap(&_sched_spinlock, key); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1471 | SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret); |
| 1472 | |
| 1473 | return ret; |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1474 | } |
| 1475 | |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1476 | SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret); |
| 1477 | |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1478 | k_spin_unlock(&_sched_spinlock, key); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1479 | return ret; |
| 1480 | } |
| 1481 | |
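/*
 * Usage sketch, not compiled: wait up to 100 ms for a worker thread
 * to terminate, distinguishing the documented return codes. The
 * thread object is hypothetical.
 */
#if 0
void reap_worker(struct k_thread *worker)
{
	int rc = k_thread_join(worker, K_MSEC(100));

	if (rc == 0) {
		/* thread is dead; its stack and object may be reused */
	} else if (rc == -EAGAIN) {
		/* timed out before the thread exited */
	} else if (rc == -EDEADLK) {
		/* joining self, or two threads joining each other */
	}
}
#endif
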
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1482 | #ifdef CONFIG_USERSPACE |
| 1483 | /* Special case: don't oops if the thread is uninitialized. This is because |
 | 1484 | * the initialization bit does double-duty for thread objects; if false, it
 | 1485 | * means either the thread object is truly uninitialized or the thread ran
 | 1486 | * and exited for some reason.
| 1487 | * |
| 1488 | * Return true in this case indicating we should just do nothing and return |
| 1489 | * success to the caller. |
| 1490 | */ |
| 1491 | static bool thread_obj_validate(struct k_thread *thread) |
| 1492 | { |
Anas Nashif | c25d080 | 2023-09-27 10:49:28 +0000 | [diff] [blame] | 1493 | struct k_object *ko = k_object_find(thread); |
Anas Nashif | 21254b2 | 2023-09-27 10:50:26 +0000 | [diff] [blame] | 1494 | int ret = k_object_validate(ko, K_OBJ_THREAD, _OBJ_INIT_TRUE); |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1495 | |
| 1496 | switch (ret) { |
| 1497 | case 0: |
| 1498 | return false; |
| 1499 | case -EINVAL: |
| 1500 | return true; |
| 1501 | default: |
| 1502 | #ifdef CONFIG_LOG |
Anas Nashif | 3ab3566 | 2023-09-27 10:51:23 +0000 | [diff] [blame] | 1503 | k_object_dump_error(ret, thread, ko, K_OBJ_THREAD); |
Simon Hein | bcd1d19 | 2024-03-08 12:00:10 +0100 | [diff] [blame] | 1504 | #endif /* CONFIG_LOG */ |
Anas Nashif | a08bfeb | 2023-09-27 11:20:28 +0000 | [diff] [blame] | 1505 | K_OOPS(K_SYSCALL_VERIFY_MSG(ret, "access denied")); |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1506 | } |
Enjia Mai | 53ca709 | 2021-01-15 17:09:58 +0800 | [diff] [blame] | 1507 | CODE_UNREACHABLE; /* LCOV_EXCL_LINE */ |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1508 | } |
| 1509 | |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1510 | static inline int z_vrfy_k_thread_join(struct k_thread *thread, |
| 1511 | k_timeout_t timeout) |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1512 | { |
| 1513 | if (thread_obj_validate(thread)) { |
| 1514 | return 0; |
| 1515 | } |
| 1516 | |
| 1517 | return z_impl_k_thread_join(thread, timeout); |
| 1518 | } |
Yong Cong Sin | bbe5e1e | 2024-01-24 17:35:04 +0800 | [diff] [blame] | 1519 | #include <zephyr/syscalls/k_thread_join_mrsh.c> |
Andrew Boie | a4c9190 | 2020-03-24 16:09:24 -0700 | [diff] [blame] | 1520 | |
| 1521 | static inline void z_vrfy_k_thread_abort(k_tid_t thread) |
| 1522 | { |
| 1523 | if (thread_obj_validate(thread)) { |
| 1524 | return; |
| 1525 | } |
| 1526 | |
Anas Nashif | 8791012 | 2024-02-22 22:24:36 -0500 | [diff] [blame] | 1527 | K_OOPS(K_SYSCALL_VERIFY_MSG(!z_is_thread_essential(thread), |
Andrew Boie | a4c9190 | 2020-03-24 16:09:24 -0700 | [diff] [blame] | 1528 | "aborting essential thread %p", thread)); |
| 1529 | |
| 1530 | z_impl_k_thread_abort((struct k_thread *)thread); |
| 1531 | } |
Yong Cong Sin | bbe5e1e | 2024-01-24 17:35:04 +0800 | [diff] [blame] | 1532 | #include <zephyr/syscalls/k_thread_abort_mrsh.c> |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1533 | #endif /* CONFIG_USERSPACE */ |
Peter Bigot | 0259c86 | 2021-01-12 13:45:32 -0600 | [diff] [blame] | 1534 | |
| 1535 | /* |
| 1536 | * future scheduler.h API implementations |
| 1537 | */ |
| 1538 | bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data) |
| 1539 | { |
| 1540 | struct k_thread *thread; |
| 1541 | bool ret = false; |
| 1542 | |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1543 | K_SPINLOCK(&_sched_spinlock) { |
Peter Bigot | 0259c86 | 2021-01-12 13:45:32 -0600 | [diff] [blame] | 1544 | thread = _priq_wait_best(&wait_q->waitq); |
| 1545 | |
| 1546 | if (thread != NULL) { |
| 1547 | z_thread_return_value_set_with_data(thread, |
| 1548 | swap_retval, |
| 1549 | swap_data); |
| 1550 | unpend_thread_no_timeout(thread); |
| 1551 | (void)z_abort_thread_timeout(thread); |
| 1552 | ready_thread(thread); |
| 1553 | ret = true; |
| 1554 | } |
| 1555 | } |
| 1556 | |
| 1557 | return ret; |
| 1558 | } |
| 1559 | |
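/*
 * Illustrative sketch, not compiled: a hypothetical minimal primitive
 * built from these helpers. A consumer blocks in z_sched_wait(),
 * which releases the caller's lock and later returns the waker's
 * swap_retval; a producer releases one waiter with z_sched_wake(),
 * handing over a data pointer.
 */
#if 0
struct my_signal {
	struct k_spinlock lock;
	_wait_q_t waitq;
};

int my_signal_take(struct my_signal *sig, k_timeout_t timeout, void **msg)
{
	k_spinlock_key_t key = k_spin_lock(&sig->lock);

	return z_sched_wait(&sig->lock, key, &sig->waitq, timeout, msg);
}

bool my_signal_give(struct my_signal *sig, void *msg)
{
	return z_sched_wake(&sig->waitq, 0, msg);
}
#endif
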
| 1560 | int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key, |
| 1561 | _wait_q_t *wait_q, k_timeout_t timeout, void **data) |
| 1562 | { |
| 1563 | int ret = z_pend_curr(lock, key, wait_q, timeout); |
| 1564 | |
| 1565 | if (data != NULL) { |
| 1566 | *data = _current->base.swap_data; |
| 1567 | } |
| 1568 | return ret; |
| 1569 | } |
Peter Mitsis | ca58339 | 2023-01-05 11:50:21 -0500 | [diff] [blame] | 1570 | |
| 1571 | int z_sched_waitq_walk(_wait_q_t *wait_q, |
| 1572 | int (*func)(struct k_thread *, void *), void *data) |
| 1573 | { |
| 1574 | struct k_thread *thread; |
| 1575 | int status = 0; |
| 1576 | |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1577 | K_SPINLOCK(&_sched_spinlock) { |
Peter Mitsis | ca58339 | 2023-01-05 11:50:21 -0500 | [diff] [blame] | 1578 | _WAIT_Q_FOR_EACH(wait_q, thread) { |
| 1579 | |
| 1580 | /* |
| 1581 | * Invoke the callback function on each waiting thread |
| 1582 | * for as long as there are both waiting threads AND |
| 1583 | * it returns 0. |
| 1584 | */ |
| 1585 | |
| 1586 | status = func(thread, data); |
| 1587 | if (status != 0) { |
| 1588 | break; |
| 1589 | } |
| 1590 | } |
| 1591 | } |
| 1592 | |
| 1593 | return status; |
| 1594 | } |
Peter Mitsis | 318b495 | 2024-09-16 11:52:11 -0700 | [diff] [blame] | 1595 | |
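/*
 * Usage sketch, not compiled: counting the threads pended on a wait
 * queue with z_sched_waitq_walk(). The walk stops early only if the
 * callback returns nonzero, which this one never does.
 */
#if 0
static int count_cb(struct k_thread *thread, void *data)
{
	ARG_UNUSED(thread);

	(*(int *)data)++;
	return 0;
}

static int count_waiters(_wait_q_t *wait_q)
{
	int n = 0;

	(void)z_sched_waitq_walk(wait_q, count_cb, &n);
	return n;
}
#endif
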
| 1596 | /* This routine exists for benchmarking purposes. It is not used in |
| 1597 | * general production code. |
| 1598 | */ |
| 1599 | void z_unready_thread(struct k_thread *thread) |
| 1600 | { |
| 1601 | K_SPINLOCK(&_sched_spinlock) { |
| 1602 | unready_thread(thread); |
| 1603 | } |
| 1604 | } |