/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/spinlock.h>
#include <wait_q.h>
#include <kthread.h>
#include <priority_q.h>
#include <kswap.h>
#include <ipi.h>
#include <kernel_arch_func.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <stdbool.h>
#include <kernel_internal.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/timing/timing.h>
#include <zephyr/sys/util.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#if defined(CONFIG_SWAP_NONATOMIC) && defined(CONFIG_TIMESLICING)
extern struct k_thread *pending_current;
#endif

struct k_spinlock _sched_spinlock;

/* Storage to "complete" the context switch from an invalid/incomplete thread
 * context (ex: exiting an ISR that aborted _current)
 */
__incoherent struct k_thread _thread_dummy;

static ALWAYS_INLINE void update_cache(int preempt_ok);
static void halt_thread(struct k_thread *thread, uint8_t new_state);
static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q);


BUILD_ASSERT(CONFIG_NUM_COOP_PRIORITIES >= CONFIG_NUM_METAIRQ_PRIORITIES,
	     "You need to provide at least as many CONFIG_NUM_COOP_PRIORITIES as "
	     "CONFIG_NUM_METAIRQ_PRIORITIES as Meta IRQs are just a special class of cooperative "
	     "threads.");

/*
 * Return value same as e.g. memcmp
 * > 0 -> thread 1 priority  > thread 2 priority
 * = 0 -> thread 1 priority == thread 2 priority
 * < 0 -> thread 1 priority  < thread 2 priority
 * Do not rely on the actual value returned aside from the above.
 * (Again, like memcmp.)
 */
int32_t z_sched_prio_cmp(struct k_thread *thread_1,
			 struct k_thread *thread_2)
{
	/* `prio` is <32b, so the below cannot overflow. */
	int32_t b1 = thread_1->base.prio;
	int32_t b2 = thread_2->base.prio;

	if (b1 != b2) {
		return b2 - b1;
	}

#ifdef CONFIG_SCHED_DEADLINE
	/* If we assume all deadlines live within the same "half" of
	 * the 32 bit modulus space (this is a documented API rule),
	 * then the latest deadline in the queue minus the earliest is
	 * guaranteed to be (2's complement) non-negative.  We can
	 * leverage that to compare the values without having to check
	 * the current time.
	 */
	uint32_t d1 = thread_1->base.prio_deadline;
	uint32_t d2 = thread_2->base.prio_deadline;

	if (d1 != d2) {
		/* Sooner deadline means higher effective priority.
		 * Doing the calculation with unsigned types and casting
		 * to signed isn't perfect, but at least reduces this
		 * from UB on overflow to impdef.
		 */
		return (int32_t) (d2 - d1);
	}
#endif /* CONFIG_SCHED_DEADLINE */
	return 0;
}

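/* Return the run queue a thread belongs on.  With
 * CONFIG_SCHED_CPU_MASK_PIN_ONLY each CPU has its own queue and the
 * choice is made from the thread's cpu_mask; otherwise there is a
 * single global queue.
 */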
static ALWAYS_INLINE void *thread_runq(struct k_thread *thread)
{
#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	int cpu, m = thread->base.cpu_mask;

	/* Edge case: it's legal per the API to "make runnable" a
	 * thread with all CPUs masked off (i.e. one that isn't
	 * actually runnable!).  Sort of a wart in the API and maybe
	 * we should address this in docs/assertions instead to avoid
	 * the extra test.
	 */
	cpu = m == 0 ? 0 : u32_count_trailing_zeros(m);

	return &_kernel.cpus[cpu].ready_q.runq;
#else
	ARG_UNUSED(thread);
	return &_kernel.ready_q.runq;
#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
}

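/* Return the run queue served by the current CPU (the single global
 * queue unless CONFIG_SCHED_CPU_MASK_PIN_ONLY is enabled).
 */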
static ALWAYS_INLINE void *curr_cpu_runq(void)
{
#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	return &arch_curr_cpu()->ready_q.runq;
#else
	return &_kernel.ready_q.runq;
#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
}

static ALWAYS_INLINE void runq_add(struct k_thread *thread)
{
	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	_priq_run_add(thread_runq(thread), thread);
}

static ALWAYS_INLINE void runq_remove(struct k_thread *thread)
{
	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	_priq_run_remove(thread_runq(thread), thread);
}

static ALWAYS_INLINE struct k_thread *runq_best(void)
{
	return _priq_run_best(curr_cpu_runq());
}

/* _current is never in the run queue until context switch on
 * SMP configurations, see z_requeue_current()
 */
static inline bool should_queue_thread(struct k_thread *thread)
{
	return !IS_ENABLED(CONFIG_SMP) || (thread != _current);
}

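/* Mark a thread _THREAD_QUEUED and add it to its run queue (except
 * for _current on SMP, which stays out of the queue until the context
 * switch; queueing _current instead records a pending "yield").
 */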
static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_QUEUED;
	if (should_queue_thread(thread)) {
		runq_add(thread);
	}
#ifdef CONFIG_SMP
	if (thread == _current) {
		/* add current to end of queue means "yield" */
		_current_cpu->swap_ok = true;
	}
#endif /* CONFIG_SMP */
}

static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_QUEUED;
	if (should_queue_thread(thread)) {
		runq_remove(thread);
	}
}

/* Called out of z_swap() when CONFIG_SMP.  The current thread can
 * never live in the run queue until we are inexorably on the context
 * switch path on SMP; otherwise there is a deadlock condition where a
 * set of CPUs pick a cycle of threads to run and wait for them all to
 * context switch forever.
 */
void z_requeue_current(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		runq_add(thread);
	}
	signal_pending_ipi();
}

/* Return true if the thread is aborting, else false */
static inline bool is_aborting(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_ABORTING) != 0U;
}

/* Return true if the thread is aborting or suspending, else false */
static inline bool is_halting(struct k_thread *thread)
{
	return (thread->base.thread_state &
		(_THREAD_ABORTING | _THREAD_SUSPENDING)) != 0U;
}

/* Clear the halting bits (_THREAD_ABORTING and _THREAD_SUSPENDING) */
static inline void clear_halting(struct k_thread *thread)
{
	barrier_dmem_fence_full(); /* Other cpus spin on this locklessly! */
	thread->base.thread_state &= ~(_THREAD_ABORTING | _THREAD_SUSPENDING);
}

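/* Scheduler core: pick the thread that should run next.  On
 * uniprocessor builds this is simply the best queued thread (or the
 * idle thread); on SMP it also weighs the best queued thread against
 * _current, which does not live in the run queue.
 */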
static ALWAYS_INLINE struct k_thread *next_up(void)
{
#ifdef CONFIG_SMP
	if (is_halting(_current)) {
		halt_thread(_current, is_aborting(_current) ?
				      _THREAD_DEAD : _THREAD_SUSPENDED);
	}
#endif /* CONFIG_SMP */

	struct k_thread *thread = runq_best();

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && \
	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
	/* MetaIRQs must always attempt to return back to a
	 * cooperative thread they preempted and not whatever happens
	 * to be highest priority now. The cooperative thread was
	 * promised it wouldn't be preempted (by non-metairq threads)!
	 */
	struct k_thread *mirqp = _current_cpu->metairq_preempted;

	if (mirqp != NULL && (thread == NULL || !thread_is_metairq(thread))) {
		if (!z_is_thread_prevented_from_running(mirqp)) {
			thread = mirqp;
		} else {
			_current_cpu->metairq_preempted = NULL;
		}
	}
#endif
/* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
 * CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
 */

#ifndef CONFIG_SMP
	/* In uniprocessor mode, we can leave the current thread in
	 * the queue (actually we have to, otherwise the assembly
	 * context switch code for all architectures would be
	 * responsible for putting it back in z_swap and ISR return!),
	 * which makes this choice simple.
	 */
	return (thread != NULL) ? thread : _current_cpu->idle_thread;
#else
	/* Under SMP, the "cache" mechanism for selecting the next
	 * thread doesn't work, so we have more work to do to test
	 * _current against the best choice from the queue.  Here, the
	 * thread selected above represents "the best thread that is
	 * not current".
	 *
	 * Subtle note on "queued": in SMP mode, _current does not
	 * live in the queue, so this isn't exactly the same thing as
	 * "ready", it means "is _current already added back to the
	 * queue such that we don't want to re-add it".
	 */
	bool queued = z_is_thread_queued(_current);
	bool active = !z_is_thread_prevented_from_running(_current);

	if (thread == NULL) {
		thread = _current_cpu->idle_thread;
	}

	if (active) {
		int32_t cmp = z_sched_prio_cmp(_current, thread);

		/* Ties only switch if state says we yielded */
		if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) {
			thread = _current;
		}

		if (!should_preempt(thread, _current_cpu->swap_ok)) {
			thread = _current;
		}
	}

	/* Put _current back into the queue */
	if ((thread != _current) && active &&
	    !z_is_idle_thread_object(_current) && !queued) {
		queue_thread(_current);
	}

	/* Take the new _current out of the queue */
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}

	_current_cpu->swap_ok = false;
	return thread;
#endif /* CONFIG_SMP */
}

void move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}
	queue_thread(thread);
	update_cache(thread == _current);
}

/* Track cooperative threads preempted by metairqs so we can return to
 * them specifically.  Called at the moment a new thread has been
 * selected to run.
 */
static void update_metairq_preempt(struct k_thread *thread)
{
#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && \
	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
	if (thread_is_metairq(thread) && !thread_is_metairq(_current) &&
	    !thread_is_preemptible(_current)) {
		/* Record new preemption */
		_current_cpu->metairq_preempted = _current;
	} else if (!thread_is_metairq(thread) && !z_is_idle_thread_object(thread)) {
		/* Returning from existing preemption */
		_current_cpu->metairq_preempted = NULL;
	}
#else
	ARG_UNUSED(thread);
#endif
/* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
 * CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
 */
}

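/* Refresh the scheduler's cached decision after a state change.  On
 * uniprocessor builds this recomputes _kernel.ready_q.cache; on SMP
 * it only records whether a cooperative swap is permissible.
 */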
static ALWAYS_INLINE void update_cache(int preempt_ok)
{
#ifndef CONFIG_SMP
	struct k_thread *thread = next_up();

	if (should_preempt(thread, preempt_ok)) {
#ifdef CONFIG_TIMESLICING
		if (thread != _current) {
			z_reset_time_slice(thread);
		}
#endif /* CONFIG_TIMESLICING */
		update_metairq_preempt(thread);
		_kernel.ready_q.cache = thread;
	} else {
		_kernel.ready_q.cache = _current;
	}

#else
	/* The way this works is that the CPU record keeps its
	 * "cooperative swapping is OK" flag until the next reschedule
	 * call or context switch.  It doesn't need to be tracked per
	 * thread because if the thread gets preempted for whatever
	 * reason the scheduler will make the same decision anyway.
	 */
	_current_cpu->swap_ok = preempt_ok;
#endif /* CONFIG_SMP */
}

static struct _cpu *thread_active_elsewhere(struct k_thread *thread)
{
	/* Returns pointer to _cpu if the thread is currently running on
	 * another CPU. There are more scalable designs to answer this
	 * question in constant time, but this is fine for now.
	 */
#ifdef CONFIG_SMP
	int currcpu = _current_cpu->id;

	unsigned int num_cpus = arch_num_cpus();

	for (int i = 0; i < num_cpus; i++) {
		if ((i != currcpu) &&
		    (_kernel.cpus[i].current == thread)) {
			return &_kernel.cpus[i];
		}
	}
#endif /* CONFIG_SMP */
	ARG_UNUSED(thread);
	return NULL;
}

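/* Make a thread runnable: add it to the run queue, refresh the
 * scheduler cache and flag an IPI for it.  _sched_spinlock must be
 * held by the caller.
 */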
static void ready_thread(struct k_thread *thread)
{
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(thread));
#endif /* CONFIG_KERNEL_COHERENCE */

	/* If thread is queued already, do not try to add it to the
	 * run queue again
	 */
	if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) {
		SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_ready, thread);

		queue_thread(thread);
		update_cache(0);

		flag_ipi(ipi_mask_create(thread));
	}
}

void z_ready_thread(struct k_thread *thread)
{
	K_SPINLOCK(&_sched_spinlock) {
		if (thread_active_elsewhere(thread) == NULL) {
			ready_thread(thread);
		}
	}
}

void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	K_SPINLOCK(&_sched_spinlock) {
		move_thread_to_end_of_prio_q(thread);
	}
}

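/* Mark a thread as started and make it runnable, rescheduling
 * immediately; a no-op if the thread has already been started.
 */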
void z_sched_start(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

	if (z_has_thread_started(thread)) {
		k_spin_unlock(&_sched_spinlock, key);
		return;
	}

	z_mark_thread_as_started(thread);
	ready_thread(thread);
	z_reschedule(&_sched_spinlock, key);
}

/* Spins in ISR context, waiting for a thread known to be running on
 * another CPU to catch the IPI we sent and halt.  Note that we check
 * for ourselves being asynchronously halted first to prevent simple
 * deadlocks (but not complex ones involving cycles of 3+ threads!).
 * Acts to release the provided lock before returning.
 */
static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
{
	if (is_halting(_current)) {
		halt_thread(_current,
			    is_aborting(_current) ? _THREAD_DEAD : _THREAD_SUSPENDED);
	}
	k_spin_unlock(&_sched_spinlock, key);
	while (is_halting(thread)) {
		unsigned int k = arch_irq_lock();

		arch_spin_relax(); /* Requires interrupts be masked */
		arch_irq_unlock(k);
	}
}

/* Shared handler for k_thread_{suspend,abort}().  Called with the
 * scheduler lock held and the key passed, which it may release and
 * reacquire; the key will be released before a possible return
 * (aborting _current will not return, obviously), which may be after
 * a context switch.
 */
static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
			  bool terminate)
{
	_wait_q_t *wq = &thread->join_queue;
#ifdef CONFIG_SMP
	wq = terminate ? wq : &thread->halt_queue;
#endif

	/* If the target is a thread running on another CPU, flag and
	 * poke it (note that we might spin to wait, so a true
	 * synchronous IPI is needed here, not deferred!); it will
	 * halt itself in the IPI.  Otherwise it's unscheduled, so we
	 * can clean it up directly.
	 */

	struct _cpu *cpu = thread_active_elsewhere(thread);

	if (cpu != NULL) {
		thread->base.thread_state |= (terminate ? _THREAD_ABORTING
					      : _THREAD_SUSPENDING);
#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
#ifdef CONFIG_ARCH_HAS_DIRECTED_IPIS
		arch_sched_directed_ipi(IPI_CPU_MASK(cpu->id));
#else
		arch_sched_broadcast_ipi();
#endif
#endif
		if (arch_is_in_isr()) {
			thread_halt_spin(thread, key);
		} else {
			add_to_waitq_locked(_current, wq);
			z_swap(&_sched_spinlock, key);
		}
	} else {
		halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
		if ((thread == _current) && !arch_is_in_isr()) {
			z_swap(&_sched_spinlock, key);
			__ASSERT(!terminate, "aborted _current back from dead");
		} else {
			k_spin_unlock(&_sched_spinlock, key);
		}
	}
	/* NOTE: the scheduler lock has been released.  Don't put
	 * logic here, it's likely to be racy/deadlocky even if you
	 * re-take the lock!
	 */
}


void z_impl_k_thread_suspend(k_tid_t thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, suspend, thread);

	(void)z_abort_thread_timeout(thread);

	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

	if ((thread->base.thread_state & _THREAD_SUSPENDED) != 0U) {

		/* The target thread is already suspended. Nothing to do. */

		k_spin_unlock(&_sched_spinlock, key);
		return;
	}

	z_thread_halt(thread, key, false);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, suspend, thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_suspend(k_tid_t thread)
{
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_suspend(thread);
}
#include <zephyr/syscalls/k_thread_suspend_mrsh.c>
#endif /* CONFIG_USERSPACE */

void z_impl_k_thread_resume(k_tid_t thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, resume, thread);

	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

	/* Do not try to resume a thread that was not suspended */
	if (!z_is_thread_suspended(thread)) {
		k_spin_unlock(&_sched_spinlock, key);
		return;
	}

	z_mark_thread_as_not_suspended(thread);
	ready_thread(thread);

	z_reschedule(&_sched_spinlock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, resume, thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_resume(k_tid_t thread)
{
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_resume(thread);
}
#include <zephyr/syscalls/k_thread_resume_mrsh.c>
#endif /* CONFIG_USERSPACE */

static _wait_q_t *pended_on_thread(struct k_thread *thread)
{
	__ASSERT_NO_MSG(thread->base.pended_on);

	return thread->base.pended_on;
}

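/* Remove a thread from its run queue (if queued) and refresh the
 * scheduler cache.  _sched_spinlock must be held.
 */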
static void unready_thread(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}
	update_cache(thread == _current);
}

/* _sched_spinlock must be held */
static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q)
{
	unready_thread(thread);
	z_mark_thread_as_pending(thread);

	SYS_PORT_TRACING_FUNC(k_thread, sched_pend, thread);

	if (wait_q != NULL) {
		thread->base.pended_on = wait_q;
		_priq_wait_add(&wait_q->waitq, thread);
	}
}

static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
{
	if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		z_add_thread_timeout(thread, timeout);
	}
}

static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q,
			k_timeout_t timeout)
{
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(wait_q == NULL || arch_mem_coherent(wait_q));
#endif /* CONFIG_KERNEL_COHERENCE */
	add_to_waitq_locked(thread, wait_q);
	add_thread_timeout(thread, timeout);
}

void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
		   k_timeout_t timeout)
{
	__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
	K_SPINLOCK(&_sched_spinlock) {
		pend_locked(thread, wait_q, timeout);
	}
}

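/* Remove a pended thread from its wait queue, leaving any timeout in
 * place.  _sched_spinlock must be held.
 */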
static inline void unpend_thread_no_timeout(struct k_thread *thread)
{
	_priq_wait_remove(&pended_on_thread(thread)->waitq, thread);
	z_mark_thread_as_not_pending(thread);
	thread->base.pended_on = NULL;
}

void z_unpend_thread_no_timeout(struct k_thread *thread)
{
	K_SPINLOCK(&_sched_spinlock) {
		if (thread->base.pended_on != NULL) {
			unpend_thread_no_timeout(thread);
		}
	}
}

void z_sched_wake_thread(struct k_thread *thread, bool is_timeout)
{
	K_SPINLOCK(&_sched_spinlock) {
		bool killed = (thread->base.thread_state &
			       (_THREAD_DEAD | _THREAD_ABORTING));

#ifdef CONFIG_EVENTS
		bool do_nothing = thread->no_wake_on_timeout && is_timeout;

		thread->no_wake_on_timeout = false;

		if (do_nothing) {
			continue;
		}
#endif /* CONFIG_EVENTS */

		if (!killed) {
			/* The thread is not being killed */
			if (thread->base.pended_on != NULL) {
				unpend_thread_no_timeout(thread);
			}
			z_mark_thread_as_started(thread);
			if (is_timeout) {
				z_mark_thread_as_not_suspended(thread);
			}
			ready_thread(thread);
		}
	}
}

#ifdef CONFIG_SYS_CLOCK_EXISTS
/* Timeout handler for *_thread_timeout() APIs */
void z_thread_timeout(struct _timeout *timeout)
{
	struct k_thread *thread = CONTAINER_OF(timeout,
					       struct k_thread, base.timeout);

	z_sched_wake_thread(thread, true);
}
#endif /* CONFIG_SYS_CLOCK_EXISTS */

int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
		_wait_q_t *wait_q, k_timeout_t timeout)
{
#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
	pending_current = _current;
#endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
	__ASSERT_NO_MSG(sizeof(_sched_spinlock) == 0 || lock != &_sched_spinlock);

	/* We do a "lock swap" prior to calling z_swap(), such that
	 * the caller's lock gets released as desired.  But we ensure
	 * that we hold the scheduler lock and leave local interrupts
	 * masked until we reach the context switch.  z_swap() itself
	 * has similar code; the duplication is because it's a legacy
	 * API that doesn't expect to be called with scheduler lock
	 * held.
	 */
	(void) k_spin_lock(&_sched_spinlock);
	pend_locked(_current, wait_q, timeout);
	k_spin_release(lock);
	return z_swap(&_sched_spinlock, key);
}

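/* Remove and return the best (highest effective priority) thread
 * pended on a wait queue, or NULL if it is empty.  The thread's
 * timeout, if any, is left running.
 */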
struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
{
	struct k_thread *thread = NULL;

	K_SPINLOCK(&_sched_spinlock) {
		thread = _priq_wait_best(&wait_q->waitq);

		if (thread != NULL) {
			unpend_thread_no_timeout(thread);
		}
	}

	return thread;
}

struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
{
	struct k_thread *thread = NULL;

	K_SPINLOCK(&_sched_spinlock) {
		thread = _priq_wait_best(&wait_q->waitq);

		if (unlikely(thread != NULL)) {
			unpend_thread_no_timeout(thread);
			(void)z_abort_thread_timeout(thread);
		}
	}

	return thread;
}

void z_unpend_thread(struct k_thread *thread)
{
	z_unpend_thread_no_timeout(thread);
	(void)z_abort_thread_timeout(thread);
}

/* Priority set utility that does no rescheduling; it just changes the
 * run queue state, returning true if a reschedule is needed later.
 */
bool z_thread_prio_set(struct k_thread *thread, int prio)
{
	bool need_sched = 0;
	int old_prio = thread->base.prio;

	K_SPINLOCK(&_sched_spinlock) {
		need_sched = z_is_thread_ready(thread);

		if (need_sched) {
			if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) {
				dequeue_thread(thread);
				thread->base.prio = prio;
				queue_thread(thread);

				if (old_prio > prio) {
					flag_ipi(ipi_mask_create(thread));
				}
			} else {
				/*
				 * This is a running thread on SMP. Update its
				 * priority, but do not requeue it. An IPI is
				 * needed if the priority is both being lowered
				 * and it is running on another CPU.
				 */

				thread->base.prio = prio;

				struct _cpu *cpu;

				cpu = thread_active_elsewhere(thread);
				if ((cpu != NULL) && (old_prio < prio)) {
					flag_ipi(IPI_CPU_MASK(cpu->id));
				}
			}

			update_cache(1);
		} else {
			thread->base.prio = prio;
		}
	}

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_priority_set, thread, prio);

	return need_sched;
}

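/* A reschedule may only happen when the irqlock key shows interrupts
 * were previously unlocked and we are not running in ISR context.
 */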
static inline bool resched(uint32_t key)
{
#ifdef CONFIG_SMP
	_current_cpu->swap_ok = 0;
#endif /* CONFIG_SMP */

	return arch_irq_unlocked(key) && !arch_is_in_isr();
}

/*
 * Check if the next ready thread is the same as the current thread
 * and save the trip if true.
 */
static inline bool need_swap(void)
{
	/* the SMP case will be handled in C based z_swap() */
#ifdef CONFIG_SMP
	return true;
#else
	struct k_thread *new_thread;

	/* Check if the next ready thread is the same as the current thread */
	new_thread = _kernel.ready_q.cache;
	return new_thread != _current;
#endif /* CONFIG_SMP */
}

void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
{
	if (resched(key.key) && need_swap()) {
		z_swap(lock, key);
	} else {
		k_spin_unlock(lock, key);
		signal_pending_ipi();
	}
}

void z_reschedule_irqlock(uint32_t key)
{
	if (resched(key) && need_swap()) {
		z_swap_irqlock(key);
	} else {
		irq_unlock(key);
		signal_pending_ipi();
	}
}

void k_sched_lock(void)
{
	K_SPINLOCK(&_sched_spinlock) {
		SYS_PORT_TRACING_FUNC(k_thread, sched_lock);

		z_sched_lock();
	}
}

void k_sched_unlock(void)
{
	K_SPINLOCK(&_sched_spinlock) {
		__ASSERT(_current->base.sched_locked != 0U, "");
		__ASSERT(!arch_is_in_isr(), "");

		++_current->base.sched_locked;
		update_cache(0);
	}

	LOG_DBG("scheduler unlocked (%p:%d)",
		_current, _current->base.sched_locked);

	SYS_PORT_TRACING_FUNC(k_thread, sched_unlock);

	z_reschedule_unlocked();
}

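/* Return the thread z_swap() should switch to: next_up() on SMP,
 * otherwise the cached scheduler decision.
 */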
struct k_thread *z_swap_next_thread(void)
{
#ifdef CONFIG_SMP
	struct k_thread *ret = next_up();

	if (ret == _current) {
		/* When not swapping, have to signal IPIs here.  In
		 * the context switch case it must happen later, after
		 * _current gets requeued.
		 */
		signal_pending_ipi();
	}
	return ret;
#else
	return _kernel.ready_q.cache;
#endif /* CONFIG_SMP */
}

#ifdef CONFIG_USE_SWITCH
/* Just a wrapper around _current = xxx with tracing */
static inline void set_current(struct k_thread *new_thread)
{
	z_thread_mark_switched_out();
	_current_cpu->current = new_thread;
}

/**
 * @brief Determine next thread to execute upon completion of an interrupt
 *
 * Thread preemption is performed by context switching after the completion
 * of a non-recursed interrupt. This function determines which thread to
 * switch to if any. This function accepts as @p interrupted either:
 *
 * - The handle for the interrupted thread in which case the thread's context
 *   must already be fully saved and ready to be picked up by a different CPU.
 *
 * - NULL if more work is required to fully save the thread's state after
 *   it is known that a new thread is to be scheduled. It is up to the caller
 *   to store the handle resulting from the thread that is being switched out
 *   in that thread's "switch_handle" field after its
 *   context has fully been saved, following the same requirements as with
 *   the @ref arch_switch() function.
 *
 * If a new thread needs to be scheduled then its handle is returned.
 * Otherwise the same value provided as @p interrupted is returned back.
 * Those handles are the same opaque types used by the @ref arch_switch()
 * function.
 *
 * @warning
 * The _current value may have changed after this call and not refer
 * to the interrupted thread anymore. It might be necessary to make a local
 * copy before calling this function.
 *
 * @param interrupted Handle for the thread that was interrupted or NULL.
 * @retval Handle for the next thread to execute, or @p interrupted when
 *         no new thread is to be scheduled.
 */
void *z_get_next_switch_handle(void *interrupted)
{
	z_check_stack_sentinel();

#ifdef CONFIG_SMP
	void *ret = NULL;

	K_SPINLOCK(&_sched_spinlock) {
		struct k_thread *old_thread = _current, *new_thread;

		if (IS_ENABLED(CONFIG_SMP)) {
			old_thread->switch_handle = NULL;
		}
		new_thread = next_up();

		z_sched_usage_switch(new_thread);

		if (old_thread != new_thread) {
			uint8_t cpu_id;

			update_metairq_preempt(new_thread);
			z_sched_switch_spin(new_thread);
			arch_cohere_stacks(old_thread, interrupted, new_thread);

			_current_cpu->swap_ok = 0;
			cpu_id = arch_curr_cpu()->id;
			new_thread->base.cpu = cpu_id;
			set_current(new_thread);

#ifdef CONFIG_TIMESLICING
			z_reset_time_slice(new_thread);
#endif /* CONFIG_TIMESLICING */

#ifdef CONFIG_SPIN_VALIDATE
			/* Changed _current!  Update the spinlock
			 * bookkeeping so the validation doesn't get
			 * confused when the "wrong" thread tries to
			 * release the lock.
			 */
			z_spin_lock_set_owner(&_sched_spinlock);
#endif /* CONFIG_SPIN_VALIDATE */

			/* A queued (runnable) old/current thread
			 * needs to be added back to the run queue
			 * here, and atomically with its switch handle
			 * being set below.  This is safe now, as we
			 * will not return into it.
			 */
			if (z_is_thread_queued(old_thread)) {
#ifdef CONFIG_SCHED_IPI_CASCADE
				if ((new_thread->base.cpu_mask != -1) &&
				    (old_thread->base.cpu_mask != BIT(cpu_id))) {
					flag_ipi(ipi_mask_create(old_thread));
				}
#endif
				runq_add(old_thread);
			}
		}
		old_thread->switch_handle = interrupted;
		ret = new_thread->switch_handle;
		if (IS_ENABLED(CONFIG_SMP)) {
			/* Active threads MUST have a null here */
			new_thread->switch_handle = NULL;
		}
	}
	signal_pending_ipi();
	return ret;
#else
	z_sched_usage_switch(_kernel.ready_q.cache);
	_current->switch_handle = interrupted;
	set_current(_kernel.ready_q.cache);
	return _current->switch_handle;
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_USE_SWITCH */

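/* Wake every thread pended on a wait queue, returning nonzero if a
 * reschedule is needed afterwards.
 */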
int z_unpend_all(_wait_q_t *wait_q)
{
	int need_sched = 0;
	struct k_thread *thread;

	for (thread = z_waitq_head(wait_q); thread != NULL; thread = z_waitq_head(wait_q)) {
		z_unpend_thread(thread);
		z_ready_thread(thread);
		need_sched = 1;
	}

	return need_sched;
}

Anas Nashif477a04a2024-02-28 08:15:15 -0500999void init_ready_q(struct _ready_q *ready_q)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001000{
Andy Rossb155d062021-09-24 13:49:14 -07001001#if defined(CONFIG_SCHED_SCALABLE)
Anas Nashif477a04a2024-02-28 08:15:15 -05001002 ready_q->runq = (struct _priq_rb) {
Andy Ross1acd8c22018-05-03 14:51:49 -07001003 .tree = {
Patrik Flykt4344e272019-03-08 14:19:05 -07001004 .lessthan_fn = z_priq_rb_lessthan,
Andy Ross1acd8c22018-05-03 14:51:49 -07001005 }
1006 };
Andy Rossb155d062021-09-24 13:49:14 -07001007#elif defined(CONFIG_SCHED_MULTIQ)
Andy Ross9f06a352018-06-28 10:38:14 -07001008 for (int i = 0; i < ARRAY_SIZE(_kernel.ready_q.runq.queues); i++) {
Anas Nashif477a04a2024-02-28 08:15:15 -05001009 sys_dlist_init(&ready_q->runq.queues[i]);
Andy Ross9f06a352018-06-28 10:38:14 -07001010 }
Andy Rossb155d062021-09-24 13:49:14 -07001011#else
Anas Nashif477a04a2024-02-28 08:15:15 -05001012 sys_dlist_init(&ready_q->runq);
Andy Ross9f06a352018-06-28 10:38:14 -07001013#endif
Andy Rossb155d062021-09-24 13:49:14 -07001014}
1015
1016void z_sched_init(void)
1017{
Andy Rossb11e7962021-09-24 10:57:39 -07001018#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
Nicolas Pitre907eea02023-03-16 17:54:25 -04001019 for (int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
Andy Rossb11e7962021-09-24 10:57:39 -07001020 init_ready_q(&_kernel.cpus[i].ready_q);
1021 }
1022#else
Andy Rossb155d062021-09-24 13:49:14 -07001023 init_ready_q(&_kernel.ready_q);
Simon Heinbcd1d192024-03-08 12:00:10 +01001024#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001025}
1026
Anas Nashif25c87db2021-03-29 10:54:23 -04001027void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001028{
Benjamin Walsh3cc2ba92016-11-08 15:44:05 -05001029 /*
1030 * Use NULL, since we cannot know what the entry point is (we do not
1031 * keep track of it) and idle cannot change its priority.
1032 */
Patrik Flykt4344e272019-03-08 14:19:05 -07001033 Z_ASSERT_VALID_PRIO(prio, NULL);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001034
Anas Nashif868f0992024-02-24 11:37:56 -05001035 bool need_sched = z_thread_prio_set((struct k_thread *)thread, prio);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001036
Peter Mitsis9ff52212024-03-01 14:44:26 -05001037 if ((need_sched) && (IS_ENABLED(CONFIG_SMP) ||
1038 (_current->base.sched_locked == 0U))) {
Anas Nashif5e591c32024-02-24 10:37:06 -05001039 z_reschedule_unlocked();
1040 }
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001041}
1042
Andrew Boie468190a2017-09-29 14:00:48 -07001043#ifdef CONFIG_USERSPACE
Andy Ross65649742019-08-06 13:34:31 -07001044static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio)
Andrew Boie468190a2017-09-29 14:00:48 -07001045{
Anas Nashifa08bfeb2023-09-27 11:20:28 +00001046 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1047 K_OOPS(K_SYSCALL_VERIFY_MSG(_is_valid_prio(prio, NULL),
Andy Ross65649742019-08-06 13:34:31 -07001048 "invalid thread priority %d", prio));
Anas Nashif5e591c32024-02-24 10:37:06 -05001049#ifndef CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY
Anas Nashifa08bfeb2023-09-27 11:20:28 +00001050 K_OOPS(K_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio,
Andrew Boie8345e5e2018-05-04 15:57:57 -07001051 "thread priority may only be downgraded (%d < %d)",
1052 prio, thread->base.prio));
Simon Heinbcd1d192024-03-08 12:00:10 +01001053#endif /* CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY */
Andy Ross65649742019-08-06 13:34:31 -07001054 z_impl_k_thread_priority_set(thread, prio);
Andrew Boie468190a2017-09-29 14:00:48 -07001055}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001056#include <zephyr/syscalls/k_thread_priority_set_mrsh.c>
Simon Heinbcd1d192024-03-08 12:00:10 +01001057#endif /* CONFIG_USERSPACE */
Andrew Boie468190a2017-09-29 14:00:48 -07001058
Andy Ross4a2e50f2018-05-15 11:06:25 -07001059#ifdef CONFIG_SCHED_DEADLINE
Patrik Flykt4344e272019-03-08 14:19:05 -07001060void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
Andy Ross4a2e50f2018-05-15 11:06:25 -07001061{
TaiJu Wu555c07e2024-03-14 03:09:41 +08001062
1063 deadline = CLAMP(deadline, 0, INT_MAX);
1064
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001065 struct k_thread *thread = tid;
Andy Rossf2280d12024-03-08 08:42:08 -08001066 int32_t newdl = k_cycle_get_32() + deadline;
Andy Ross4a2e50f2018-05-15 11:06:25 -07001067
Andy Rossf2280d12024-03-08 08:42:08 -08001068 /* The prio_deadline field changes the sorting order, so it can't
 1069 * be changed while the thread is in the run queue (dlists are
 1070 * actually benign as long as we requeue the thread before we
 1071 * release the lock, but an rbtree will blow up if we break the
 1072 * sorting!)
1073 */
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001074 K_SPINLOCK(&_sched_spinlock) {
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001075 if (z_is_thread_queued(thread)) {
Andy Rossc230fb32021-09-23 16:41:30 -07001076 dequeue_thread(thread);
Andy Rossf2280d12024-03-08 08:42:08 -08001077 thread->base.prio_deadline = newdl;
Andy Rossc230fb32021-09-23 16:41:30 -07001078 queue_thread(thread);
Andy Rossf2280d12024-03-08 08:42:08 -08001079 } else {
1080 thread->base.prio_deadline = newdl;
Andy Ross4a2e50f2018-05-15 11:06:25 -07001081 }
1082 }
1083}
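/* Usage sketch for CONFIG_SCHED_DEADLINE (application-side, illustrative):
 * an EDF-style worker re-arms its deadline, expressed in hardware cycles
 * relative to "now", before each job. process_job() is a placeholder helper;
 * note that deadlines only order runnable threads sharing the same static
 * priority.
 *
 *	void edf_worker(void *p1, void *p2, void *p3)
 *	{
 *		for (;;) {
 *			// Ask to be scheduled as if this job must finish
 *			// within ~5 ms (clamped to [0, INT_MAX] above).
 *			k_thread_deadline_set(k_current_get(),
 *					      (int)k_ms_to_cyc_ceil32(5));
 *			process_job();
 *		}
 *	}
 */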
1084
1085#ifdef CONFIG_USERSPACE
Andy Ross075c94f2019-08-13 11:34:34 -07001086static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline)
Andy Ross4a2e50f2018-05-15 11:06:25 -07001087{
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001088 struct k_thread *thread = tid;
Andy Ross4a2e50f2018-05-15 11:06:25 -07001089
Anas Nashifa08bfeb2023-09-27 11:20:28 +00001090 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1091 K_OOPS(K_SYSCALL_VERIFY_MSG(deadline > 0,
Andy Ross4a2e50f2018-05-15 11:06:25 -07001092 "invalid thread deadline %d",
1093 (int)deadline));
1094
Patrik Flykt4344e272019-03-08 14:19:05 -07001095 z_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
Andy Ross4a2e50f2018-05-15 11:06:25 -07001096}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001097#include <zephyr/syscalls/k_thread_deadline_set_mrsh.c>
Simon Heinbcd1d192024-03-08 12:00:10 +01001098#endif /* CONFIG_USERSPACE */
1099#endif /* CONFIG_SCHED_DEADLINE */
Andy Ross4a2e50f2018-05-15 11:06:25 -07001100
Jordan Yates1ef647f2022-03-26 09:55:23 +10001101bool k_can_yield(void)
1102{
1103 return !(k_is_pre_kernel() || k_is_in_isr() ||
1104 z_is_idle_thread_object(_current));
1105}
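/* Usage sketch: k_can_yield() lets code that may also run from ISRs, during
 * pre-kernel init, or in the idle thread yield only when it is legal and
 * meaningful to do so. poll_one_item() is a placeholder helper.
 *
 *	static void poll_step(void)
 *	{
 *		poll_one_item();
 *		if (k_can_yield()) {
 *			k_yield();
 *		}
 *	}
 */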
1106
Patrik Flykt4344e272019-03-08 14:19:05 -07001107void z_impl_k_yield(void)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001108{
Andrew Boie4f77c2a2019-11-07 12:43:29 -08001109 __ASSERT(!arch_is_in_isr(), "");
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001110
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001111 SYS_PORT_TRACING_FUNC(k_thread, yield);
1112
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001113 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
James Harris6543e062021-03-01 10:14:13 -08001114
Andy Ross851d14a2021-05-13 15:46:43 -07001115 if (!IS_ENABLED(CONFIG_SMP) ||
1116 z_is_thread_queued(_current)) {
Andy Rossc230fb32021-09-23 16:41:30 -07001117 dequeue_thread(_current);
Andy Ross1acd8c22018-05-03 14:51:49 -07001118 }
Andy Rossc230fb32021-09-23 16:41:30 -07001119 queue_thread(_current);
Andy Ross851d14a2021-05-13 15:46:43 -07001120 update_cache(1);
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001121 z_swap(&_sched_spinlock, key);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001122}
1123
Andrew Boie468190a2017-09-29 14:00:48 -07001124#ifdef CONFIG_USERSPACE
Andy Ross65649742019-08-06 13:34:31 -07001125static inline void z_vrfy_k_yield(void)
1126{
1127 z_impl_k_yield();
1128}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001129#include <zephyr/syscalls/k_yield_mrsh.c>
Simon Heinbcd1d192024-03-08 12:00:10 +01001130#endif /* CONFIG_USERSPACE */
Andrew Boie468190a2017-09-29 14:00:48 -07001131
Flavio Ceolin7a815d52020-10-19 21:37:22 -07001132static int32_t z_tick_sleep(k_ticks_t ticks)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001133{
Flavio Ceolin9a160972020-11-16 10:40:46 -08001134 uint32_t expected_wakeup_ticks;
Carles Cufi9849df82016-12-02 15:31:08 +01001135
Andrew Boie4f77c2a2019-11-07 12:43:29 -08001136 __ASSERT(!arch_is_in_isr(), "");
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001137
Gerard Marull-Paretas737d7992022-11-23 13:42:04 +01001138 LOG_DBG("thread %p for %lu ticks", _current, (unsigned long)ticks);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001139
Benjamin Walsh5596f782016-12-09 19:57:17 -05001140 /* a wait of 0 ticks is treated as a 'yield' */
Charles E. Youseb1863032019-05-08 13:22:46 -07001141 if (ticks == 0) {
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001142 k_yield();
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001143 return 0;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001144 }
1145
Lauren Murphy4c85b462021-05-25 17:49:28 -05001146 if (Z_TICK_ABS(ticks) <= 0) {
1147 expected_wakeup_ticks = ticks + sys_clock_tick_get_32();
1148 } else {
1149 expected_wakeup_ticks = Z_TICK_ABS(ticks);
1150 }
Andy Rossd27d4e62019-02-05 15:36:01 -08001151
Gerson Fernando Budkeb8188e52023-10-16 20:15:31 +02001152 k_timeout_t timeout = Z_TIMEOUT_TICKS(ticks);
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001153 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001154
Andy Rossdff6b712019-02-25 21:17:29 -08001155#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
1156 pending_current = _current;
Simon Heinbcd1d192024-03-08 12:00:10 +01001157#endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
Andrew Boiea8775ab2020-09-05 12:53:42 -07001158 unready_thread(_current);
Andy Ross78327382020-03-05 15:18:14 -08001159 z_add_thread_timeout(_current, timeout);
Andy Ross4521e0c2019-03-22 10:30:19 -07001160 z_mark_thread_as_suspended(_current);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001161
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001162 (void)z_swap(&_sched_spinlock, key);
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001163
Andy Ross4521e0c2019-03-22 10:30:19 -07001164 __ASSERT(!z_is_thread_state_set(_current, _THREAD_SUSPENDED), "");
1165
Anas Nashif5c90ceb2021-03-13 08:19:53 -05001166 ticks = (k_ticks_t)expected_wakeup_ticks - sys_clock_tick_get_32();
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001167 if (ticks > 0) {
Charles E. Youseb1863032019-05-08 13:22:46 -07001168 return ticks;
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001169 }
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001170
1171 return 0;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001172}
1173
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001174int32_t z_impl_k_sleep(k_timeout_t timeout)
Charles E. Youseb1863032019-05-08 13:22:46 -07001175{
Andy Ross78327382020-03-05 15:18:14 -08001176 k_ticks_t ticks;
Charles E. Youseb1863032019-05-08 13:22:46 -07001177
Peter Bigot8162e582019-12-12 16:07:07 -06001178 __ASSERT(!arch_is_in_isr(), "");
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001179
1180 SYS_PORT_TRACING_FUNC_ENTER(k_thread, sleep, timeout);
Peter Bigot8162e582019-12-12 16:07:07 -06001181
Anas Nashifd2c71792020-10-17 07:52:17 -04001182 /* in case of K_FOREVER, we suspend */
Andy Ross78327382020-03-05 15:18:14 -08001183 if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
Anas Nashif20b2c982024-03-28 10:09:26 -04001184
Andrew Boied2b89222019-11-08 10:44:22 -08001185 k_thread_suspend(_current);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001186 SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, (int32_t) K_TICKS_FOREVER);
1187
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001188 return (int32_t) K_TICKS_FOREVER;
Andrew Boied2b89222019-11-08 10:44:22 -08001189 }
1190
Andy Ross78327382020-03-05 15:18:14 -08001191 ticks = timeout.ticks;
Andy Ross78327382020-03-05 15:18:14 -08001192
Charles E. Youseb1863032019-05-08 13:22:46 -07001193 ticks = z_tick_sleep(ticks);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001194
Peter Mitsisa3e5af92023-12-05 13:40:19 -05001195 int32_t ret = k_ticks_to_ms_ceil64(ticks);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001196
1197 SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, ret);
1198
1199 return ret;
Charles E. Youseb1863032019-05-08 13:22:46 -07001200}
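/* Usage sketch: k_sleep() returns the requested time remaining, in
 * milliseconds, when the sleep is cut short by k_wakeup(), and 0 when it ran
 * to completion (K_TICKS_FOREVER for a K_FOREVER sleep). Illustrative
 * application code.
 *
 *	int32_t left = k_sleep(K_MSEC(100));
 *
 *	if (left > 0) {
 *		// Another context called k_wakeup() on this thread early;
 *		// roughly "left" ms of the requested delay were skipped.
 *	}
 */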
1201
Andrew Boie76c04a22017-09-27 14:45:10 -07001202#ifdef CONFIG_USERSPACE
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001203static inline int32_t z_vrfy_k_sleep(k_timeout_t timeout)
Andrew Boie76c04a22017-09-27 14:45:10 -07001204{
Andy Ross78327382020-03-05 15:18:14 -08001205 return z_impl_k_sleep(timeout);
Charles E. Yousea5678312019-05-09 16:46:46 -07001206}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001207#include <zephyr/syscalls/k_sleep_mrsh.c>
Simon Heinbcd1d192024-03-08 12:00:10 +01001208#endif /* CONFIG_USERSPACE */
Charles E. Yousea5678312019-05-09 16:46:46 -07001209
Hess Nathan980d3f42024-06-25 09:13:15 +02001210int32_t z_impl_k_usleep(int32_t us)
Charles E. Yousea5678312019-05-09 16:46:46 -07001211{
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001212 int32_t ticks;
Charles E. Yousea5678312019-05-09 16:46:46 -07001213
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001214 SYS_PORT_TRACING_FUNC_ENTER(k_thread, usleep, us);
1215
Andy Ross88924062019-10-03 11:43:10 -07001216 ticks = k_us_to_ticks_ceil64(us);
Charles E. Yousea5678312019-05-09 16:46:46 -07001217 ticks = z_tick_sleep(ticks);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001218
Peter Mitsisa3e5af92023-12-05 13:40:19 -05001219 int32_t ret = k_ticks_to_us_ceil64(ticks);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001220
Peter Mitsisa3e5af92023-12-05 13:40:19 -05001221 SYS_PORT_TRACING_FUNC_EXIT(k_thread, usleep, us, ret);
1222
1223 return ret;
Charles E. Yousea5678312019-05-09 16:46:46 -07001224}
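/* Usage sketch: k_usleep() takes a plain microsecond count rather than a
 * k_timeout_t; the delay is rounded up to tick granularity, so small values
 * can sleep noticeably longer than requested. Illustrative only.
 *
 *	// Give a slow peripheral ~50 us to settle (actual delay depends on
 *	// CONFIG_SYS_CLOCK_TICKS_PER_SEC).
 *	(void)k_usleep(50);
 */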
1225
1226#ifdef CONFIG_USERSPACE
Hess Nathan980d3f42024-06-25 09:13:15 +02001227static inline int32_t z_vrfy_k_usleep(int32_t us)
Charles E. Yousea5678312019-05-09 16:46:46 -07001228{
1229 return z_impl_k_usleep(us);
Andrew Boie76c04a22017-09-27 14:45:10 -07001230}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001231#include <zephyr/syscalls/k_usleep_mrsh.c>
Simon Heinbcd1d192024-03-08 12:00:10 +01001232#endif /* CONFIG_USERSPACE */
Andrew Boie76c04a22017-09-27 14:45:10 -07001233
Patrik Flykt4344e272019-03-08 14:19:05 -07001234void z_impl_k_wakeup(k_tid_t thread)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001235{
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001236 SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread);
1237
Patrik Flykt4344e272019-03-08 14:19:05 -07001238 if (z_is_thread_pending(thread)) {
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001239 return;
1240 }
1241
Patrik Flykt4344e272019-03-08 14:19:05 -07001242 if (z_abort_thread_timeout(thread) < 0) {
Andrew Boied2b89222019-11-08 10:44:22 -08001243 /* Might have just been sleeping forever */
1244 if (thread->base.thread_state != _THREAD_SUSPENDED) {
1245 return;
1246 }
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001247 }
1248
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001249 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
Peter Mitsis51ae9932024-02-20 11:50:54 -05001250
Andy Ross4521e0c2019-03-22 10:30:19 -07001251 z_mark_thread_as_not_suspended(thread);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001252
Peter Mitsis9ff52212024-03-01 14:44:26 -05001253 if (thread_active_elsewhere(thread) == NULL) {
Peter Mitsis51ae9932024-02-20 11:50:54 -05001254 ready_thread(thread);
1255 }
Andy Ross5737b5c2020-02-04 13:52:09 -08001256
Peter Mitsis51ae9932024-02-20 11:50:54 -05001257 if (arch_is_in_isr()) {
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001258 k_spin_unlock(&_sched_spinlock, key);
Peter Mitsis51ae9932024-02-20 11:50:54 -05001259 } else {
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001260 z_reschedule(&_sched_spinlock, key);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001261 }
1262}
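/* Usage sketch: a thread parks itself with k_sleep(K_FOREVER) and is kicked
 * by another thread or an ISR via k_wakeup(). Note from the code above that
 * k_wakeup() only affects a sleeping thread; a thread pending on a kernel
 * object is left alone. watcher_tid and handle_event() are placeholders.
 *
 *	void watcher(void *p1, void *p2, void *p3)
 *	{
 *		for (;;) {
 *			(void)k_sleep(K_FOREVER);
 *			handle_event();
 *		}
 *	}
 *
 *	// Elsewhere, possibly in interrupt context:
 *	k_wakeup(watcher_tid);
 */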
1263
Andrew Boie468190a2017-09-29 14:00:48 -07001264#ifdef CONFIG_USERSPACE
Andy Ross65649742019-08-06 13:34:31 -07001265static inline void z_vrfy_k_wakeup(k_tid_t thread)
1266{
Anas Nashifa08bfeb2023-09-27 11:20:28 +00001267 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
Andy Ross65649742019-08-06 13:34:31 -07001268 z_impl_k_wakeup(thread);
1269}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001270#include <zephyr/syscalls/k_wakeup_mrsh.c>
Simon Heinbcd1d192024-03-08 12:00:10 +01001271#endif /* CONFIG_USERSPACE */
Andrew Boie468190a2017-09-29 14:00:48 -07001272
Daniel Leung0a50ff32023-09-25 11:56:10 -07001273k_tid_t z_impl_k_sched_current_thread_query(void)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001274{
Andy Rosseefd3da2020-02-06 13:39:52 -08001275#ifdef CONFIG_SMP
1276 /* In SMP, _current is a field read from _current_cpu, which
1277 * can race with preemption before it is read. We must lock
1278 * local interrupts when reading it.
1279 */
1280 unsigned int k = arch_irq_lock();
Simon Heinbcd1d192024-03-08 12:00:10 +01001281#endif /* CONFIG_SMP */
Andy Rosseefd3da2020-02-06 13:39:52 -08001282
1283 k_tid_t ret = _current_cpu->current;
1284
1285#ifdef CONFIG_SMP
1286 arch_irq_unlock(k);
Simon Heinbcd1d192024-03-08 12:00:10 +01001287#endif /* CONFIG_SMP */
Andy Rosseefd3da2020-02-06 13:39:52 -08001288 return ret;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001289}
1290
Andrew Boie76c04a22017-09-27 14:45:10 -07001291#ifdef CONFIG_USERSPACE
Daniel Leung0a50ff32023-09-25 11:56:10 -07001292static inline k_tid_t z_vrfy_k_sched_current_thread_query(void)
Andy Ross65649742019-08-06 13:34:31 -07001293{
Daniel Leung0a50ff32023-09-25 11:56:10 -07001294 return z_impl_k_sched_current_thread_query();
Andy Ross65649742019-08-06 13:34:31 -07001295}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001296#include <zephyr/syscalls/k_sched_current_thread_query_mrsh.c>
Simon Heinbcd1d192024-03-08 12:00:10 +01001297#endif /* CONFIG_USERSPACE */
Andrew Boie76c04a22017-09-27 14:45:10 -07001298
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001299static inline void unpend_all(_wait_q_t *wait_q)
1300{
1301 struct k_thread *thread;
1302
Hess Nathan20b55422024-05-02 14:02:20 +02001303 for (thread = z_waitq_head(wait_q); thread != NULL; thread = z_waitq_head(wait_q)) {
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001304 unpend_thread_no_timeout(thread);
1305 (void)z_abort_thread_timeout(thread);
1306 arch_thread_return_value_set(thread, 0);
1307 ready_thread(thread);
1308 }
1309}
1310
Anas Nashifa6ce4222024-02-22 14:10:17 -05001311#ifdef CONFIG_THREAD_ABORT_HOOK
1312extern void thread_abort_hook(struct k_thread *thread);
Simon Heinbcd1d192024-03-08 12:00:10 +01001313#endif /* CONFIG_THREAD_ABORT_HOOK */
Chen Peng10f63d112021-09-06 13:59:40 +08001314
Peter Mitsise1db1ce2023-08-14 14:06:52 -04001315/**
 1316 * @brief Halts the specified thread
 1317 *
 1318 * Dequeues the specified thread and moves it into the specified new state.
 1319 *
 1320 * @param thread Identifies the thread to halt
Peter Mitsise7986eb2023-08-14 16:41:05 -04001321 * @param new_state New thread state (_THREAD_DEAD or _THREAD_SUSPENDED)
Peter Mitsise1db1ce2023-08-14 14:06:52 -04001322 */
1323static void halt_thread(struct k_thread *thread, uint8_t new_state)
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001324{
Andy Rossf0fd54c2024-03-26 08:38:01 -04001325 bool dummify = false;
1326
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001327 /* We hold the lock, and the thread is known not to be running
1328 * anywhere.
1329 */
Peter Mitsise1db1ce2023-08-14 14:06:52 -04001330 if ((thread->base.thread_state & new_state) == 0U) {
1331 thread->base.thread_state |= new_state;
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001332 if (z_is_thread_queued(thread)) {
Andy Rossc230fb32021-09-23 16:41:30 -07001333 dequeue_thread(thread);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001334 }
Peter Mitsise7986eb2023-08-14 16:41:05 -04001335
1336 if (new_state == _THREAD_DEAD) {
1337 if (thread->base.pended_on != NULL) {
1338 unpend_thread_no_timeout(thread);
1339 }
1340 (void)z_abort_thread_timeout(thread);
1341 unpend_all(&thread->join_queue);
Andy Rossf0fd54c2024-03-26 08:38:01 -04001342
1343 /* Edge case: aborting _current from within an
1344 * ISR that preempted it requires clearing the
1345 * _current pointer so the upcoming context
1346 * switch doesn't clobber the now-freed
1347 * memory
1348 */
1349 if (thread == _current && arch_is_in_isr()) {
1350 dummify = true;
1351 }
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001352 }
Peter Mitsise7986eb2023-08-14 16:41:05 -04001353#ifdef CONFIG_SMP
1354 unpend_all(&thread->halt_queue);
Simon Heinbcd1d192024-03-08 12:00:10 +01001355#endif /* CONFIG_SMP */
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001356 update_cache(1);
1357
Peter Mitsise7986eb2023-08-14 16:41:05 -04001358 if (new_state == _THREAD_SUSPENDED) {
Andy Ross47ab6632024-04-19 15:08:55 -07001359 clear_halting(thread);
Peter Mitsise7986eb2023-08-14 16:41:05 -04001360 return;
1361 }
1362
Grant Ramsay45701e62023-08-14 09:41:52 +12001363#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
1364 arch_float_disable(thread);
Simon Heinbcd1d192024-03-08 12:00:10 +01001365#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
Grant Ramsay45701e62023-08-14 09:41:52 +12001366
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001367 SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread);
1368
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001369 z_thread_monitor_exit(thread);
Anas Nashifa6ce4222024-02-22 14:10:17 -05001370#ifdef CONFIG_THREAD_ABORT_HOOK
1371 thread_abort_hook(thread);
Simon Heinbcd1d192024-03-08 12:00:10 +01001372#endif /* CONFIG_THREAD_ABORT_HOOK */
Chen Peng10f63d112021-09-06 13:59:40 +08001373
Peter Mitsis6df8efe2023-05-11 14:06:46 -04001374#ifdef CONFIG_OBJ_CORE_THREAD
Peter Mitsise6f10902023-06-01 12:16:40 -04001375#ifdef CONFIG_OBJ_CORE_STATS_THREAD
1376 k_obj_core_stats_deregister(K_OBJ_CORE(thread));
Simon Heinbcd1d192024-03-08 12:00:10 +01001377#endif /* CONFIG_OBJ_CORE_STATS_THREAD */
Peter Mitsis6df8efe2023-05-11 14:06:46 -04001378 k_obj_core_unlink(K_OBJ_CORE(thread));
Simon Heinbcd1d192024-03-08 12:00:10 +01001379#endif /* CONFIG_OBJ_CORE_THREAD */
Peter Mitsis6df8efe2023-05-11 14:06:46 -04001380
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001381#ifdef CONFIG_USERSPACE
1382 z_mem_domain_exit_thread(thread);
Anas Nashif70cf96b2023-09-27 10:45:48 +00001383 k_thread_perms_all_clear(thread);
Anas Nashif7a18c2b2023-09-27 10:45:18 +00001384 k_object_uninit(thread->stack_obj);
1385 k_object_uninit(thread);
Simon Heinbcd1d192024-03-08 12:00:10 +01001386#endif /* CONFIG_USERSPACE */
Daniel Leung378131c2024-03-26 11:54:31 -07001387
1388#ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
1389 k_thread_abort_cleanup(thread);
1390#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
Andy Rossf0fd54c2024-03-26 08:38:01 -04001391
1392 /* Do this "set _current to dummy" step last so that
1393 * subsystems above can rely on _current being
1394 * unchanged. Disabled for posix as that arch
1395 * continues to use the _current pointer in its swap
Andy Rossdec022a2024-04-29 12:50:41 -07001396 * code. Note that we must leave a non-null switch
1397 * handle for any threads spinning in join() (this can
1398 * never be used, as our thread is flagged dead, but
 1399 * it must not be NULL, otherwise join can deadlock).
Andy Rossf0fd54c2024-03-26 08:38:01 -04001400 */
1401 if (dummify && !IS_ENABLED(CONFIG_ARCH_POSIX)) {
Andy Rossdec022a2024-04-29 12:50:41 -07001402#ifdef CONFIG_USE_SWITCH
1403 _current->switch_handle = _current;
1404#endif
Andy Rossfd340eb2024-04-19 15:03:09 -07001405 z_dummy_thread_init(&_thread_dummy);
Andy Rossdec022a2024-04-29 12:50:41 -07001406
Andy Rossf0fd54c2024-03-26 08:38:01 -04001407 }
Andy Ross47ab6632024-04-19 15:08:55 -07001408
1409 /* Finally update the halting thread state, on which
1410 * other CPUs might be spinning (see
1411 * thread_halt_spin()).
1412 */
1413 clear_halting(thread);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001414 }
1415}
1416
1417void z_thread_abort(struct k_thread *thread)
1418{
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001419 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001420
Anas Nashif87910122024-02-22 22:24:36 -05001421 if (z_is_thread_essential(thread)) {
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001422 k_spin_unlock(&_sched_spinlock, key);
Andy Rossfb613592022-05-19 12:55:28 -07001423 __ASSERT(false, "aborting essential thread %p", thread);
1424 k_panic();
1425 return;
1426 }
1427
Anas Nashif3f4f3f62021-03-29 17:13:47 -04001428 if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001429 k_spin_unlock(&_sched_spinlock, key);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001430 return;
1431 }
1432
Peter Mitsise7986eb2023-08-14 16:41:05 -04001433 z_thread_halt(thread, key, true);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001434}
1435
1436#if !defined(CONFIG_ARCH_HAS_THREAD_ABORT)
Hess Nathan980d3f42024-06-25 09:13:15 +02001437void z_impl_k_thread_abort(k_tid_t thread)
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001438{
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001439 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread);
1440
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001441 z_thread_abort(thread);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001442
Andy Rossdec022a2024-04-29 12:50:41 -07001443 __ASSERT_NO_MSG((thread->base.thread_state & _THREAD_DEAD) != 0);
1444
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001445 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001446}
Simon Heinbcd1d192024-03-08 12:00:10 +01001447#endif /* !CONFIG_ARCH_HAS_THREAD_ABORT */
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001448
1449int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
1450{
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001451 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
Hess Nathan7659cfd2024-04-29 16:31:47 +02001452 int ret;
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001453
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001454 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, join, thread, timeout);
1455
Anas Nashif3f4f3f62021-03-29 17:13:47 -04001456 if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
Andy Rossa08e23f2023-05-26 09:39:16 -07001457 z_sched_switch_spin(thread);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001458 ret = 0;
1459 } else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
1460 ret = -EBUSY;
Anas Nashif3f4f3f62021-03-29 17:13:47 -04001461 } else if ((thread == _current) ||
1462 (thread->base.pended_on == &_current->join_queue)) {
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001463 ret = -EDEADLK;
1464 } else {
1465 __ASSERT(!arch_is_in_isr(), "cannot join in ISR");
1466 add_to_waitq_locked(_current, &thread->join_queue);
1467 add_thread_timeout(_current, timeout);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001468
1469 SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout);
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001470 ret = z_swap(&_sched_spinlock, key);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001471 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);
1472
1473 return ret;
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001474 }
1475
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001476 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);
1477
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001478 k_spin_unlock(&_sched_spinlock, key);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001479 return ret;
1480}
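/* Usage sketch: join a worker with a bounded wait and fall back to aborting
 * it if it does not exit in time. "worker" is a placeholder struct k_thread
 * previously started with k_thread_create().
 *
 *	if (k_thread_join(&worker, K_SECONDS(2)) != 0) {
 *		// -EAGAIN (timed out) or -EBUSY (K_NO_WAIT): stop waiting
 *		// and terminate it; k_thread_abort() does not return until
 *		// the thread is fully dead, so a later join returns 0.
 *		k_thread_abort(&worker);
 *	}
 */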
1481
Andrew Boie322816e2020-02-20 16:33:06 -08001482#ifdef CONFIG_USERSPACE
1483/* Special case: don't oops if the thread is uninitialized. This is because
 1484 * the initialization bit does double duty for thread objects; if it is
 1485 * false, either the thread object is truly uninitialized, or the thread
 1486 * ran and exited for some reason.
 1487 *
 1488 * Return true in this case, indicating that we should do nothing and
 1489 * return success to the caller.
1490 */
1491static bool thread_obj_validate(struct k_thread *thread)
1492{
Anas Nashifc25d0802023-09-27 10:49:28 +00001493 struct k_object *ko = k_object_find(thread);
Anas Nashif21254b22023-09-27 10:50:26 +00001494 int ret = k_object_validate(ko, K_OBJ_THREAD, _OBJ_INIT_TRUE);
Andrew Boie322816e2020-02-20 16:33:06 -08001495
1496 switch (ret) {
1497 case 0:
1498 return false;
1499 case -EINVAL:
1500 return true;
1501 default:
1502#ifdef CONFIG_LOG
Anas Nashif3ab35662023-09-27 10:51:23 +00001503 k_object_dump_error(ret, thread, ko, K_OBJ_THREAD);
Simon Heinbcd1d192024-03-08 12:00:10 +01001504#endif /* CONFIG_LOG */
Anas Nashifa08bfeb2023-09-27 11:20:28 +00001505 K_OOPS(K_SYSCALL_VERIFY_MSG(ret, "access denied"));
Andrew Boie322816e2020-02-20 16:33:06 -08001506 }
Enjia Mai53ca7092021-01-15 17:09:58 +08001507 CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
Andrew Boie322816e2020-02-20 16:33:06 -08001508}
1509
Andy Ross78327382020-03-05 15:18:14 -08001510static inline int z_vrfy_k_thread_join(struct k_thread *thread,
1511 k_timeout_t timeout)
Andrew Boie322816e2020-02-20 16:33:06 -08001512{
1513 if (thread_obj_validate(thread)) {
1514 return 0;
1515 }
1516
1517 return z_impl_k_thread_join(thread, timeout);
1518}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001519#include <zephyr/syscalls/k_thread_join_mrsh.c>
Andrew Boiea4c91902020-03-24 16:09:24 -07001520
1521static inline void z_vrfy_k_thread_abort(k_tid_t thread)
1522{
1523 if (thread_obj_validate(thread)) {
1524 return;
1525 }
1526
Anas Nashif87910122024-02-22 22:24:36 -05001527 K_OOPS(K_SYSCALL_VERIFY_MSG(!z_is_thread_essential(thread),
Andrew Boiea4c91902020-03-24 16:09:24 -07001528 "aborting essential thread %p", thread));
1529
1530 z_impl_k_thread_abort((struct k_thread *)thread);
1531}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001532#include <zephyr/syscalls/k_thread_abort_mrsh.c>
Andrew Boie322816e2020-02-20 16:33:06 -08001533#endif /* CONFIG_USERSPACE */
Peter Bigot0259c862021-01-12 13:45:32 -06001534
1535/*
1536 * future scheduler.h API implementations
1537 */
1538bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data)
1539{
1540 struct k_thread *thread;
1541 bool ret = false;
1542
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001543 K_SPINLOCK(&_sched_spinlock) {
Peter Bigot0259c862021-01-12 13:45:32 -06001544 thread = _priq_wait_best(&wait_q->waitq);
1545
1546 if (thread != NULL) {
1547 z_thread_return_value_set_with_data(thread,
1548 swap_retval,
1549 swap_data);
1550 unpend_thread_no_timeout(thread);
1551 (void)z_abort_thread_timeout(thread);
1552 ready_thread(thread);
1553 ret = true;
1554 }
1555 }
1556
1557 return ret;
1558}
1559
1560int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
1561 _wait_q_t *wait_q, k_timeout_t timeout, void **data)
1562{
1563 int ret = z_pend_curr(lock, key, wait_q, timeout);
1564
1565 if (data != NULL) {
1566 *data = _current->base.swap_data;
1567 }
1568 return ret;
1569}
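/* Kernel-internal usage sketch: a minimal "signal" built on z_sched_wait()
 * and z_sched_wake(). struct my_signal and its functions are hypothetical,
 * shown only to illustrate the intended pairing; the wait queue is assumed
 * to have been set up with z_waitq_init() beforehand.
 *
 *	struct my_signal {
 *		struct k_spinlock lock;
 *		_wait_q_t waiters;
 *	};
 *
 *	int my_signal_wait(struct my_signal *sig, k_timeout_t timeout)
 *	{
 *		k_spinlock_key_t key = k_spin_lock(&sig->lock);
 *
 *		// Pends on the wait queue, releasing the lock; returns the
 *		// swap_retval handed to z_sched_wake(), or a timeout error.
 *		return z_sched_wait(&sig->lock, key, &sig->waiters,
 *				    timeout, NULL);
 *	}
 *
 *	void my_signal_send(struct my_signal *sig)
 *	{
 *		// Readies at most one waiter, passing it return value 0.
 *		(void)z_sched_wake(&sig->waiters, 0, NULL);
 *	}
 */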
Peter Mitsisca583392023-01-05 11:50:21 -05001570
1571int z_sched_waitq_walk(_wait_q_t *wait_q,
1572 int (*func)(struct k_thread *, void *), void *data)
1573{
1574 struct k_thread *thread;
1575 int status = 0;
1576
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001577 K_SPINLOCK(&_sched_spinlock) {
Peter Mitsisca583392023-01-05 11:50:21 -05001578 _WAIT_Q_FOR_EACH(wait_q, thread) {
1579
1580 /*
1581 * Invoke the callback function on each waiting thread
1582 * for as long as there are both waiting threads AND
1583 * it returns 0.
1584 */
1585
1586 status = func(thread, data);
1587 if (status != 0) {
1588 break;
1589 }
1590 }
1591 }
1592
1593 return status;
1594}
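/* Usage sketch: a z_sched_waitq_walk() callback that counts waiters and
 * stops the walk early once a cap is hit (any non-zero return ends the
 * iteration, as noted in the loop above). Kernel-internal, illustrative
 * only; some_wait_q is a placeholder.
 *
 *	static int count_waiter(struct k_thread *thread, void *data)
 *	{
 *		int *count = data;
 *
 *		(*count)++;
 *		return (*count >= 16) ? 1 : 0;	// arbitrary cap
 *	}
 *
 *	int n = 0;
 *	(void)z_sched_waitq_walk(&some_wait_q, count_waiter, &n);
 */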
Peter Mitsis318b4952024-09-16 11:52:11 -07001595
1596/* This routine exists for benchmarking purposes. It is not used in
1597 * general production code.
1598 */
1599void z_unready_thread(struct k_thread *thread)
1600{
1601 K_SPINLOCK(&_sched_spinlock) {
1602 unready_thread(thread);
1603 }
1604}