/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/spinlock.h>
#include <wait_q.h>
#include <kthread.h>
#include <priority_q.h>
#include <kswap.h>
#include <ipi.h>
#include <kernel_arch_func.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <stdbool.h>
#include <kernel_internal.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/timing/timing.h>
#include <zephyr/sys/util.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#if defined(CONFIG_SWAP_NONATOMIC) && defined(CONFIG_TIMESLICING)
extern struct k_thread *pending_current;
#endif

struct k_spinlock _sched_spinlock;

/* Storage to "complete" the context switch from an invalid/incomplete thread
 * context (ex: exiting an ISR that aborted _current)
 */
__incoherent struct k_thread _thread_dummy;

static void update_cache(int preempt_ok);
static void halt_thread(struct k_thread *thread, uint8_t new_state);
static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q);


BUILD_ASSERT(CONFIG_NUM_COOP_PRIORITIES >= CONFIG_NUM_METAIRQ_PRIORITIES,
	     "You need to provide at least as many CONFIG_NUM_COOP_PRIORITIES as "
	     "CONFIG_NUM_METAIRQ_PRIORITIES as Meta IRQs are just a special class of cooperative "
	     "threads.");

/*
 * Return value same as e.g. memcmp
 * > 0 -> thread 1 priority > thread 2 priority
 * = 0 -> thread 1 priority == thread 2 priority
 * < 0 -> thread 1 priority < thread 2 priority
 * Do not rely on the actual value returned aside from the above.
 * (Again, like memcmp.)
 */
int32_t z_sched_prio_cmp(struct k_thread *thread_1,
			 struct k_thread *thread_2)
{
	/* `prio` is <32b, so the below cannot overflow. */
	int32_t b1 = thread_1->base.prio;
	int32_t b2 = thread_2->base.prio;

	if (b1 != b2) {
		return b2 - b1;
	}

#ifdef CONFIG_SCHED_DEADLINE
	/* If we assume all deadlines live within the same "half" of
	 * the 32 bit modulus space (this is a documented API rule),
	 * then the latest deadline in the queue minus the earliest is
	 * guaranteed to be (2's complement) non-negative. We can
	 * leverage that to compare the values without having to check
	 * the current time.
	 */
	uint32_t d1 = thread_1->base.prio_deadline;
	uint32_t d2 = thread_2->base.prio_deadline;

	if (d1 != d2) {
		/* Sooner deadline means higher effective priority.
		 * Doing the calculation with unsigned types and casting
		 * to signed isn't perfect, but at least reduces this
		 * from UB on overflow to impdef.
		 */
		return (int32_t) (d2 - d1);
	}
#endif /* CONFIG_SCHED_DEADLINE */
	return 0;
}

static ALWAYS_INLINE void *thread_runq(struct k_thread *thread)
{
#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	int cpu, m = thread->base.cpu_mask;

	/* Edge case: it's legal per the API to "make runnable" a
	 * thread with all CPUs masked off (i.e. one that isn't
	 * actually runnable!). Sort of a wart in the API and maybe
	 * we should address this in docs/assertions instead to avoid
	 * the extra test.
	 */
	cpu = m == 0 ? 0 : u32_count_trailing_zeros(m);

	return &_kernel.cpus[cpu].ready_q.runq;
#else
	ARG_UNUSED(thread);
	return &_kernel.ready_q.runq;
#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
}

static ALWAYS_INLINE void *curr_cpu_runq(void)
{
#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	return &arch_curr_cpu()->ready_q.runq;
#else
	return &_kernel.ready_q.runq;
#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
}

static ALWAYS_INLINE void runq_add(struct k_thread *thread)
{
	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	_priq_run_add(thread_runq(thread), thread);
}

static ALWAYS_INLINE void runq_remove(struct k_thread *thread)
{
	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	_priq_run_remove(thread_runq(thread), thread);
}

static ALWAYS_INLINE struct k_thread *runq_best(void)
{
	return _priq_run_best(curr_cpu_runq());
}

/* _current is never in the run queue until context switch on
 * SMP configurations, see z_requeue_current()
 */
static inline bool should_queue_thread(struct k_thread *thread)
{
	return !IS_ENABLED(CONFIG_SMP) || (thread != _current);
}

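/* Descriptive note (derived from the code below): queue_thread() and
 * dequeue_thread() maintain the _THREAD_QUEUED state bit alongside run
 * queue membership.  On SMP, _current is deliberately kept out of the
 * run queue (see should_queue_thread()); "queueing" it only records a
 * yield request via the per-CPU swap_ok flag.
 */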
static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_QUEUED;
	if (should_queue_thread(thread)) {
		runq_add(thread);
	}
#ifdef CONFIG_SMP
	if (thread == _current) {
		/* adding _current to the end of the queue means "yield" */
		_current_cpu->swap_ok = true;
	}
#endif /* CONFIG_SMP */
}

static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_QUEUED;
	if (should_queue_thread(thread)) {
		runq_remove(thread);
	}
}

/* Called out of z_swap() when CONFIG_SMP.  The current thread can
 * never live in the run queue until we are inexorably on the context
 * switch path on SMP, otherwise there is a deadlock condition where a
 * set of CPUs pick a cycle of threads to run and wait for them all to
 * context switch forever.
 */
void z_requeue_current(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		runq_add(thread);
	}
	signal_pending_ipi();
}

/* Return true if the thread is aborting, else false */
static inline bool is_aborting(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_ABORTING) != 0U;
}

/* Return true if the thread is aborting or suspending, else false */
static inline bool is_halting(struct k_thread *thread)
{
	return (thread->base.thread_state &
		(_THREAD_ABORTING | _THREAD_SUSPENDING)) != 0U;
}

/* Clear the halting bits (_THREAD_ABORTING and _THREAD_SUSPENDING) */
static inline void clear_halting(struct k_thread *thread)
{
	barrier_dmem_fence_full(); /* Other cpus spin on this locklessly! */
	thread->base.thread_state &= ~(_THREAD_ABORTING | _THREAD_SUSPENDING);
}

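/* Descriptive note (derived from the code below): next_up() picks the
 * best candidate to run on this CPU: normally the head of the run
 * queue, adjusted for a MetaIRQ-preempted cooperative thread and, on
 * SMP, compared against _current (which does not live in the queue).
 * Falls back to the idle thread when nothing else is runnable.
 */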
static ALWAYS_INLINE struct k_thread *next_up(void)
{
#ifdef CONFIG_SMP
	if (is_halting(_current)) {
		halt_thread(_current, is_aborting(_current) ?
				      _THREAD_DEAD : _THREAD_SUSPENDED);
	}
#endif /* CONFIG_SMP */

	struct k_thread *thread = runq_best();

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && \
	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
	/* MetaIRQs must always attempt to return to a
	 * cooperative thread they preempted and not whatever happens
	 * to be highest priority now. The cooperative thread was
	 * promised it wouldn't be preempted (by non-metairq threads)!
	 */
	struct k_thread *mirqp = _current_cpu->metairq_preempted;

	if (mirqp != NULL && (thread == NULL || !thread_is_metairq(thread))) {
		if (!z_is_thread_prevented_from_running(mirqp)) {
			thread = mirqp;
		} else {
			_current_cpu->metairq_preempted = NULL;
		}
	}
#endif
/* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
 * CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
 */

#ifndef CONFIG_SMP
	/* In uniprocessor mode, we can leave the current thread in
	 * the queue (actually we have to, otherwise the assembly
	 * context switch code for all architectures would be
	 * responsible for putting it back in z_swap and ISR return!),
	 * which makes this choice simple.
	 */
	return (thread != NULL) ? thread : _current_cpu->idle_thread;
#else
	/* Under SMP, the "cache" mechanism for selecting the next
	 * thread doesn't work, so we have more work to do to test
	 * _current against the best choice from the queue.  Here, the
	 * thread selected above represents "the best thread that is
	 * not current".
	 *
	 * Subtle note on "queued": in SMP mode, _current does not
	 * live in the queue, so this isn't exactly the same thing as
	 * "ready", it means "is _current already added back to the
	 * queue such that we don't want to re-add it".
	 */
	bool queued = z_is_thread_queued(_current);
	bool active = !z_is_thread_prevented_from_running(_current);

	if (thread == NULL) {
		thread = _current_cpu->idle_thread;
	}

	if (active) {
		int32_t cmp = z_sched_prio_cmp(_current, thread);

		/* Ties only switch if state says we yielded */
		if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) {
			thread = _current;
		}

		if (!should_preempt(thread, _current_cpu->swap_ok)) {
			thread = _current;
		}
	}

	/* Put _current back into the queue */
	if ((thread != _current) && active &&
	    !z_is_idle_thread_object(_current) && !queued) {
		queue_thread(_current);
	}

	/* Take the new _current out of the queue */
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}

	_current_cpu->swap_ok = false;
	return thread;
#endif /* CONFIG_SMP */
}

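/* Descriptive note (derived from the code below): requeue the thread
 * behind other threads of the same priority ("yield" semantics) and
 * refresh the scheduler's cached decision.
 */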
void move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}
	queue_thread(thread);
	update_cache(thread == _current);
}

/* Track cooperative threads preempted by metairqs so we can return to
 * them specifically. Called at the moment a new thread has been
 * selected to run.
 */
static void update_metairq_preempt(struct k_thread *thread)
{
#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && \
	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
	if (thread_is_metairq(thread) && !thread_is_metairq(_current) &&
	    !thread_is_preemptible(_current)) {
		/* Record new preemption */
		_current_cpu->metairq_preempted = _current;
	} else if (!thread_is_metairq(thread) && !z_is_idle_thread_object(thread)) {
		/* Returning from existing preemption */
		_current_cpu->metairq_preempted = NULL;
	}
#else
	ARG_UNUSED(thread);
#endif
/* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
 * CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
 */
}

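/* Descriptive note (derived from the code below): recompute the
 * scheduler's cached decision after a thread state change.  On
 * uniprocessor builds this refreshes _kernel.ready_q.cache; on SMP it
 * only records (via swap_ok) whether cooperative swapping of _current
 * is currently permissible.
 */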
static void update_cache(int preempt_ok)
{
#ifndef CONFIG_SMP
	struct k_thread *thread = next_up();

	if (should_preempt(thread, preempt_ok)) {
#ifdef CONFIG_TIMESLICING
		if (thread != _current) {
			z_reset_time_slice(thread);
		}
#endif /* CONFIG_TIMESLICING */
		update_metairq_preempt(thread);
		_kernel.ready_q.cache = thread;
	} else {
		_kernel.ready_q.cache = _current;
	}

#else
	/* The way this works is that the CPU record keeps its
	 * "cooperative swapping is OK" flag until the next reschedule
	 * call or context switch. It doesn't need to be tracked per
	 * thread because if the thread gets preempted for whatever
	 * reason the scheduler will make the same decision anyway.
	 */
	_current_cpu->swap_ok = preempt_ok;
#endif /* CONFIG_SMP */
}

static struct _cpu *thread_active_elsewhere(struct k_thread *thread)
{
	/* Returns pointer to _cpu if the thread is currently running on
	 * another CPU. There are more scalable designs to answer this
	 * question in constant time, but this is fine for now.
	 */
#ifdef CONFIG_SMP
	int currcpu = _current_cpu->id;

	unsigned int num_cpus = arch_num_cpus();

	for (int i = 0; i < num_cpus; i++) {
		if ((i != currcpu) &&
		    (_kernel.cpus[i].current == thread)) {
			return &_kernel.cpus[i];
		}
	}
#endif /* CONFIG_SMP */
	ARG_UNUSED(thread);
	return NULL;
}

static void ready_thread(struct k_thread *thread)
{
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(thread));
#endif /* CONFIG_KERNEL_COHERENCE */

	/* If the thread is already queued, do not try to add it to
	 * the run queue again.
	 */
	if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) {
		SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_ready, thread);

		queue_thread(thread);
		update_cache(0);

		flag_ipi(ipi_mask_create(thread));
	}
}

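/* Descriptive note: variant of z_ready_thread() for callers that
 * already hold _sched_spinlock.
 */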
void z_ready_thread_locked(struct k_thread *thread)
{
	if (thread_active_elsewhere(thread) == NULL) {
		ready_thread(thread);
	}
}

void z_ready_thread(struct k_thread *thread)
{
	K_SPINLOCK(&_sched_spinlock) {
		if (thread_active_elsewhere(thread) == NULL) {
			ready_thread(thread);
		}
	}
}

void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	K_SPINLOCK(&_sched_spinlock) {
		move_thread_to_end_of_prio_q(thread);
	}
}

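/* Descriptive note (derived from the code below): mark a thread as
 * started and make it runnable, rescheduling if appropriate.  A no-op
 * for threads that have already started.
 */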
void z_sched_start(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

	if (z_has_thread_started(thread)) {
		k_spin_unlock(&_sched_spinlock, key);
		return;
	}

	z_mark_thread_as_started(thread);
	ready_thread(thread);
	z_reschedule(&_sched_spinlock, key);
}

/* Spins in ISR context, waiting for a thread known to be running on
 * another CPU to catch the IPI we sent and halt.  Note that we check
 * for ourselves being asynchronously halted first to prevent simple
 * deadlocks (but not complex ones involving cycles of 3+ threads!).
 * Acts to release the provided lock before returning.
 */
static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
{
	if (is_halting(_current)) {
		halt_thread(_current,
			    is_aborting(_current) ? _THREAD_DEAD : _THREAD_SUSPENDED);
	}
	k_spin_unlock(&_sched_spinlock, key);
	while (is_halting(thread)) {
		unsigned int k = arch_irq_lock();

		arch_spin_relax(); /* Requires interrupts be masked */
		arch_irq_unlock(k);
	}
}

/* Shared handler for k_thread_{suspend,abort}().  Called with the
 * scheduler lock held and the key passed (which it may
 * release/reacquire!) which will be released before a possible return
 * (aborting _current will not return, obviously), which may be after
 * a context switch.
 */
static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
			  bool terminate)
{
	_wait_q_t *wq = &thread->join_queue;
#ifdef CONFIG_SMP
	wq = terminate ? wq : &thread->halt_queue;
#endif

	/* If the target is a thread running on another CPU, flag and
	 * poke (note that we might spin to wait, so a true
	 * synchronous IPI is needed here, not deferred!), it will
	 * halt itself in the IPI.  Otherwise it's unscheduled, so we
	 * can clean it up directly.
	 */

	struct _cpu *cpu = thread_active_elsewhere(thread);

	if (cpu != NULL) {
		thread->base.thread_state |= (terminate ? _THREAD_ABORTING
					      : _THREAD_SUSPENDING);
#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
#ifdef CONFIG_ARCH_HAS_DIRECTED_IPIS
		arch_sched_directed_ipi(IPI_CPU_MASK(cpu->id));
#else
		arch_sched_broadcast_ipi();
#endif
#endif
		if (arch_is_in_isr()) {
			thread_halt_spin(thread, key);
		} else {
			add_to_waitq_locked(_current, wq);
			z_swap(&_sched_spinlock, key);
		}
	} else {
		halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
		if ((thread == _current) && !arch_is_in_isr()) {
			z_swap(&_sched_spinlock, key);
			__ASSERT(!terminate, "aborted _current back from dead");
		} else {
			k_spin_unlock(&_sched_spinlock, key);
		}
	}
	/* NOTE: the scheduler lock has been released.  Don't put
	 * logic here, it's likely to be racy/deadlocky even if you
	 * re-take the lock!
	 */
}


void z_impl_k_thread_suspend(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, suspend, thread);

	(void)z_abort_thread_timeout(thread);

	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

	if ((thread->base.thread_state & _THREAD_SUSPENDED) != 0U) {

		/* The target thread is already suspended. Nothing to do. */

		k_spin_unlock(&_sched_spinlock, key);
		return;
	}

	z_thread_halt(thread, key, false);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, suspend, thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_suspend(struct k_thread *thread)
{
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_suspend(thread);
}
#include <zephyr/syscalls/k_thread_suspend_mrsh.c>
#endif /* CONFIG_USERSPACE */

void z_impl_k_thread_resume(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, resume, thread);

	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

	/* Do not try to resume a thread that was not suspended */
	if (!z_is_thread_suspended(thread)) {
		k_spin_unlock(&_sched_spinlock, key);
		return;
	}

	z_mark_thread_as_not_suspended(thread);
	ready_thread(thread);

	z_reschedule(&_sched_spinlock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, resume, thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_resume(struct k_thread *thread)
{
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_resume(thread);
}
#include <zephyr/syscalls/k_thread_resume_mrsh.c>
#endif /* CONFIG_USERSPACE */

static _wait_q_t *pended_on_thread(struct k_thread *thread)
{
	__ASSERT_NO_MSG(thread->base.pended_on);

	return thread->base.pended_on;
}

static void unready_thread(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}
	update_cache(thread == _current);
}

/* _sched_spinlock must be held */
static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q)
{
	unready_thread(thread);
	z_mark_thread_as_pending(thread);

	SYS_PORT_TRACING_FUNC(k_thread, sched_pend, thread);

	if (wait_q != NULL) {
		thread->base.pended_on = wait_q;
		_priq_wait_add(&wait_q->waitq, thread);
	}
}

static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
{
	if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		z_add_thread_timeout(thread, timeout);
	}
}

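/* Descriptive note: pend the thread on wait_q (if non-NULL) and arm
 * its timeout.  Called with _sched_spinlock held.
 */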
static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q,
			k_timeout_t timeout)
{
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(wait_q == NULL || arch_mem_coherent(wait_q));
#endif /* CONFIG_KERNEL_COHERENCE */
	add_to_waitq_locked(thread, wait_q);
	add_thread_timeout(thread, timeout);
}

void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
		   k_timeout_t timeout)
{
	__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
	K_SPINLOCK(&_sched_spinlock) {
		pend_locked(thread, wait_q, timeout);
	}
}

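/* Descriptive note: remove a thread from the wait queue it is pended
 * on without touching its timeout.  Called with _sched_spinlock held.
 */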
static inline void unpend_thread_no_timeout(struct k_thread *thread)
{
	_priq_wait_remove(&pended_on_thread(thread)->waitq, thread);
	z_mark_thread_as_not_pending(thread);
	thread->base.pended_on = NULL;
}

ALWAYS_INLINE void z_unpend_thread_no_timeout(struct k_thread *thread)
{
	K_SPINLOCK(&_sched_spinlock) {
		if (thread->base.pended_on != NULL) {
			unpend_thread_no_timeout(thread);
		}
	}
}

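/* Descriptive note (derived from the code below): common wakeup path
 * that unpends the thread (unless it is dead or being aborted) and
 * makes it runnable again.  is_timeout is true when the wakeup is
 * driven by an expiring thread timeout.
 */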
void z_sched_wake_thread(struct k_thread *thread, bool is_timeout)
{
	K_SPINLOCK(&_sched_spinlock) {
		bool killed = (thread->base.thread_state &
			       (_THREAD_DEAD | _THREAD_ABORTING));

#ifdef CONFIG_EVENTS
		bool do_nothing = thread->no_wake_on_timeout && is_timeout;

		thread->no_wake_on_timeout = false;

		if (do_nothing) {
			continue;
		}
#endif /* CONFIG_EVENTS */

		if (!killed) {
			/* The thread is not being killed */
			if (thread->base.pended_on != NULL) {
				unpend_thread_no_timeout(thread);
			}
			z_mark_thread_as_started(thread);
			if (is_timeout) {
				z_mark_thread_as_not_suspended(thread);
			}
			ready_thread(thread);
		}
	}

}

#ifdef CONFIG_SYS_CLOCK_EXISTS
/* Timeout handler for *_thread_timeout() APIs */
void z_thread_timeout(struct _timeout *timeout)
{
	struct k_thread *thread = CONTAINER_OF(timeout,
					       struct k_thread, base.timeout);

	z_sched_wake_thread(thread, true);
}
#endif /* CONFIG_SYS_CLOCK_EXISTS */

int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
		_wait_q_t *wait_q, k_timeout_t timeout)
{
#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
	pending_current = _current;
#endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
	__ASSERT_NO_MSG(sizeof(_sched_spinlock) == 0 || lock != &_sched_spinlock);

	/* We do a "lock swap" prior to calling z_swap(), such that
	 * the caller's lock gets released as desired. But we ensure
	 * that we hold the scheduler lock and leave local interrupts
	 * masked until we reach the context switch. z_swap() itself
	 * has similar code; the duplication is because it's a legacy
	 * API that doesn't expect to be called with scheduler lock
	 * held.
	 */
	(void) k_spin_lock(&_sched_spinlock);
	pend_locked(_current, wait_q, timeout);
	k_spin_release(lock);
	return z_swap(&_sched_spinlock, key);
}

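/* Descriptive note: remove and return the highest-priority waiter on
 * wait_q, leaving its timeout (if any) running, or NULL if the wait
 * queue is empty.
 */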
struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
{
	struct k_thread *thread = NULL;

	K_SPINLOCK(&_sched_spinlock) {
		thread = _priq_wait_best(&wait_q->waitq);

		if (thread != NULL) {
			unpend_thread_no_timeout(thread);
		}
	}

	return thread;
}

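/* Descriptive note: like z_unpend1_no_timeout(), but also aborts the
 * unpended thread's timeout.
 */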
struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
{
	struct k_thread *thread = NULL;

	K_SPINLOCK(&_sched_spinlock) {
		thread = _priq_wait_best(&wait_q->waitq);

		if (thread != NULL) {
			unpend_thread_no_timeout(thread);
			(void)z_abort_thread_timeout(thread);
		}
	}

	return thread;
}

void z_unpend_thread(struct k_thread *thread)
{
	z_unpend_thread_no_timeout(thread);
	(void)z_abort_thread_timeout(thread);
}

/* Priority set utility that does no rescheduling; it just changes the
 * run queue state, returning true if a reschedule is needed later.
 */
bool z_thread_prio_set(struct k_thread *thread, int prio)
{
	bool need_sched = 0;
	int old_prio = thread->base.prio;

	K_SPINLOCK(&_sched_spinlock) {
		need_sched = z_is_thread_ready(thread);

		if (need_sched) {
			if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) {
				dequeue_thread(thread);
				thread->base.prio = prio;
				queue_thread(thread);

				if (old_prio > prio) {
					flag_ipi(ipi_mask_create(thread));
				}
			} else {
				/*
				 * This is a running thread on SMP. Update its
				 * priority, but do not requeue it. An IPI is
				 * needed if the priority is both being lowered
				 * and it is running on another CPU.
				 */

				thread->base.prio = prio;

				struct _cpu *cpu;

				cpu = thread_active_elsewhere(thread);
				if ((cpu != NULL) && (old_prio < prio)) {
					flag_ipi(IPI_CPU_MASK(cpu->id));
				}
			}

			update_cache(1);
		} else {
			thread->base.prio = prio;
		}
	}

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_priority_set, thread, prio);

	return need_sched;
}

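/* Descriptive note: decide whether a reschedule point may actually
 * swap: never from within an ISR, nor while the caller still has
 * interrupts locked.
 */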
static inline bool resched(uint32_t key)
{
#ifdef CONFIG_SMP
	_current_cpu->swap_ok = 0;
#endif /* CONFIG_SMP */

	return arch_irq_unlocked(key) && !arch_is_in_isr();
}

/*
 * Check if the next ready thread is the same as the current thread
 * and save the trip if true.
 */
static inline bool need_swap(void)
{
	/* the SMP case will be handled in C based z_swap() */
#ifdef CONFIG_SMP
	return true;
#else
	struct k_thread *new_thread;

	/* Check if the next ready thread is the same as the current thread */
	new_thread = _kernel.ready_q.cache;
	return new_thread != _current;
#endif /* CONFIG_SMP */
}

void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
{
	if (resched(key.key) && need_swap()) {
		z_swap(lock, key);
	} else {
		k_spin_unlock(lock, key);
		signal_pending_ipi();
	}
}

void z_reschedule_irqlock(uint32_t key)
{
	if (resched(key) && need_swap()) {
		z_swap_irqlock(key);
	} else {
		irq_unlock(key);
		signal_pending_ipi();
	}
}

void k_sched_lock(void)
{
	K_SPINLOCK(&_sched_spinlock) {
		SYS_PORT_TRACING_FUNC(k_thread, sched_lock);

		z_sched_lock();
	}
}

void k_sched_unlock(void)
{
	K_SPINLOCK(&_sched_spinlock) {
		__ASSERT(_current->base.sched_locked != 0U, "");
		__ASSERT(!arch_is_in_isr(), "");

		++_current->base.sched_locked;
		update_cache(0);
	}

	LOG_DBG("scheduler unlocked (%p:%d)",
		_current, _current->base.sched_locked);

	SYS_PORT_TRACING_FUNC(k_thread, sched_unlock);

	z_reschedule_unlocked();
}

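/* Descriptive note: return the thread z_swap() should switch to; on
 * uniprocessor builds this is simply the cached choice.
 */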
struct k_thread *z_swap_next_thread(void)
{
#ifdef CONFIG_SMP
	struct k_thread *ret = next_up();

	if (ret == _current) {
		/* When not swapping, have to signal IPIs here. In
		 * the context switch case it must happen later, after
		 * _current gets requeued.
		 */
		signal_pending_ipi();
	}
	return ret;
#else
	return _kernel.ready_q.cache;
#endif /* CONFIG_SMP */
}

#ifdef CONFIG_USE_SWITCH
/* Just a wrapper around _current = xxx with tracing */
static inline void set_current(struct k_thread *new_thread)
{
	z_thread_mark_switched_out();
	_current_cpu->current = new_thread;
}

/**
 * @brief Determine next thread to execute upon completion of an interrupt
 *
 * Thread preemption is performed by context switching after the completion
 * of a non-recursed interrupt. This function determines which thread to
 * switch to if any. This function accepts as @p interrupted either:
 *
 * - The handle for the interrupted thread in which case the thread's context
 *   must already be fully saved and ready to be picked up by a different CPU.
 *
 * - NULL if more work is required to fully save the thread's state after
 *   it is known that a new thread is to be scheduled. It is up to the caller
 *   to store the handle of the thread being switched out in that thread's
 *   "switch_handle" field after its context has fully been saved, following
 *   the same requirements as with the @ref arch_switch() function.
 *
 * If a new thread needs to be scheduled then its handle is returned.
 * Otherwise the same value provided as @p interrupted is returned back.
 * Those handles are the same opaque types used by the @ref arch_switch()
 * function.
 *
 * @warning
 * The @ref _current value may have changed after this call and not refer
 * to the interrupted thread anymore. It might be necessary to make a local
 * copy before calling this function.
 *
 * @param interrupted Handle for the thread that was interrupted or NULL.
 * @retval Handle for the next thread to execute, or @p interrupted when
 *         no new thread is to be scheduled.
 */
void *z_get_next_switch_handle(void *interrupted)
{
	z_check_stack_sentinel();

#ifdef CONFIG_SMP
	void *ret = NULL;

	K_SPINLOCK(&_sched_spinlock) {
		struct k_thread *old_thread = _current, *new_thread;

		if (IS_ENABLED(CONFIG_SMP)) {
			old_thread->switch_handle = NULL;
		}
		new_thread = next_up();

		z_sched_usage_switch(new_thread);

		if (old_thread != new_thread) {
			update_metairq_preempt(new_thread);
			z_sched_switch_spin(new_thread);
			arch_cohere_stacks(old_thread, interrupted, new_thread);

			_current_cpu->swap_ok = 0;
			new_thread->base.cpu = arch_curr_cpu()->id;
			set_current(new_thread);

#ifdef CONFIG_TIMESLICING
			z_reset_time_slice(new_thread);
#endif /* CONFIG_TIMESLICING */

#ifdef CONFIG_SPIN_VALIDATE
			/* Changed _current!  Update the spinlock
			 * bookkeeping so the validation doesn't get
			 * confused when the "wrong" thread tries to
			 * release the lock.
			 */
			z_spin_lock_set_owner(&_sched_spinlock);
#endif /* CONFIG_SPIN_VALIDATE */

			/* A queued (runnable) old/current thread
			 * needs to be added back to the run queue
			 * here, and atomically with its switch handle
			 * being set below.  This is safe now, as we
			 * will not return into it.
			 */
			if (z_is_thread_queued(old_thread)) {
				runq_add(old_thread);
			}
		}
		old_thread->switch_handle = interrupted;
		ret = new_thread->switch_handle;
		if (IS_ENABLED(CONFIG_SMP)) {
			/* Active threads MUST have a null here */
			new_thread->switch_handle = NULL;
		}
	}
	signal_pending_ipi();
	return ret;
#else
	z_sched_usage_switch(_kernel.ready_q.cache);
	_current->switch_handle = interrupted;
	set_current(_kernel.ready_q.cache);
	return _current->switch_handle;
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_USE_SWITCH */

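/* Descriptive note: wake every thread pended on wait_q, returning
 * nonzero if a reschedule is warranted afterwards.
 */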
int z_unpend_all(_wait_q_t *wait_q)
{
	int need_sched = 0;
	struct k_thread *thread;

	for (thread = z_waitq_head(wait_q); thread != NULL; thread = z_waitq_head(wait_q)) {
		z_unpend_thread(thread);
		z_ready_thread(thread);
		need_sched = 1;
	}

	return need_sched;
}

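/* Descriptive note: initialize a ready queue for whichever run queue
 * backend is configured (scalable red/black tree, multi-queue, or
 * simple dlist).
 */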
Anas Nashif477a04a2024-02-28 08:15:15 -0500997void init_ready_q(struct _ready_q *ready_q)
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400998{
Andy Rossb155d062021-09-24 13:49:14 -0700999#if defined(CONFIG_SCHED_SCALABLE)
Anas Nashif477a04a2024-02-28 08:15:15 -05001000 ready_q->runq = (struct _priq_rb) {
Andy Ross1acd8c22018-05-03 14:51:49 -07001001 .tree = {
Patrik Flykt4344e272019-03-08 14:19:05 -07001002 .lessthan_fn = z_priq_rb_lessthan,
Andy Ross1acd8c22018-05-03 14:51:49 -07001003 }
1004 };
Andy Rossb155d062021-09-24 13:49:14 -07001005#elif defined(CONFIG_SCHED_MULTIQ)
Andy Ross9f06a352018-06-28 10:38:14 -07001006 for (int i = 0; i < ARRAY_SIZE(_kernel.ready_q.runq.queues); i++) {
Anas Nashif477a04a2024-02-28 08:15:15 -05001007 sys_dlist_init(&ready_q->runq.queues[i]);
Andy Ross9f06a352018-06-28 10:38:14 -07001008 }
Andy Rossb155d062021-09-24 13:49:14 -07001009#else
Anas Nashif477a04a2024-02-28 08:15:15 -05001010 sys_dlist_init(&ready_q->runq);
Andy Ross9f06a352018-06-28 10:38:14 -07001011#endif
Andy Rossb155d062021-09-24 13:49:14 -07001012}
1013
1014void z_sched_init(void)
1015{
Andy Rossb11e7962021-09-24 10:57:39 -07001016#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
Nicolas Pitre907eea02023-03-16 17:54:25 -04001017 for (int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
Andy Rossb11e7962021-09-24 10:57:39 -07001018 init_ready_q(&_kernel.cpus[i].ready_q);
1019 }
1020#else
Andy Rossb155d062021-09-24 13:49:14 -07001021 init_ready_q(&_kernel.ready_q);
Simon Heinbcd1d192024-03-08 12:00:10 +01001022#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001023}
1024
Anas Nashif25c87db2021-03-29 10:54:23 -04001025void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001026{
Benjamin Walsh3cc2ba92016-11-08 15:44:05 -05001027 /*
1028 * Use NULL, since we cannot know what the entry point is (we do not
1029 * keep track of it) and idle cannot change its priority.
1030 */
Patrik Flykt4344e272019-03-08 14:19:05 -07001031 Z_ASSERT_VALID_PRIO(prio, NULL);
Andrew Boie4f77c2a2019-11-07 12:43:29 -08001032 __ASSERT(!arch_is_in_isr(), "");
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001033
Anas Nashif868f0992024-02-24 11:37:56 -05001034 bool need_sched = z_thread_prio_set((struct k_thread *)thread, prio);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001035
Peter Mitsis9ff52212024-03-01 14:44:26 -05001036 if ((need_sched) && (IS_ENABLED(CONFIG_SMP) ||
1037 (_current->base.sched_locked == 0U))) {
Anas Nashif5e591c32024-02-24 10:37:06 -05001038 z_reschedule_unlocked();
1039 }
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001040}
1041
Andrew Boie468190a2017-09-29 14:00:48 -07001042#ifdef CONFIG_USERSPACE
Andy Ross65649742019-08-06 13:34:31 -07001043static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio)
Andrew Boie468190a2017-09-29 14:00:48 -07001044{
Anas Nashifa08bfeb2023-09-27 11:20:28 +00001045 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1046 K_OOPS(K_SYSCALL_VERIFY_MSG(_is_valid_prio(prio, NULL),
Andy Ross65649742019-08-06 13:34:31 -07001047 "invalid thread priority %d", prio));
Anas Nashif5e591c32024-02-24 10:37:06 -05001048#ifndef CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY
Anas Nashifa08bfeb2023-09-27 11:20:28 +00001049 K_OOPS(K_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio,
Andrew Boie8345e5e2018-05-04 15:57:57 -07001050 "thread priority may only be downgraded (%d < %d)",
1051 prio, thread->base.prio));
Simon Heinbcd1d192024-03-08 12:00:10 +01001052#endif /* CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY */
Andy Ross65649742019-08-06 13:34:31 -07001053 z_impl_k_thread_priority_set(thread, prio);
Andrew Boie468190a2017-09-29 14:00:48 -07001054}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001055#include <zephyr/syscalls/k_thread_priority_set_mrsh.c>
Simon Heinbcd1d192024-03-08 12:00:10 +01001056#endif /* CONFIG_USERSPACE */
Andrew Boie468190a2017-09-29 14:00:48 -07001057
Andy Ross4a2e50f2018-05-15 11:06:25 -07001058#ifdef CONFIG_SCHED_DEADLINE
Patrik Flykt4344e272019-03-08 14:19:05 -07001059void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
Andy Ross4a2e50f2018-05-15 11:06:25 -07001060{
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001061 struct k_thread *thread = tid;
Andy Rossf2280d12024-03-08 08:42:08 -08001062 int32_t newdl = k_cycle_get_32() + deadline;
Andy Ross4a2e50f2018-05-15 11:06:25 -07001063
Andy Rossf2280d12024-03-08 08:42:08 -08001064 /* The prio_deadline field changes the sorting order, so can't
1065 * change it while the thread is in the run queue (dlists
1066 * actually are benign as long as we requeue it before we
1067 * release the lock, but an rbtree will blow up if we break
1068 * sorting!)
1069 */
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001070 K_SPINLOCK(&_sched_spinlock) {
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001071 if (z_is_thread_queued(thread)) {
Andy Rossc230fb32021-09-23 16:41:30 -07001072 dequeue_thread(thread);
Andy Rossf2280d12024-03-08 08:42:08 -08001073 thread->base.prio_deadline = newdl;
Andy Rossc230fb32021-09-23 16:41:30 -07001074 queue_thread(thread);
Andy Rossf2280d12024-03-08 08:42:08 -08001075 } else {
1076 thread->base.prio_deadline = newdl;
Andy Ross4a2e50f2018-05-15 11:06:25 -07001077 }
1078 }
1079}
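/* Illustrative sketch (an assumption, not taken from this file): with
 * CONFIG_SCHED_DEADLINE enabled, an EDF-style thread would typically refresh
 * its own deadline at the start of each period, expressed in hardware cycles
 * relative to "now", before doing that period's work.
 */
static inline void example_edf_start_of_period(uint32_t period_ms)
{
	/* The deadline argument is relative: cycles from the current time. */
	k_thread_deadline_set(k_current_get(), (int)k_ms_to_cyc_ceil32(period_ms));
}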
1080
1081#ifdef CONFIG_USERSPACE
Andy Ross075c94f2019-08-13 11:34:34 -07001082static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline)
Andy Ross4a2e50f2018-05-15 11:06:25 -07001083{
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001084 struct k_thread *thread = tid;
Andy Ross4a2e50f2018-05-15 11:06:25 -07001085
Anas Nashifa08bfeb2023-09-27 11:20:28 +00001086 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1087 K_OOPS(K_SYSCALL_VERIFY_MSG(deadline > 0,
Andy Ross4a2e50f2018-05-15 11:06:25 -07001088 "invalid thread deadline %d",
1089 (int)deadline));
1090
Patrik Flykt4344e272019-03-08 14:19:05 -07001091 z_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
Andy Ross4a2e50f2018-05-15 11:06:25 -07001092}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001093#include <zephyr/syscalls/k_thread_deadline_set_mrsh.c>
Simon Heinbcd1d192024-03-08 12:00:10 +01001094#endif /* CONFIG_USERSPACE */
1095#endif /* CONFIG_SCHED_DEADLINE */
Andy Ross4a2e50f2018-05-15 11:06:25 -07001096
Jordan Yates1ef647f2022-03-26 09:55:23 +10001097bool k_can_yield(void)
1098{
1099 return !(k_is_pre_kernel() || k_is_in_isr() ||
1100 z_is_idle_thread_object(_current));
1101}
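/* Illustrative sketch: k_can_yield() lets code shared between thread, ISR,
 * pre-kernel and idle contexts yield only when doing so is legal and
 * meaningful. The helper name is an assumption for this example.
 */
static inline void example_yield_if_possible(void)
{
	if (k_can_yield()) {
		k_yield();
	}
}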
1102
Patrik Flykt4344e272019-03-08 14:19:05 -07001103void z_impl_k_yield(void)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001104{
Andrew Boie4f77c2a2019-11-07 12:43:29 -08001105 __ASSERT(!arch_is_in_isr(), "");
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001106
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001107 SYS_PORT_TRACING_FUNC(k_thread, yield);
1108
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001109 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
James Harris6543e062021-03-01 10:14:13 -08001110
Andy Ross851d14a2021-05-13 15:46:43 -07001111 if (!IS_ENABLED(CONFIG_SMP) ||
1112 z_is_thread_queued(_current)) {
Andy Rossc230fb32021-09-23 16:41:30 -07001113 dequeue_thread(_current);
Andy Ross1acd8c22018-05-03 14:51:49 -07001114 }
Andy Rossc230fb32021-09-23 16:41:30 -07001115 queue_thread(_current);
Andy Ross851d14a2021-05-13 15:46:43 -07001116 update_cache(1);
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001117 z_swap(&_sched_spinlock, key);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001118}
1119
Andrew Boie468190a2017-09-29 14:00:48 -07001120#ifdef CONFIG_USERSPACE
Andy Ross65649742019-08-06 13:34:31 -07001121static inline void z_vrfy_k_yield(void)
1122{
1123 z_impl_k_yield();
1124}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001125#include <zephyr/syscalls/k_yield_mrsh.c>
Simon Heinbcd1d192024-03-08 12:00:10 +01001126#endif /* CONFIG_USERSPACE */
Andrew Boie468190a2017-09-29 14:00:48 -07001127
Flavio Ceolin7a815d52020-10-19 21:37:22 -07001128static int32_t z_tick_sleep(k_ticks_t ticks)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001129{
Flavio Ceolin9a160972020-11-16 10:40:46 -08001130 uint32_t expected_wakeup_ticks;
Carles Cufi9849df82016-12-02 15:31:08 +01001131
Andrew Boie4f77c2a2019-11-07 12:43:29 -08001132 __ASSERT(!arch_is_in_isr(), "");
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001133
Gerard Marull-Paretas737d7992022-11-23 13:42:04 +01001134 LOG_DBG("thread %p for %lu ticks", _current, (unsigned long)ticks);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001135
Benjamin Walsh5596f782016-12-09 19:57:17 -05001136	/* A wait of 0 ticks is treated as a 'yield' */
Charles E. Youseb1863032019-05-08 13:22:46 -07001137 if (ticks == 0) {
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001138 k_yield();
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001139 return 0;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001140 }
1141
Lauren Murphy4c85b462021-05-25 17:49:28 -05001142 if (Z_TICK_ABS(ticks) <= 0) {
1143 expected_wakeup_ticks = ticks + sys_clock_tick_get_32();
1144 } else {
1145 expected_wakeup_ticks = Z_TICK_ABS(ticks);
1146 }
Andy Rossd27d4e62019-02-05 15:36:01 -08001147
Gerson Fernando Budkeb8188e52023-10-16 20:15:31 +02001148 k_timeout_t timeout = Z_TIMEOUT_TICKS(ticks);
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001149 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001150
Andy Rossdff6b712019-02-25 21:17:29 -08001151#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
1152 pending_current = _current;
Simon Heinbcd1d192024-03-08 12:00:10 +01001153#endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
Andrew Boiea8775ab2020-09-05 12:53:42 -07001154 unready_thread(_current);
Andy Ross78327382020-03-05 15:18:14 -08001155 z_add_thread_timeout(_current, timeout);
Andy Ross4521e0c2019-03-22 10:30:19 -07001156 z_mark_thread_as_suspended(_current);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001157
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001158 (void)z_swap(&_sched_spinlock, key);
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001159
Andy Ross4521e0c2019-03-22 10:30:19 -07001160 __ASSERT(!z_is_thread_state_set(_current, _THREAD_SUSPENDED), "");
1161
Anas Nashif5c90ceb2021-03-13 08:19:53 -05001162 ticks = (k_ticks_t)expected_wakeup_ticks - sys_clock_tick_get_32();
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001163 if (ticks > 0) {
Charles E. Youseb1863032019-05-08 13:22:46 -07001164 return ticks;
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001165 }
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001166
1167 return 0;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001168}
1169
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001170int32_t z_impl_k_sleep(k_timeout_t timeout)
Charles E. Youseb1863032019-05-08 13:22:46 -07001171{
Andy Ross78327382020-03-05 15:18:14 -08001172 k_ticks_t ticks;
Charles E. Youseb1863032019-05-08 13:22:46 -07001173
Peter Bigot8162e582019-12-12 16:07:07 -06001174 __ASSERT(!arch_is_in_isr(), "");
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001175
1176 SYS_PORT_TRACING_FUNC_ENTER(k_thread, sleep, timeout);
Peter Bigot8162e582019-12-12 16:07:07 -06001177
Anas Nashifd2c71792020-10-17 07:52:17 -04001178 /* in case of K_FOREVER, we suspend */
Andy Ross78327382020-03-05 15:18:14 -08001179 if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
Anas Nashif20b2c982024-03-28 10:09:26 -04001180
Andrew Boied2b89222019-11-08 10:44:22 -08001181 k_thread_suspend(_current);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001182 SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, (int32_t) K_TICKS_FOREVER);
1183
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001184 return (int32_t) K_TICKS_FOREVER;
Andrew Boied2b89222019-11-08 10:44:22 -08001185 }
1186
Andy Ross78327382020-03-05 15:18:14 -08001187 ticks = timeout.ticks;
Andy Ross78327382020-03-05 15:18:14 -08001188
Charles E. Youseb1863032019-05-08 13:22:46 -07001189 ticks = z_tick_sleep(ticks);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001190
Peter Mitsisa3e5af92023-12-05 13:40:19 -05001191 int32_t ret = k_ticks_to_ms_ceil64(ticks);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001192
1193 SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, ret);
1194
1195 return ret;
Charles E. Youseb1863032019-05-08 13:22:46 -07001196}
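/* Illustrative sketch: k_sleep() returns the time remaining (in ms) when the
 * sleep is cut short by k_wakeup(), so callers can detect an early wakeup.
 * The helper name and the 500 ms figure are assumptions for this example.
 */
static inline bool example_sleep_was_interrupted(void)
{
	int32_t left_ms = k_sleep(K_MSEC(500));

	return left_ms > 0;
}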
1197
Andrew Boie76c04a22017-09-27 14:45:10 -07001198#ifdef CONFIG_USERSPACE
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001199static inline int32_t z_vrfy_k_sleep(k_timeout_t timeout)
Andrew Boie76c04a22017-09-27 14:45:10 -07001200{
Andy Ross78327382020-03-05 15:18:14 -08001201 return z_impl_k_sleep(timeout);
Charles E. Yousea5678312019-05-09 16:46:46 -07001202}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001203#include <zephyr/syscalls/k_sleep_mrsh.c>
Simon Heinbcd1d192024-03-08 12:00:10 +01001204#endif /* CONFIG_USERSPACE */
Charles E. Yousea5678312019-05-09 16:46:46 -07001205
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001206int32_t z_impl_k_usleep(int us)
Charles E. Yousea5678312019-05-09 16:46:46 -07001207{
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001208 int32_t ticks;
Charles E. Yousea5678312019-05-09 16:46:46 -07001209
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001210 SYS_PORT_TRACING_FUNC_ENTER(k_thread, usleep, us);
1211
Andy Ross88924062019-10-03 11:43:10 -07001212 ticks = k_us_to_ticks_ceil64(us);
Charles E. Yousea5678312019-05-09 16:46:46 -07001213 ticks = z_tick_sleep(ticks);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001214
Peter Mitsisa3e5af92023-12-05 13:40:19 -05001215 int32_t ret = k_ticks_to_us_ceil64(ticks);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001216
Peter Mitsisa3e5af92023-12-05 13:40:19 -05001217 SYS_PORT_TRACING_FUNC_EXIT(k_thread, usleep, us, ret);
1218
1219 return ret;
Charles E. Yousea5678312019-05-09 16:46:46 -07001220}
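/* Illustrative sketch: k_usleep() gives a short, yielding delay. Because the
 * request is rounded up to ticks, very small values may still sleep for a
 * full tick. The 50 microsecond figure is an assumption for this example.
 */
static inline void example_short_settle_delay(void)
{
	(void)k_usleep(50);
}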
1221
1222#ifdef CONFIG_USERSPACE
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001223static inline int32_t z_vrfy_k_usleep(int us)
Charles E. Yousea5678312019-05-09 16:46:46 -07001224{
1225 return z_impl_k_usleep(us);
Andrew Boie76c04a22017-09-27 14:45:10 -07001226}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001227#include <zephyr/syscalls/k_usleep_mrsh.c>
Simon Heinbcd1d192024-03-08 12:00:10 +01001228#endif /* CONFIG_USERSPACE */
Andrew Boie76c04a22017-09-27 14:45:10 -07001229
Patrik Flykt4344e272019-03-08 14:19:05 -07001230void z_impl_k_wakeup(k_tid_t thread)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001231{
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001232 SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread);
1233
Patrik Flykt4344e272019-03-08 14:19:05 -07001234 if (z_is_thread_pending(thread)) {
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001235 return;
1236 }
1237
Patrik Flykt4344e272019-03-08 14:19:05 -07001238 if (z_abort_thread_timeout(thread) < 0) {
Andrew Boied2b89222019-11-08 10:44:22 -08001239 /* Might have just been sleeping forever */
1240 if (thread->base.thread_state != _THREAD_SUSPENDED) {
1241 return;
1242 }
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001243 }
1244
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001245 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
Peter Mitsis51ae9932024-02-20 11:50:54 -05001246
Andy Ross4521e0c2019-03-22 10:30:19 -07001247 z_mark_thread_as_not_suspended(thread);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001248
Peter Mitsis9ff52212024-03-01 14:44:26 -05001249 if (thread_active_elsewhere(thread) == NULL) {
Peter Mitsis51ae9932024-02-20 11:50:54 -05001250 ready_thread(thread);
1251 }
Andy Ross5737b5c2020-02-04 13:52:09 -08001252
Peter Mitsis51ae9932024-02-20 11:50:54 -05001253 if (arch_is_in_isr()) {
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001254 k_spin_unlock(&_sched_spinlock, key);
Peter Mitsis51ae9932024-02-20 11:50:54 -05001255 } else {
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001256 z_reschedule(&_sched_spinlock, key);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001257 }
1258}
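/* Illustrative sketch: k_wakeup() is typically used to cut short another
 * thread's k_sleep(). The helper name is an assumption for this example; as
 * implemented above, calling it from an ISR defers the reschedule to the
 * interrupt exit path.
 */
static inline void example_nudge_sleeper(k_tid_t sleeper)
{
	k_wakeup(sleeper);
}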
1259
Andrew Boie468190a2017-09-29 14:00:48 -07001260#ifdef CONFIG_USERSPACE
Andy Ross65649742019-08-06 13:34:31 -07001261static inline void z_vrfy_k_wakeup(k_tid_t thread)
1262{
Anas Nashifa08bfeb2023-09-27 11:20:28 +00001263 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
Andy Ross65649742019-08-06 13:34:31 -07001264 z_impl_k_wakeup(thread);
1265}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001266#include <zephyr/syscalls/k_wakeup_mrsh.c>
Simon Heinbcd1d192024-03-08 12:00:10 +01001267#endif /* CONFIG_USERSPACE */
Andrew Boie468190a2017-09-29 14:00:48 -07001268
Daniel Leung0a50ff32023-09-25 11:56:10 -07001269k_tid_t z_impl_k_sched_current_thread_query(void)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001270{
Andy Rosseefd3da2020-02-06 13:39:52 -08001271#ifdef CONFIG_SMP
1272 /* In SMP, _current is a field read from _current_cpu, which
1273 * can race with preemption before it is read. We must lock
1274 * local interrupts when reading it.
1275 */
1276 unsigned int k = arch_irq_lock();
Simon Heinbcd1d192024-03-08 12:00:10 +01001277#endif /* CONFIG_SMP */
Andy Rosseefd3da2020-02-06 13:39:52 -08001278
1279 k_tid_t ret = _current_cpu->current;
1280
1281#ifdef CONFIG_SMP
1282 arch_irq_unlock(k);
Simon Heinbcd1d192024-03-08 12:00:10 +01001283#endif /* CONFIG_SMP */
Andy Rosseefd3da2020-02-06 13:39:52 -08001284 return ret;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001285}
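/* Illustrative note: this syscall typically backs k_current_get() when the
 * current thread pointer is not kept in thread-local storage. A sketch of a
 * caller that tags diagnostics with the running thread (the helper name is
 * an assumption; k_thread_name_get() may return NULL if thread names are
 * disabled):
 */
static inline const char *example_current_thread_name(void)
{
	return k_thread_name_get(k_sched_current_thread_query());
}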
1286
Andrew Boie76c04a22017-09-27 14:45:10 -07001287#ifdef CONFIG_USERSPACE
Daniel Leung0a50ff32023-09-25 11:56:10 -07001288static inline k_tid_t z_vrfy_k_sched_current_thread_query(void)
Andy Ross65649742019-08-06 13:34:31 -07001289{
Daniel Leung0a50ff32023-09-25 11:56:10 -07001290 return z_impl_k_sched_current_thread_query();
Andy Ross65649742019-08-06 13:34:31 -07001291}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001292#include <zephyr/syscalls/k_sched_current_thread_query_mrsh.c>
Simon Heinbcd1d192024-03-08 12:00:10 +01001293#endif /* CONFIG_USERSPACE */
Andrew Boie76c04a22017-09-27 14:45:10 -07001294
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001295static inline void unpend_all(_wait_q_t *wait_q)
1296{
1297 struct k_thread *thread;
1298
Hess Nathan20b55422024-05-02 14:02:20 +02001299 for (thread = z_waitq_head(wait_q); thread != NULL; thread = z_waitq_head(wait_q)) {
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001300 unpend_thread_no_timeout(thread);
1301 (void)z_abort_thread_timeout(thread);
1302 arch_thread_return_value_set(thread, 0);
1303 ready_thread(thread);
1304 }
1305}
1306
Anas Nashifa6ce4222024-02-22 14:10:17 -05001307#ifdef CONFIG_THREAD_ABORT_HOOK
1308extern void thread_abort_hook(struct k_thread *thread);
Simon Heinbcd1d192024-03-08 12:00:10 +01001309#endif /* CONFIG_THREAD_ABORT_HOOK */
Chen Peng10f63d112021-09-06 13:59:40 +08001310
Peter Mitsise1db1ce2023-08-14 14:06:52 -04001311/**
1312 * @brief Dequeues the specified thread
1313 *
1314 * Dequeues the specified thread and moves it into the specified new state.
1315 *
1316 * @param thread The thread to halt
Peter Mitsise7986eb2023-08-14 16:41:05 -04001317 * @param new_state New thread state (_THREAD_DEAD or _THREAD_SUSPENDED)
Peter Mitsise1db1ce2023-08-14 14:06:52 -04001318 */
1319static void halt_thread(struct k_thread *thread, uint8_t new_state)
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001320{
Andy Rossf0fd54c2024-03-26 08:38:01 -04001321 bool dummify = false;
1322
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001323 /* We hold the lock, and the thread is known not to be running
1324 * anywhere.
1325 */
Peter Mitsise1db1ce2023-08-14 14:06:52 -04001326 if ((thread->base.thread_state & new_state) == 0U) {
1327 thread->base.thread_state |= new_state;
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001328 if (z_is_thread_queued(thread)) {
Andy Rossc230fb32021-09-23 16:41:30 -07001329 dequeue_thread(thread);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001330 }
Peter Mitsise7986eb2023-08-14 16:41:05 -04001331
1332 if (new_state == _THREAD_DEAD) {
1333 if (thread->base.pended_on != NULL) {
1334 unpend_thread_no_timeout(thread);
1335 }
1336 (void)z_abort_thread_timeout(thread);
1337 unpend_all(&thread->join_queue);
Andy Rossf0fd54c2024-03-26 08:38:01 -04001338
1339 /* Edge case: aborting _current from within an
1340 * ISR that preempted it requires clearing the
1341 * _current pointer so the upcoming context
1342 * switch doesn't clobber the now-freed
1343 * memory
1344 */
1345 if (thread == _current && arch_is_in_isr()) {
1346 dummify = true;
1347 }
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001348 }
Peter Mitsise7986eb2023-08-14 16:41:05 -04001349#ifdef CONFIG_SMP
1350 unpend_all(&thread->halt_queue);
Simon Heinbcd1d192024-03-08 12:00:10 +01001351#endif /* CONFIG_SMP */
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001352 update_cache(1);
1353
Peter Mitsise7986eb2023-08-14 16:41:05 -04001354 if (new_state == _THREAD_SUSPENDED) {
Andy Ross47ab6632024-04-19 15:08:55 -07001355 clear_halting(thread);
Peter Mitsise7986eb2023-08-14 16:41:05 -04001356 return;
1357 }
1358
Grant Ramsay45701e62023-08-14 09:41:52 +12001359#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
1360 arch_float_disable(thread);
Simon Heinbcd1d192024-03-08 12:00:10 +01001361#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
Grant Ramsay45701e62023-08-14 09:41:52 +12001362
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001363 SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread);
1364
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001365 z_thread_monitor_exit(thread);
Anas Nashifa6ce4222024-02-22 14:10:17 -05001366#ifdef CONFIG_THREAD_ABORT_HOOK
1367 thread_abort_hook(thread);
Simon Heinbcd1d192024-03-08 12:00:10 +01001368#endif /* CONFIG_THREAD_ABORT_HOOK */
Chen Peng10f63d112021-09-06 13:59:40 +08001369
Peter Mitsis6df8efe2023-05-11 14:06:46 -04001370#ifdef CONFIG_OBJ_CORE_THREAD
Peter Mitsise6f10902023-06-01 12:16:40 -04001371#ifdef CONFIG_OBJ_CORE_STATS_THREAD
1372 k_obj_core_stats_deregister(K_OBJ_CORE(thread));
Simon Heinbcd1d192024-03-08 12:00:10 +01001373#endif /* CONFIG_OBJ_CORE_STATS_THREAD */
Peter Mitsis6df8efe2023-05-11 14:06:46 -04001374 k_obj_core_unlink(K_OBJ_CORE(thread));
Simon Heinbcd1d192024-03-08 12:00:10 +01001375#endif /* CONFIG_OBJ_CORE_THREAD */
Peter Mitsis6df8efe2023-05-11 14:06:46 -04001376
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001377#ifdef CONFIG_USERSPACE
1378 z_mem_domain_exit_thread(thread);
Anas Nashif70cf96b2023-09-27 10:45:48 +00001379 k_thread_perms_all_clear(thread);
Anas Nashif7a18c2b2023-09-27 10:45:18 +00001380 k_object_uninit(thread->stack_obj);
1381 k_object_uninit(thread);
Simon Heinbcd1d192024-03-08 12:00:10 +01001382#endif /* CONFIG_USERSPACE */
Daniel Leung378131c2024-03-26 11:54:31 -07001383
1384#ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
1385 k_thread_abort_cleanup(thread);
1386#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
Andy Rossf0fd54c2024-03-26 08:38:01 -04001387
1388 /* Do this "set _current to dummy" step last so that
1389 * subsystems above can rely on _current being
1390 * unchanged. Disabled for posix as that arch
1391 * continues to use the _current pointer in its swap
Andy Rossdec022a2024-04-29 12:50:41 -07001392 * code. Note that we must leave a non-null switch
1393 * handle for any threads spinning in join() (this can
1394 * never be used, as our thread is flagged dead, but
1395 * it must not be NULL otherwise join can deadlock).
Andy Rossf0fd54c2024-03-26 08:38:01 -04001396 */
1397 if (dummify && !IS_ENABLED(CONFIG_ARCH_POSIX)) {
Andy Rossdec022a2024-04-29 12:50:41 -07001398#ifdef CONFIG_USE_SWITCH
1399 _current->switch_handle = _current;
1400#endif
Andy Rossfd340eb2024-04-19 15:03:09 -07001401 z_dummy_thread_init(&_thread_dummy);
Andy Rossdec022a2024-04-29 12:50:41 -07001402
Andy Rossf0fd54c2024-03-26 08:38:01 -04001403 }
Andy Ross47ab6632024-04-19 15:08:55 -07001404
1405 /* Finally update the halting thread state, on which
1406 * other CPUs might be spinning (see
1407 * thread_halt_spin()).
1408 */
1409 clear_halting(thread);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001410 }
1411}
1412
1413void z_thread_abort(struct k_thread *thread)
1414{
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001415 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001416
Anas Nashif87910122024-02-22 22:24:36 -05001417 if (z_is_thread_essential(thread)) {
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001418 k_spin_unlock(&_sched_spinlock, key);
Andy Rossfb613592022-05-19 12:55:28 -07001419 __ASSERT(false, "aborting essential thread %p", thread);
1420 k_panic();
1421 return;
1422 }
1423
Anas Nashif3f4f3f62021-03-29 17:13:47 -04001424 if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001425 k_spin_unlock(&_sched_spinlock, key);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001426 return;
1427 }
1428
Peter Mitsise7986eb2023-08-14 16:41:05 -04001429 z_thread_halt(thread, key, true);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001430}
1431
1432#if !defined(CONFIG_ARCH_HAS_THREAD_ABORT)
1433void z_impl_k_thread_abort(struct k_thread *thread)
1434{
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001435 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread);
1436
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001437 z_thread_abort(thread);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001438
Andy Rossdec022a2024-04-29 12:50:41 -07001439 __ASSERT_NO_MSG((thread->base.thread_state & _THREAD_DEAD) != 0);
1440
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001441 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001442}
Simon Heinbcd1d192024-03-08 12:00:10 +01001443#endif /* !CONFIG_ARCH_HAS_THREAD_ABORT */
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001444
1445int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
1446{
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001447 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
Hess Nathan7659cfd2024-04-29 16:31:47 +02001448 int ret;
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001449
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001450 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, join, thread, timeout);
1451
Anas Nashif3f4f3f62021-03-29 17:13:47 -04001452 if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
Andy Rossa08e23f2023-05-26 09:39:16 -07001453 z_sched_switch_spin(thread);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001454 ret = 0;
1455 } else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
1456 ret = -EBUSY;
Anas Nashif3f4f3f62021-03-29 17:13:47 -04001457 } else if ((thread == _current) ||
1458 (thread->base.pended_on == &_current->join_queue)) {
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001459 ret = -EDEADLK;
1460 } else {
1461 __ASSERT(!arch_is_in_isr(), "cannot join in ISR");
1462 add_to_waitq_locked(_current, &thread->join_queue);
1463 add_thread_timeout(_current, timeout);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001464
1465 SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout);
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001466 ret = z_swap(&_sched_spinlock, key);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001467 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);
1468
1469 return ret;
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001470 }
1471
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001472 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);
1473
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001474 k_spin_unlock(&_sched_spinlock, key);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001475 return ret;
1476}
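/* Illustrative sketch: a common shutdown pattern built on the join
 * implementation above waits, with a bounded timeout, for a worker to exit
 * before its stack is reused. The helper name and the one-second bound are
 * assumptions for this example.
 */
static inline int example_wait_for_worker(struct k_thread *worker)
{
	/* 0 once the worker is dead, -EAGAIN if it is still running after
	 * one second, -EDEADLK if it is in turn joining on us.
	 */
	return k_thread_join(worker, K_SECONDS(1));
}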
1477
Andrew Boie322816e2020-02-20 16:33:06 -08001478#ifdef CONFIG_USERSPACE
1479/* Special case: don't oops if the thread is uninitialized. This is because
1480 * the initialization bit does double-duty for thread objects; if false, it means
1481 * the thread object is truly uninitialized, or the thread ran and exited for
1482 * some reason.
1483 *
1484 * Return true in this case indicating we should just do nothing and return
1485 * success to the caller.
1486 */
1487static bool thread_obj_validate(struct k_thread *thread)
1488{
Anas Nashifc25d0802023-09-27 10:49:28 +00001489 struct k_object *ko = k_object_find(thread);
Anas Nashif21254b22023-09-27 10:50:26 +00001490 int ret = k_object_validate(ko, K_OBJ_THREAD, _OBJ_INIT_TRUE);
Andrew Boie322816e2020-02-20 16:33:06 -08001491
1492 switch (ret) {
1493 case 0:
1494 return false;
1495 case -EINVAL:
1496 return true;
1497 default:
1498#ifdef CONFIG_LOG
Anas Nashif3ab35662023-09-27 10:51:23 +00001499 k_object_dump_error(ret, thread, ko, K_OBJ_THREAD);
Simon Heinbcd1d192024-03-08 12:00:10 +01001500#endif /* CONFIG_LOG */
Anas Nashifa08bfeb2023-09-27 11:20:28 +00001501 K_OOPS(K_SYSCALL_VERIFY_MSG(ret, "access denied"));
Andrew Boie322816e2020-02-20 16:33:06 -08001502 }
Enjia Mai53ca7092021-01-15 17:09:58 +08001503 CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
Andrew Boie322816e2020-02-20 16:33:06 -08001504}
1505
Andy Ross78327382020-03-05 15:18:14 -08001506static inline int z_vrfy_k_thread_join(struct k_thread *thread,
1507 k_timeout_t timeout)
Andrew Boie322816e2020-02-20 16:33:06 -08001508{
1509 if (thread_obj_validate(thread)) {
1510 return 0;
1511 }
1512
1513 return z_impl_k_thread_join(thread, timeout);
1514}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001515#include <zephyr/syscalls/k_thread_join_mrsh.c>
Andrew Boiea4c91902020-03-24 16:09:24 -07001516
1517static inline void z_vrfy_k_thread_abort(k_tid_t thread)
1518{
1519 if (thread_obj_validate(thread)) {
1520 return;
1521 }
1522
Anas Nashif87910122024-02-22 22:24:36 -05001523 K_OOPS(K_SYSCALL_VERIFY_MSG(!z_is_thread_essential(thread),
Andrew Boiea4c91902020-03-24 16:09:24 -07001524 "aborting essential thread %p", thread));
1525
1526 z_impl_k_thread_abort((struct k_thread *)thread);
1527}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001528#include <zephyr/syscalls/k_thread_abort_mrsh.c>
Andrew Boie322816e2020-02-20 16:33:06 -08001529#endif /* CONFIG_USERSPACE */
Peter Bigot0259c862021-01-12 13:45:32 -06001530
1531/*
1532 * future scheduler.h API implementations
1533 */
1534bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data)
1535{
1536 struct k_thread *thread;
1537 bool ret = false;
1538
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001539 K_SPINLOCK(&_sched_spinlock) {
Peter Bigot0259c862021-01-12 13:45:32 -06001540 thread = _priq_wait_best(&wait_q->waitq);
1541
1542 if (thread != NULL) {
1543 z_thread_return_value_set_with_data(thread,
1544 swap_retval,
1545 swap_data);
1546 unpend_thread_no_timeout(thread);
1547 (void)z_abort_thread_timeout(thread);
1548 ready_thread(thread);
1549 ret = true;
1550 }
1551 }
1552
1553 return ret;
1554}
1555
1556int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
1557 _wait_q_t *wait_q, k_timeout_t timeout, void **data)
1558{
1559 int ret = z_pend_curr(lock, key, wait_q, timeout);
1560
1561 if (data != NULL) {
1562 *data = _current->base.swap_data;
1563 }
1564 return ret;
1565}
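/* Illustrative sketch of how z_sched_wake() and z_sched_wait() compose into
 * a minimal IPC object: a "signal" handing one pointer from a giver to at
 * most one waiter. The object, field and function names are assumptions for
 * this example; real primitives (semaphores, queues, ...) follow the same
 * pattern.
 */
static struct example_signal {
	struct k_spinlock lock;
	_wait_q_t waitq;
} example_sig = {
	.waitq = Z_WAIT_Q_INIT(&example_sig.waitq),
};

static inline void example_signal_give(void *msg)
{
	K_SPINLOCK(&example_sig.lock) {
		/* Wake at most one waiter, handing it msg and a swap return
		 * value of 0; z_sched_wake() returns false if nobody was
		 * pended.
		 */
		(void)z_sched_wake(&example_sig.waitq, 0, msg);
	}
}

static inline int example_signal_take(k_timeout_t timeout, void **msg)
{
	k_spinlock_key_t key = k_spin_lock(&example_sig.lock);

	/* Pends the caller, dropping the lock across the wait. Returns the
	 * giver's 0 on success or -EAGAIN on timeout; *msg is filled from
	 * the thread's swap_data, which on success is the giver's payload.
	 */
	return z_sched_wait(&example_sig.lock, key, &example_sig.waitq,
			    timeout, msg);
}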
Peter Mitsisca583392023-01-05 11:50:21 -05001566
1567int z_sched_waitq_walk(_wait_q_t *wait_q,
1568 int (*func)(struct k_thread *, void *), void *data)
1569{
1570 struct k_thread *thread;
1571 int status = 0;
1572
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001573 K_SPINLOCK(&_sched_spinlock) {
Peter Mitsisca583392023-01-05 11:50:21 -05001574 _WAIT_Q_FOR_EACH(wait_q, thread) {
1575
1576 /*
1577 * Invoke the callback function on each waiting thread
1578 * for as long as there are both waiting threads AND
1579 * the callback returns 0.
1580 */
1581
1582 status = func(thread, data);
1583 if (status != 0) {
1584 break;
1585 }
1586 }
1587 }
1588
1589 return status;
1590}
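/* Illustrative sketch: a walk callback lets an IPC object inspect its
 * waiters without unpending them, for instance to count them. The callback
 * and helper names are assumptions for this example.
 */
static __maybe_unused int example_count_one_waiter(struct k_thread *thread, void *data)
{
	ARG_UNUSED(thread);

	*(int *)data += 1;

	return 0;	/* keep walking */
}

static inline int example_count_waiters(_wait_q_t *wait_q)
{
	int count = 0;

	(void)z_sched_waitq_walk(wait_q, example_count_one_waiter, &count);

	return count;
}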