/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/spinlock.h>
#include <wait_q.h>
#include <kthread.h>
#include <priority_q.h>
#include <kswap.h>
#include <ipi.h>
#include <kernel_arch_func.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <stdbool.h>
#include <kernel_internal.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/timing/timing.h>
#include <zephyr/sys/util.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#if defined(CONFIG_SWAP_NONATOMIC) && defined(CONFIG_TIMESLICING)
extern struct k_thread *pending_current;
#endif

struct k_spinlock _sched_spinlock;

/* Storage to "complete" the context switch from an invalid/incomplete thread
 * context (ex: exiting an ISR that aborted _current)
 */
__incoherent struct k_thread _thread_dummy;

static void update_cache(int preempt_ok);
static void halt_thread(struct k_thread *thread, uint8_t new_state);
static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q);


BUILD_ASSERT(CONFIG_NUM_COOP_PRIORITIES >= CONFIG_NUM_METAIRQ_PRIORITIES,
	     "You need to provide at least as many CONFIG_NUM_COOP_PRIORITIES as "
	     "CONFIG_NUM_METAIRQ_PRIORITIES as Meta IRQs are just a special class of cooperative "
	     "threads.");

/*
 * Return value same as e.g. memcmp
 * > 0 -> thread 1 priority  > thread 2 priority
 * = 0 -> thread 1 priority == thread 2 priority
 * < 0 -> thread 1 priority  < thread 2 priority
 * Do not rely on the actual value returned aside from the above.
 * (Again, like memcmp.)
 */
int32_t z_sched_prio_cmp(struct k_thread *thread_1,
			 struct k_thread *thread_2)
{
	/* `prio` is <32b, so the below cannot overflow. */
	int32_t b1 = thread_1->base.prio;
	int32_t b2 = thread_2->base.prio;

	if (b1 != b2) {
		return b2 - b1;
	}

#ifdef CONFIG_SCHED_DEADLINE
	/* If we assume all deadlines live within the same "half" of
	 * the 32 bit modulus space (this is a documented API rule),
	 * then the latest deadline in the queue minus the earliest is
	 * guaranteed to be (2's complement) non-negative.  We can
	 * leverage that to compare the values without having to check
	 * the current time.
	 */
	uint32_t d1 = thread_1->base.prio_deadline;
	uint32_t d2 = thread_2->base.prio_deadline;

	if (d1 != d2) {
		/* Sooner deadline means higher effective priority.
		 * Doing the calculation with unsigned types and casting
		 * to signed isn't perfect, but at least reduces this
		 * from UB on overflow to impdef.
		 */
		return (int32_t) (d2 - d1);
	}
#endif /* CONFIG_SCHED_DEADLINE */
	return 0;
}

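/* Helpers selecting the run queue that backs a given thread or the current
 * CPU.  With CONFIG_SCHED_CPU_MASK_PIN_ONLY each CPU keeps its own run
 * queue (chosen from the thread's cpu_mask); otherwise everything uses the
 * single global run queue in _kernel.ready_q.
 */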
static ALWAYS_INLINE void *thread_runq(struct k_thread *thread)
{
#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	int cpu, m = thread->base.cpu_mask;

	/* Edge case: it's legal per the API to "make runnable" a
	 * thread with all CPUs masked off (i.e. one that isn't
	 * actually runnable!).  Sort of a wart in the API and maybe
	 * we should address this in docs/assertions instead to avoid
	 * the extra test.
	 */
	cpu = m == 0 ? 0 : u32_count_trailing_zeros(m);

	return &_kernel.cpus[cpu].ready_q.runq;
#else
	ARG_UNUSED(thread);
	return &_kernel.ready_q.runq;
#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
}

static ALWAYS_INLINE void *curr_cpu_runq(void)
{
#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	return &arch_curr_cpu()->ready_q.runq;
#else
	return &_kernel.ready_q.runq;
#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
}

static ALWAYS_INLINE void runq_add(struct k_thread *thread)
{
	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	_priq_run_add(thread_runq(thread), thread);
}

static ALWAYS_INLINE void runq_remove(struct k_thread *thread)
{
	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	_priq_run_remove(thread_runq(thread), thread);
}

static ALWAYS_INLINE struct k_thread *runq_best(void)
{
	return _priq_run_best(curr_cpu_runq());
}

/* _current is never in the run queue until context switch on
 * SMP configurations, see z_requeue_current()
 */
static inline bool should_queue_thread(struct k_thread *thread)
{
	return !IS_ENABLED(CONFIG_SMP) || (thread != _current);
}

static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_QUEUED;
	if (should_queue_thread(thread)) {
		runq_add(thread);
	}
#ifdef CONFIG_SMP
	if (thread == _current) {
		/* add current to end of queue means "yield" */
		_current_cpu->swap_ok = true;
	}
#endif /* CONFIG_SMP */
}

static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_QUEUED;
	if (should_queue_thread(thread)) {
		runq_remove(thread);
	}
}

/* Called out of z_swap() when CONFIG_SMP.  The current thread can
 * never live in the run queue until we are inexorably on the context
 * switch path on SMP, otherwise there is a deadlock condition where a
 * set of CPUs pick a cycle of threads to run and wait for them all to
 * context switch forever.
 */
void z_requeue_current(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		runq_add(thread);
	}
	signal_pending_ipi();
}

/* Return true if the thread is aborting, else false */
static inline bool is_aborting(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_ABORTING) != 0U;
}

/* Return true if the thread is aborting or suspending, else false */
static inline bool is_halting(struct k_thread *thread)
{
	return (thread->base.thread_state &
		(_THREAD_ABORTING | _THREAD_SUSPENDING)) != 0U;
}

/* Clear the halting bits (_THREAD_ABORTING and _THREAD_SUSPENDING) */
static inline void clear_halting(struct k_thread *thread)
{
	barrier_dmem_fence_full(); /* Other cpus spin on this locklessly! */
	thread->base.thread_state &= ~(_THREAD_ABORTING | _THREAD_SUSPENDING);
}

static ALWAYS_INLINE struct k_thread *next_up(void)
{
#ifdef CONFIG_SMP
	if (is_halting(_current)) {
		halt_thread(_current, is_aborting(_current) ?
				      _THREAD_DEAD : _THREAD_SUSPENDED);
	}
#endif /* CONFIG_SMP */

	struct k_thread *thread = runq_best();

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && \
	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
	/* MetaIRQs must always attempt to return back to a
	 * cooperative thread they preempted and not whatever happens
	 * to be highest priority now. The cooperative thread was
	 * promised it wouldn't be preempted (by non-metairq threads)!
	 */
	struct k_thread *mirqp = _current_cpu->metairq_preempted;

	if (mirqp != NULL && (thread == NULL || !thread_is_metairq(thread))) {
		if (!z_is_thread_prevented_from_running(mirqp)) {
			thread = mirqp;
		} else {
			_current_cpu->metairq_preempted = NULL;
		}
	}
#endif
/* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
 * CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
 */

#ifndef CONFIG_SMP
	/* In uniprocessor mode, we can leave the current thread in
	 * the queue (actually we have to, otherwise the assembly
	 * context switch code for all architectures would be
	 * responsible for putting it back in z_swap and ISR return!),
	 * which makes this choice simple.
	 */
	return (thread != NULL) ? thread : _current_cpu->idle_thread;
#else
	/* Under SMP, the "cache" mechanism for selecting the next
	 * thread doesn't work, so we have more work to do to test
	 * _current against the best choice from the queue.  Here, the
	 * thread selected above represents "the best thread that is
	 * not current".
	 *
	 * Subtle note on "queued": in SMP mode, _current does not
	 * live in the queue, so this isn't exactly the same thing as
	 * "ready", it means "is _current already added back to the
	 * queue such that we don't want to re-add it".
	 */
	bool queued = z_is_thread_queued(_current);
	bool active = !z_is_thread_prevented_from_running(_current);

	if (thread == NULL) {
		thread = _current_cpu->idle_thread;
	}

	if (active) {
		int32_t cmp = z_sched_prio_cmp(_current, thread);

		/* Ties only switch if state says we yielded */
		if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) {
			thread = _current;
		}

		if (!should_preempt(thread, _current_cpu->swap_ok)) {
			thread = _current;
		}
	}

	/* Put _current back into the queue */
	if ((thread != _current) && active &&
	    !z_is_idle_thread_object(_current) && !queued) {
		queue_thread(_current);
	}

	/* Take the new _current out of the queue */
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}

	_current_cpu->swap_ok = false;
	return thread;
#endif /* CONFIG_SMP */
}

void move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}
	queue_thread(thread);
	update_cache(thread == _current);
}

/* Track cooperative threads preempted by metairqs so we can return to
 * them specifically.  Called at the moment a new thread has been
 * selected to run.
 */
static void update_metairq_preempt(struct k_thread *thread)
{
#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && \
	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
	if (thread_is_metairq(thread) && !thread_is_metairq(_current) &&
	    !thread_is_preemptible(_current)) {
		/* Record new preemption */
		_current_cpu->metairq_preempted = _current;
	} else if (!thread_is_metairq(thread) && !z_is_idle_thread_object(thread)) {
		/* Returning from existing preemption */
		_current_cpu->metairq_preempted = NULL;
	}
#else
	ARG_UNUSED(thread);
#endif
/* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
 * CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
 */
}

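/* Recompute the cached scheduling decision.  On uniprocessor builds this
 * refreshes _kernel.ready_q.cache (the next thread to run); on SMP it only
 * records whether a cooperative swap is currently permitted, since the
 * actual choice is made at swap time by next_up().
 */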
static void update_cache(int preempt_ok)
{
#ifndef CONFIG_SMP
	struct k_thread *thread = next_up();

	if (should_preempt(thread, preempt_ok)) {
#ifdef CONFIG_TIMESLICING
		if (thread != _current) {
			z_reset_time_slice(thread);
		}
#endif /* CONFIG_TIMESLICING */
		update_metairq_preempt(thread);
		_kernel.ready_q.cache = thread;
	} else {
		_kernel.ready_q.cache = _current;
	}

#else
	/* The way this works is that the CPU record keeps its
	 * "cooperative swapping is OK" flag until the next reschedule
	 * call or context switch.  It doesn't need to be tracked per
	 * thread because if the thread gets preempted for whatever
	 * reason the scheduler will make the same decision anyway.
	 */
	_current_cpu->swap_ok = preempt_ok;
#endif /* CONFIG_SMP */
}

static struct _cpu *thread_active_elsewhere(struct k_thread *thread)
{
	/* Returns pointer to _cpu if the thread is currently running on
	 * another CPU. There are more scalable designs to answer this
	 * question in constant time, but this is fine for now.
	 */
#ifdef CONFIG_SMP
	int currcpu = _current_cpu->id;

	unsigned int num_cpus = arch_num_cpus();

	for (int i = 0; i < num_cpus; i++) {
		if ((i != currcpu) &&
		    (_kernel.cpus[i].current == thread)) {
			return &_kernel.cpus[i];
		}
	}
#endif /* CONFIG_SMP */
	ARG_UNUSED(thread);
	return NULL;
}

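/* Make a thread schedulable: if it is ready and not already queued, add it
 * to the run queue, refresh the scheduling cache and flag an IPI for the
 * CPUs that may need to pick it up.  All callers in this file hold
 * _sched_spinlock.
 */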
static void ready_thread(struct k_thread *thread)
{
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(thread));
#endif /* CONFIG_KERNEL_COHERENCE */

	/* If the thread is queued already, do not try to add it to the
	 * run queue again
	 */
	if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) {
		SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_ready, thread);

		queue_thread(thread);
		update_cache(0);

		flag_ipi(ipi_mask_create(thread));
	}
}

void z_ready_thread_locked(struct k_thread *thread)
{
	if (thread_active_elsewhere(thread) == NULL) {
		ready_thread(thread);
	}
}

void z_ready_thread(struct k_thread *thread)
{
	K_SPINLOCK(&_sched_spinlock) {
		if (thread_active_elsewhere(thread) == NULL) {
			ready_thread(thread);
		}
	}
}

void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	K_SPINLOCK(&_sched_spinlock) {
		move_thread_to_end_of_prio_q(thread);
	}
}

void z_sched_start(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

	if (z_has_thread_started(thread)) {
		k_spin_unlock(&_sched_spinlock, key);
		return;
	}

	z_mark_thread_as_started(thread);
	ready_thread(thread);
	z_reschedule(&_sched_spinlock, key);
}

/* Spins in ISR context, waiting for a thread known to be running on
 * another CPU to catch the IPI we sent and halt.  Note that we check
 * for ourselves being asynchronously halted first to prevent simple
 * deadlocks (but not complex ones involving cycles of 3+ threads!).
 * Acts to release the provided lock before returning.
 */
static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
{
	if (is_halting(_current)) {
		halt_thread(_current,
			    is_aborting(_current) ? _THREAD_DEAD : _THREAD_SUSPENDED);
	}
	k_spin_unlock(&_sched_spinlock, key);
	while (is_halting(thread)) {
		unsigned int k = arch_irq_lock();

		arch_spin_relax(); /* Requires interrupts be masked */
		arch_irq_unlock(k);
	}
}

/* Shared handler for k_thread_{suspend,abort}().  Called with the
 * scheduler lock held and the key passed (which it may
 * release/reacquire!) which will be released before a possible return
 * (aborting _current will not return, obviously), which may be after
 * a context switch.
 */
static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
			  bool terminate)
{
	_wait_q_t *wq = &thread->join_queue;
#ifdef CONFIG_SMP
	wq = terminate ? wq : &thread->halt_queue;
#endif

	/* If the target is a thread running on another CPU, flag and
	 * poke (note that we might spin to wait, so a true
	 * synchronous IPI is needed here, not deferred!), it will
	 * halt itself in the IPI.  Otherwise it's unscheduled, so we
	 * can clean it up directly.
	 */

	struct _cpu *cpu = thread_active_elsewhere(thread);

	if (cpu != NULL) {
		thread->base.thread_state |= (terminate ? _THREAD_ABORTING
					      : _THREAD_SUSPENDING);
#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
#ifdef CONFIG_ARCH_HAS_DIRECTED_IPIS
		arch_sched_directed_ipi(IPI_CPU_MASK(cpu->id));
#else
		arch_sched_broadcast_ipi();
#endif
#endif
		if (arch_is_in_isr()) {
			thread_halt_spin(thread, key);
		} else {
			add_to_waitq_locked(_current, wq);
			z_swap(&_sched_spinlock, key);
		}
	} else {
		halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
		if ((thread == _current) && !arch_is_in_isr()) {
			z_swap(&_sched_spinlock, key);
			__ASSERT(!terminate, "aborted _current back from dead");
		} else {
			k_spin_unlock(&_sched_spinlock, key);
		}
	}
	/* NOTE: the scheduler lock has been released.  Don't put
	 * logic here, it's likely to be racy/deadlocky even if you
	 * re-take the lock!
	 */
}

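/* k_thread_suspend() implementation: cancels any pending thread timeout
 * and delegates to z_thread_halt() with terminate=false, which either
 * halts the thread directly or IPIs the CPU it is currently running on.
 */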
void z_impl_k_thread_suspend(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, suspend, thread);

	(void)z_abort_thread_timeout(thread);

	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

	if ((thread->base.thread_state & _THREAD_SUSPENDED) != 0U) {

		/* The target thread is already suspended. Nothing to do. */

		k_spin_unlock(&_sched_spinlock, key);
		return;
	}

	z_thread_halt(thread, key, false);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, suspend, thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_suspend(struct k_thread *thread)
{
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_suspend(thread);
}
#include <zephyr/syscalls/k_thread_suspend_mrsh.c>
#endif /* CONFIG_USERSPACE */

void z_impl_k_thread_resume(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, resume, thread);

	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

	/* Do not try to resume a thread that was not suspended */
	if (!z_is_thread_suspended(thread)) {
		k_spin_unlock(&_sched_spinlock, key);
		return;
	}

	z_mark_thread_as_not_suspended(thread);
	ready_thread(thread);

	z_reschedule(&_sched_spinlock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, resume, thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_resume(struct k_thread *thread)
{
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_resume(thread);
}
#include <zephyr/syscalls/k_thread_resume_mrsh.c>
#endif /* CONFIG_USERSPACE */

static _wait_q_t *pended_on_thread(struct k_thread *thread)
{
	__ASSERT_NO_MSG(thread->base.pended_on);

	return thread->base.pended_on;
}

static void unready_thread(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}
	update_cache(thread == _current);
}

/* _sched_spinlock must be held */
static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q)
{
	unready_thread(thread);
	z_mark_thread_as_pending(thread);

	SYS_PORT_TRACING_FUNC(k_thread, sched_pend, thread);

	if (wait_q != NULL) {
		thread->base.pended_on = wait_q;
		_priq_wait_add(&wait_q->waitq, thread);
	}
}

static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
{
	if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		z_add_thread_timeout(thread, timeout);
	}
}

static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q,
			k_timeout_t timeout)
{
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(wait_q == NULL || arch_mem_coherent(wait_q));
#endif /* CONFIG_KERNEL_COHERENCE */
	add_to_waitq_locked(thread, wait_q);
	add_thread_timeout(thread, timeout);
}

void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
		   k_timeout_t timeout)
{
	__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
	K_SPINLOCK(&_sched_spinlock) {
		pend_locked(thread, wait_q, timeout);
	}
}

static inline void unpend_thread_no_timeout(struct k_thread *thread)
{
	_priq_wait_remove(&pended_on_thread(thread)->waitq, thread);
	z_mark_thread_as_not_pending(thread);
	thread->base.pended_on = NULL;
}

ALWAYS_INLINE void z_unpend_thread_no_timeout(struct k_thread *thread)
{
	K_SPINLOCK(&_sched_spinlock) {
		if (thread->base.pended_on != NULL) {
			unpend_thread_no_timeout(thread);
		}
	}
}

void z_sched_wake_thread(struct k_thread *thread, bool is_timeout)
{
	K_SPINLOCK(&_sched_spinlock) {
		bool killed = (thread->base.thread_state &
			       (_THREAD_DEAD | _THREAD_ABORTING));

#ifdef CONFIG_EVENTS
		bool do_nothing = thread->no_wake_on_timeout && is_timeout;

		thread->no_wake_on_timeout = false;

		if (do_nothing) {
			continue;
		}
#endif /* CONFIG_EVENTS */

		if (!killed) {
			/* The thread is not being killed */
			if (thread->base.pended_on != NULL) {
				unpend_thread_no_timeout(thread);
			}
			z_mark_thread_as_started(thread);
			if (is_timeout) {
				z_mark_thread_as_not_suspended(thread);
			}
			ready_thread(thread);
		}
	}

}

#ifdef CONFIG_SYS_CLOCK_EXISTS
/* Timeout handler for *_thread_timeout() APIs */
void z_thread_timeout(struct _timeout *timeout)
{
	struct k_thread *thread = CONTAINER_OF(timeout,
					       struct k_thread, base.timeout);

	z_sched_wake_thread(thread, true);
}
#endif /* CONFIG_SYS_CLOCK_EXISTS */

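/* Pend the current thread on wait_q with the given timeout, releasing the
 * caller's lock in the process, then swap away under the scheduler lock.
 * Returns the swap return value, i.e. the wakeup/timeout result seen by
 * the waiting thread.
 */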
int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
		_wait_q_t *wait_q, k_timeout_t timeout)
{
#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
	pending_current = _current;
#endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
	__ASSERT_NO_MSG(sizeof(_sched_spinlock) == 0 || lock != &_sched_spinlock);

	/* We do a "lock swap" prior to calling z_swap(), such that
	 * the caller's lock gets released as desired.  But we ensure
	 * that we hold the scheduler lock and leave local interrupts
	 * masked until we reach the context switch.  z_swap() itself
	 * has similar code; the duplication is because it's a legacy
	 * API that doesn't expect to be called with scheduler lock
	 * held.
	 */
	(void) k_spin_lock(&_sched_spinlock);
	pend_locked(_current, wait_q, timeout);
	k_spin_release(lock);
	return z_swap(&_sched_spinlock, key);
}

struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
{
	struct k_thread *thread = NULL;

	K_SPINLOCK(&_sched_spinlock) {
		thread = _priq_wait_best(&wait_q->waitq);

		if (thread != NULL) {
			unpend_thread_no_timeout(thread);
		}
	}

	return thread;
}

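/* Like z_unpend1_no_timeout(), but also aborts the unpended thread's
 * timeout so it cannot fire later.  Returns NULL if the wait queue is
 * empty.
 */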
struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
{
	struct k_thread *thread = NULL;

	K_SPINLOCK(&_sched_spinlock) {
		thread = _priq_wait_best(&wait_q->waitq);

		if (thread != NULL) {
			unpend_thread_no_timeout(thread);
			(void)z_abort_thread_timeout(thread);
		}
	}

	return thread;
}

void z_unpend_thread(struct k_thread *thread)
{
	z_unpend_thread_no_timeout(thread);
	(void)z_abort_thread_timeout(thread);
}

/* Priority set utility that does no rescheduling, it just changes the
 * run queue state, returning true if a reschedule is needed later.
 */
bool z_thread_prio_set(struct k_thread *thread, int prio)
{
	bool need_sched = 0;
	int old_prio = thread->base.prio;

	K_SPINLOCK(&_sched_spinlock) {
		need_sched = z_is_thread_ready(thread);

		if (need_sched) {
			if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) {
				dequeue_thread(thread);
				thread->base.prio = prio;
				queue_thread(thread);

				if (old_prio > prio) {
					flag_ipi(ipi_mask_create(thread));
				}
			} else {
				/*
				 * This is a running thread on SMP. Update its
				 * priority, but do not requeue it. An IPI is
				 * needed if the priority is both being lowered
				 * and it is running on another CPU.
				 */

				thread->base.prio = prio;

				struct _cpu *cpu;

				cpu = thread_active_elsewhere(thread);
				if ((cpu != NULL) && (old_prio < prio)) {
					flag_ipi(IPI_CPU_MASK(cpu->id));
				}
			}

			update_cache(1);
		} else {
			thread->base.prio = prio;
		}
	}

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_priority_set, thread, prio);

	return need_sched;
}

static inline bool resched(uint32_t key)
{
#ifdef CONFIG_SMP
	_current_cpu->swap_ok = 0;
#endif /* CONFIG_SMP */

	return arch_irq_unlocked(key) && !arch_is_in_isr();
}

/*
 * Check if the next ready thread is the same as the current thread
 * and save the trip if true.
 */
static inline bool need_swap(void)
{
	/* the SMP case will be handled in C based z_swap() */
#ifdef CONFIG_SMP
	return true;
#else
	struct k_thread *new_thread;

	/* Check if the next ready thread is the same as the current thread */
	new_thread = _kernel.ready_q.cache;
	return new_thread != _current;
#endif /* CONFIG_SMP */
}

void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
{
	if (resched(key.key) && need_swap()) {
		z_swap(lock, key);
	} else {
		k_spin_unlock(lock, key);
		signal_pending_ipi();
	}
}

void z_reschedule_irqlock(uint32_t key)
{
	if (resched(key) && need_swap()) {
		z_swap_irqlock(key);
	} else {
		irq_unlock(key);
		signal_pending_ipi();
	}
}

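/* A minimal usage sketch for the scheduler lock API implemented below
 * (hypothetical application caller, not part of this file): while the
 * lock is held the calling thread cannot be preempted by other threads,
 * and the matching unlock may trigger a reschedule.
 *
 *	k_sched_lock();
 *	// ... section that must not be preempted by other threads ...
 *	k_sched_unlock();
 */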
void k_sched_lock(void)
{
	K_SPINLOCK(&_sched_spinlock) {
		SYS_PORT_TRACING_FUNC(k_thread, sched_lock);

		z_sched_lock();
	}
}

void k_sched_unlock(void)
{
	K_SPINLOCK(&_sched_spinlock) {
		__ASSERT(_current->base.sched_locked != 0U, "");
		__ASSERT(!arch_is_in_isr(), "");

		++_current->base.sched_locked;
		update_cache(0);
	}

	LOG_DBG("scheduler unlocked (%p:%d)",
		_current, _current->base.sched_locked);

	SYS_PORT_TRACING_FUNC(k_thread, sched_unlock);

	z_reschedule_unlocked();
}

struct k_thread *z_swap_next_thread(void)
{
#ifdef CONFIG_SMP
	struct k_thread *ret = next_up();

	if (ret == _current) {
		/* When not swapping, have to signal IPIs here.  In
		 * the context switch case it must happen later, after
		 * _current gets requeued.
		 */
		signal_pending_ipi();
	}
	return ret;
#else
	return _kernel.ready_q.cache;
#endif /* CONFIG_SMP */
}

#ifdef CONFIG_USE_SWITCH
/* Just a wrapper around _current = xxx with tracing */
static inline void set_current(struct k_thread *new_thread)
{
	z_thread_mark_switched_out();
	_current_cpu->current = new_thread;
}

/**
 * @brief Determine next thread to execute upon completion of an interrupt
 *
 * Thread preemption is performed by context switching after the completion
 * of a non-recursed interrupt. This function determines which thread to
 * switch to if any. This function accepts as @p interrupted either:
 *
 * - The handle for the interrupted thread in which case the thread's context
 *   must already be fully saved and ready to be picked up by a different CPU.
 *
 * - NULL if more work is required to fully save the thread's state after
 *   it is known that a new thread is to be scheduled. It is up to the caller
 *   to store the handle resulting from the thread that is being switched out
 *   in that thread's "switch_handle" field after its
 *   context has fully been saved, following the same requirements as with
 *   the @ref arch_switch() function.
 *
 * If a new thread needs to be scheduled then its handle is returned.
 * Otherwise the same value provided as @p interrupted is returned back.
 * Those handles are the same opaque types used by the @ref arch_switch()
 * function.
 *
 * @warning
 * The @ref _current value may have changed after this call and not refer
 * to the interrupted thread anymore. It might be necessary to make a local
 * copy before calling this function.
 *
 * @param interrupted Handle for the thread that was interrupted or NULL.
 * @retval Handle for the next thread to execute, or @p interrupted when
 *         no new thread is to be scheduled.
 */
void *z_get_next_switch_handle(void *interrupted)
{
	z_check_stack_sentinel();

#ifdef CONFIG_SMP
	void *ret = NULL;

	K_SPINLOCK(&_sched_spinlock) {
		struct k_thread *old_thread = _current, *new_thread;

		if (IS_ENABLED(CONFIG_SMP)) {
			old_thread->switch_handle = NULL;
		}
		new_thread = next_up();

		z_sched_usage_switch(new_thread);

		if (old_thread != new_thread) {
			uint8_t cpu_id;

			update_metairq_preempt(new_thread);
			z_sched_switch_spin(new_thread);
			arch_cohere_stacks(old_thread, interrupted, new_thread);

			_current_cpu->swap_ok = 0;
			cpu_id = arch_curr_cpu()->id;
			new_thread->base.cpu = cpu_id;
			set_current(new_thread);

#ifdef CONFIG_TIMESLICING
			z_reset_time_slice(new_thread);
#endif /* CONFIG_TIMESLICING */

#ifdef CONFIG_SPIN_VALIDATE
			/* Changed _current!  Update the spinlock
			 * bookkeeping so the validation doesn't get
			 * confused when the "wrong" thread tries to
			 * release the lock.
			 */
			z_spin_lock_set_owner(&_sched_spinlock);
#endif /* CONFIG_SPIN_VALIDATE */

			/* A queued (runnable) old/current thread
			 * needs to be added back to the run queue
			 * here, and atomically with its switch handle
			 * being set below.  This is safe now, as we
			 * will not return into it.
			 */
			if (z_is_thread_queued(old_thread)) {
#ifdef CONFIG_SCHED_IPI_CASCADE
				if ((new_thread->base.cpu_mask != -1) &&
				    (old_thread->base.cpu_mask != BIT(cpu_id))) {
					flag_ipi(ipi_mask_create(old_thread));
				}
#endif
				runq_add(old_thread);
			}
		}
		old_thread->switch_handle = interrupted;
		ret = new_thread->switch_handle;
		if (IS_ENABLED(CONFIG_SMP)) {
			/* Active threads MUST have a null here */
			new_thread->switch_handle = NULL;
		}
	}
	signal_pending_ipi();
	return ret;
#else
	z_sched_usage_switch(_kernel.ready_q.cache);
	_current->switch_handle = interrupted;
	set_current(_kernel.ready_q.cache);
	return _current->switch_handle;
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_USE_SWITCH */

int z_unpend_all(_wait_q_t *wait_q)
{
	int need_sched = 0;
	struct k_thread *thread;

	for (thread = z_waitq_head(wait_q); thread != NULL; thread = z_waitq_head(wait_q)) {
		z_unpend_thread(thread);
		z_ready_thread(thread);
		need_sched = 1;
	}

	return need_sched;
}

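/* Initialize a ready queue for whichever run queue backend was selected at
 * build time: scalable red/black tree, multi-queue dlist array, or the
 * simple dlist.
 */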
Anas Nashif477a04a2024-02-28 08:15:15 -05001006void init_ready_q(struct _ready_q *ready_q)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001007{
Andy Rossb155d062021-09-24 13:49:14 -07001008#if defined(CONFIG_SCHED_SCALABLE)
Anas Nashif477a04a2024-02-28 08:15:15 -05001009 ready_q->runq = (struct _priq_rb) {
Andy Ross1acd8c22018-05-03 14:51:49 -07001010 .tree = {
Patrik Flykt4344e272019-03-08 14:19:05 -07001011 .lessthan_fn = z_priq_rb_lessthan,
Andy Ross1acd8c22018-05-03 14:51:49 -07001012 }
1013 };
Andy Rossb155d062021-09-24 13:49:14 -07001014#elif defined(CONFIG_SCHED_MULTIQ)
Andy Ross9f06a352018-06-28 10:38:14 -07001015 for (int i = 0; i < ARRAY_SIZE(_kernel.ready_q.runq.queues); i++) {
Anas Nashif477a04a2024-02-28 08:15:15 -05001016 sys_dlist_init(&ready_q->runq.queues[i]);
Andy Ross9f06a352018-06-28 10:38:14 -07001017 }
Andy Rossb155d062021-09-24 13:49:14 -07001018#else
Anas Nashif477a04a2024-02-28 08:15:15 -05001019 sys_dlist_init(&ready_q->runq);
Andy Ross9f06a352018-06-28 10:38:14 -07001020#endif
Andy Rossb155d062021-09-24 13:49:14 -07001021}
1022
1023void z_sched_init(void)
1024{
Andy Rossb11e7962021-09-24 10:57:39 -07001025#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
Nicolas Pitre907eea02023-03-16 17:54:25 -04001026 for (int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
Andy Rossb11e7962021-09-24 10:57:39 -07001027 init_ready_q(&_kernel.cpus[i].ready_q);
1028 }
1029#else
Andy Rossb155d062021-09-24 13:49:14 -07001030 init_ready_q(&_kernel.ready_q);
Simon Heinbcd1d192024-03-08 12:00:10 +01001031#endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001032}
1033
Anas Nashif25c87db2021-03-29 10:54:23 -04001034void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001035{
Benjamin Walsh3cc2ba92016-11-08 15:44:05 -05001036 /*
1037 * Use NULL, since we cannot know what the entry point is (we do not
1038 * keep track of it) and idle cannot change its priority.
1039 */
Patrik Flykt4344e272019-03-08 14:19:05 -07001040 Z_ASSERT_VALID_PRIO(prio, NULL);
Andrew Boie4f77c2a2019-11-07 12:43:29 -08001041 __ASSERT(!arch_is_in_isr(), "");
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001042
Anas Nashif868f0992024-02-24 11:37:56 -05001043 bool need_sched = z_thread_prio_set((struct k_thread *)thread, prio);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001044
Peter Mitsis9ff52212024-03-01 14:44:26 -05001045 if ((need_sched) && (IS_ENABLED(CONFIG_SMP) ||
1046 (_current->base.sched_locked == 0U))) {
Anas Nashif5e591c32024-02-24 10:37:06 -05001047 z_reschedule_unlocked();
1048 }
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001049}
1050
Andrew Boie468190a2017-09-29 14:00:48 -07001051#ifdef CONFIG_USERSPACE
Andy Ross65649742019-08-06 13:34:31 -07001052static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio)
Andrew Boie468190a2017-09-29 14:00:48 -07001053{
Anas Nashifa08bfeb2023-09-27 11:20:28 +00001054 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1055 K_OOPS(K_SYSCALL_VERIFY_MSG(_is_valid_prio(prio, NULL),
Andy Ross65649742019-08-06 13:34:31 -07001056 "invalid thread priority %d", prio));
Anas Nashif5e591c32024-02-24 10:37:06 -05001057#ifndef CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY
Anas Nashifa08bfeb2023-09-27 11:20:28 +00001058 K_OOPS(K_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio,
Andrew Boie8345e5e2018-05-04 15:57:57 -07001059 "thread priority may only be downgraded (%d < %d)",
1060 prio, thread->base.prio));
Simon Heinbcd1d192024-03-08 12:00:10 +01001061#endif /* CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY */
Andy Ross65649742019-08-06 13:34:31 -07001062 z_impl_k_thread_priority_set(thread, prio);
Andrew Boie468190a2017-09-29 14:00:48 -07001063}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001064#include <zephyr/syscalls/k_thread_priority_set_mrsh.c>
Simon Heinbcd1d192024-03-08 12:00:10 +01001065#endif /* CONFIG_USERSPACE */
Andrew Boie468190a2017-09-29 14:00:48 -07001066
Andy Ross4a2e50f2018-05-15 11:06:25 -07001067#ifdef CONFIG_SCHED_DEADLINE
Patrik Flykt4344e272019-03-08 14:19:05 -07001068void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
Andy Ross4a2e50f2018-05-15 11:06:25 -07001069{
TaiJu Wu555c07e2024-03-14 03:09:41 +08001070
1071 deadline = CLAMP(deadline, 0, INT_MAX);
1072
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001073 struct k_thread *thread = tid;
Andy Rossf2280d12024-03-08 08:42:08 -08001074 int32_t newdl = k_cycle_get_32() + deadline;
Andy Ross4a2e50f2018-05-15 11:06:25 -07001075
Andy Rossf2280d12024-03-08 08:42:08 -08001076 /* The prio_deadline field changes the sorting order, so can't
1077 * change it while the thread is in the run queue (dlists
1078 * actually are benign as long as we requeue it before we
1079 * release the lock, but an rbtree will blow up if we break
1080 * sorting!)
1081 */
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001082 K_SPINLOCK(&_sched_spinlock) {
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001083 if (z_is_thread_queued(thread)) {
Andy Rossc230fb32021-09-23 16:41:30 -07001084 dequeue_thread(thread);
Andy Rossf2280d12024-03-08 08:42:08 -08001085 thread->base.prio_deadline = newdl;
Andy Rossc230fb32021-09-23 16:41:30 -07001086 queue_thread(thread);
Andy Rossf2280d12024-03-08 08:42:08 -08001087 } else {
1088 thread->base.prio_deadline = newdl;
Andy Ross4a2e50f2018-05-15 11:06:25 -07001089 }
1090 }
1091}
1092
1093#ifdef CONFIG_USERSPACE
Andy Ross075c94f2019-08-13 11:34:34 -07001094static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline)
Andy Ross4a2e50f2018-05-15 11:06:25 -07001095{
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001096 struct k_thread *thread = tid;
Andy Ross4a2e50f2018-05-15 11:06:25 -07001097
Anas Nashifa08bfeb2023-09-27 11:20:28 +00001098 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1099 K_OOPS(K_SYSCALL_VERIFY_MSG(deadline > 0,
Andy Ross4a2e50f2018-05-15 11:06:25 -07001100 "invalid thread deadline %d",
1101 (int)deadline));
1102
Patrik Flykt4344e272019-03-08 14:19:05 -07001103 z_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
Andy Ross4a2e50f2018-05-15 11:06:25 -07001104}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001105#include <zephyr/syscalls/k_thread_deadline_set_mrsh.c>
Simon Heinbcd1d192024-03-08 12:00:10 +01001106#endif /* CONFIG_USERSPACE */
1107#endif /* CONFIG_SCHED_DEADLINE */
Andy Ross4a2e50f2018-05-15 11:06:25 -07001108
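/*
 * Illustrative EDF sketch (assumes CONFIG_SCHED_DEADLINE and a set of
 * threads sharing one preemptible priority).  As implemented above, the
 * deadline argument is relative, in hardware cycles from "now".  The
 * 10 ms period and do_one_iteration() are assumptions for the example.
 */
#if 0 /* example only */
static void edf_job(void *p1, void *p2, void *p3)
{
	const uint32_t period_ms = 10U;

	for (;;) {
		/* Announce this iteration's deadline before doing the
		 * work so the scheduler can order equal-priority
		 * threads earliest-deadline-first.
		 */
		k_thread_deadline_set(k_current_get(),
				      (int)k_ms_to_cyc_ceil32(period_ms));
		do_one_iteration();	/* hypothetical work function */
		k_sleep(K_MSEC(period_ms));
	}
}
#endif
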
Jordan Yates1ef647f2022-03-26 09:55:23 +10001109bool k_can_yield(void)
1110{
1111 return !(k_is_pre_kernel() || k_is_in_isr() ||
1112 z_is_idle_thread_object(_current));
1113}
1114
Patrik Flykt4344e272019-03-08 14:19:05 -07001115void z_impl_k_yield(void)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001116{
Andrew Boie4f77c2a2019-11-07 12:43:29 -08001117 __ASSERT(!arch_is_in_isr(), "");
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001118
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001119 SYS_PORT_TRACING_FUNC(k_thread, yield);
1120
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001121 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
James Harris6543e062021-03-01 10:14:13 -08001122
Andy Ross851d14a2021-05-13 15:46:43 -07001123 if (!IS_ENABLED(CONFIG_SMP) ||
1124 z_is_thread_queued(_current)) {
Andy Rossc230fb32021-09-23 16:41:30 -07001125 dequeue_thread(_current);
Andy Ross1acd8c22018-05-03 14:51:49 -07001126 }
Andy Rossc230fb32021-09-23 16:41:30 -07001127 queue_thread(_current);
Andy Ross851d14a2021-05-13 15:46:43 -07001128 update_cache(1);
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001129 z_swap(&_sched_spinlock, key);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001130}
1131
Andrew Boie468190a2017-09-29 14:00:48 -07001132#ifdef CONFIG_USERSPACE
Andy Ross65649742019-08-06 13:34:31 -07001133static inline void z_vrfy_k_yield(void)
1134{
1135 z_impl_k_yield();
1136}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001137#include <zephyr/syscalls/k_yield_mrsh.c>
Simon Heinbcd1d192024-03-08 12:00:10 +01001138#endif /* CONFIG_USERSPACE */
Andrew Boie468190a2017-09-29 14:00:48 -07001139
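/*
 * Illustrative sketch: a cooperative busy-wait that hands the CPU to
 * other ready threads of the same priority.  k_can_yield() filters out
 * exactly the cases rejected above (pre-kernel, ISR, idle thread).
 * poll_hardware() is a hypothetical helper, not a Zephyr API.
 */
#if 0 /* example only */
static void poll_until_ready(void)
{
	while (!poll_hardware()) {
		if (k_can_yield()) {
			k_yield();
		}
	}
}
#endif
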
Flavio Ceolin7a815d52020-10-19 21:37:22 -07001140static int32_t z_tick_sleep(k_ticks_t ticks)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001141{
Flavio Ceolin9a160972020-11-16 10:40:46 -08001142 uint32_t expected_wakeup_ticks;
Carles Cufi9849df82016-12-02 15:31:08 +01001143
Andrew Boie4f77c2a2019-11-07 12:43:29 -08001144 __ASSERT(!arch_is_in_isr(), "");
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001145
Gerard Marull-Paretas737d7992022-11-23 13:42:04 +01001146 LOG_DBG("thread %p for %lu ticks", _current, (unsigned long)ticks);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001147
Benjamin Walsh5596f782016-12-09 19:57:17 -05001148	/* wait of 0 ticks is treated as a 'yield' */
Charles E. Youseb1863032019-05-08 13:22:46 -07001149 if (ticks == 0) {
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001150 k_yield();
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001151 return 0;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001152 }
1153
Lauren Murphy4c85b462021-05-25 17:49:28 -05001154 if (Z_TICK_ABS(ticks) <= 0) {
1155 expected_wakeup_ticks = ticks + sys_clock_tick_get_32();
1156 } else {
1157 expected_wakeup_ticks = Z_TICK_ABS(ticks);
1158 }
Andy Rossd27d4e62019-02-05 15:36:01 -08001159
Gerson Fernando Budkeb8188e52023-10-16 20:15:31 +02001160 k_timeout_t timeout = Z_TIMEOUT_TICKS(ticks);
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001161 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001162
Andy Rossdff6b712019-02-25 21:17:29 -08001163#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
1164 pending_current = _current;
Simon Heinbcd1d192024-03-08 12:00:10 +01001165#endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
Andrew Boiea8775ab2020-09-05 12:53:42 -07001166 unready_thread(_current);
Andy Ross78327382020-03-05 15:18:14 -08001167 z_add_thread_timeout(_current, timeout);
Andy Ross4521e0c2019-03-22 10:30:19 -07001168 z_mark_thread_as_suspended(_current);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001169
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001170 (void)z_swap(&_sched_spinlock, key);
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001171
Andy Ross4521e0c2019-03-22 10:30:19 -07001172 __ASSERT(!z_is_thread_state_set(_current, _THREAD_SUSPENDED), "");
1173
Anas Nashif5c90ceb2021-03-13 08:19:53 -05001174 ticks = (k_ticks_t)expected_wakeup_ticks - sys_clock_tick_get_32();
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001175 if (ticks > 0) {
Charles E. Youseb1863032019-05-08 13:22:46 -07001176 return ticks;
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001177 }
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001178
1179 return 0;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001180}
1181
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001182int32_t z_impl_k_sleep(k_timeout_t timeout)
Charles E. Youseb1863032019-05-08 13:22:46 -07001183{
Andy Ross78327382020-03-05 15:18:14 -08001184 k_ticks_t ticks;
Charles E. Youseb1863032019-05-08 13:22:46 -07001185
Peter Bigot8162e582019-12-12 16:07:07 -06001186 __ASSERT(!arch_is_in_isr(), "");
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001187
1188 SYS_PORT_TRACING_FUNC_ENTER(k_thread, sleep, timeout);
Peter Bigot8162e582019-12-12 16:07:07 -06001189
Anas Nashifd2c71792020-10-17 07:52:17 -04001190 /* in case of K_FOREVER, we suspend */
Andy Ross78327382020-03-05 15:18:14 -08001191 if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
Anas Nashif20b2c982024-03-28 10:09:26 -04001192
Andrew Boied2b89222019-11-08 10:44:22 -08001193 k_thread_suspend(_current);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001194 SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, (int32_t) K_TICKS_FOREVER);
1195
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001196 return (int32_t) K_TICKS_FOREVER;
Andrew Boied2b89222019-11-08 10:44:22 -08001197 }
1198
Andy Ross78327382020-03-05 15:18:14 -08001199 ticks = timeout.ticks;
Andy Ross78327382020-03-05 15:18:14 -08001200
Charles E. Youseb1863032019-05-08 13:22:46 -07001201 ticks = z_tick_sleep(ticks);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001202
Peter Mitsisa3e5af92023-12-05 13:40:19 -05001203 int32_t ret = k_ticks_to_ms_ceil64(ticks);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001204
1205 SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, ret);
1206
1207 return ret;
Charles E. Youseb1863032019-05-08 13:22:46 -07001208}
1209
Andrew Boie76c04a22017-09-27 14:45:10 -07001210#ifdef CONFIG_USERSPACE
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001211static inline int32_t z_vrfy_k_sleep(k_timeout_t timeout)
Andrew Boie76c04a22017-09-27 14:45:10 -07001212{
Andy Ross78327382020-03-05 15:18:14 -08001213 return z_impl_k_sleep(timeout);
Charles E. Yousea5678312019-05-09 16:46:46 -07001214}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001215#include <zephyr/syscalls/k_sleep_mrsh.c>
Simon Heinbcd1d192024-03-08 12:00:10 +01001216#endif /* CONFIG_USERSPACE */
Charles E. Yousea5678312019-05-09 16:46:46 -07001217
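/*
 * Illustrative sketch of the return-value contract implemented above:
 * k_sleep() reports how much of the requested time was left when the
 * thread was woken early (e.g. by k_wakeup()), and returns
 * K_TICKS_FOREVER for a K_FOREVER sleep that was later resumed.  The
 * thread below is an assumption for the example.
 */
#if 0 /* example only */
static void napper(void *p1, void *p2, void *p3)
{
	int32_t remaining = k_sleep(K_MSEC(500));

	if (remaining > 0) {
		/* Woken early; 'remaining' milliseconds were not slept */
	}
}
#endif
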
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001218int32_t z_impl_k_usleep(int us)
Charles E. Yousea5678312019-05-09 16:46:46 -07001219{
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001220 int32_t ticks;
Charles E. Yousea5678312019-05-09 16:46:46 -07001221
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001222 SYS_PORT_TRACING_FUNC_ENTER(k_thread, usleep, us);
1223
Andy Ross88924062019-10-03 11:43:10 -07001224 ticks = k_us_to_ticks_ceil64(us);
Charles E. Yousea5678312019-05-09 16:46:46 -07001225 ticks = z_tick_sleep(ticks);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001226
Peter Mitsisa3e5af92023-12-05 13:40:19 -05001227 int32_t ret = k_ticks_to_us_ceil64(ticks);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001228
Peter Mitsisa3e5af92023-12-05 13:40:19 -05001229 SYS_PORT_TRACING_FUNC_EXIT(k_thread, usleep, us, ret);
1230
1231 return ret;
Charles E. Yousea5678312019-05-09 16:46:46 -07001232}
1233
1234#ifdef CONFIG_USERSPACE
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001235static inline int32_t z_vrfy_k_usleep(int us)
Charles E. Yousea5678312019-05-09 16:46:46 -07001236{
1237 return z_impl_k_usleep(us);
Andrew Boie76c04a22017-09-27 14:45:10 -07001238}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001239#include <zephyr/syscalls/k_usleep_mrsh.c>
Simon Heinbcd1d192024-03-08 12:00:10 +01001240#endif /* CONFIG_USERSPACE */
Andrew Boie76c04a22017-09-27 14:45:10 -07001241
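/*
 * Illustrative note on granularity: z_tick_sleep() works in whole ticks
 * and the conversion above rounds up, so a k_usleep() request shorter
 * than one tick still sleeps for at least a full tick.  For genuine
 * sub-tick delays a busy wait is the usual tool.
 */
#if 0 /* example only */
void short_pause(void)
{
	k_usleep(50);		/* rounds up to at least one tick asleep */
	k_busy_wait(50);	/* spins for ~50 us without rescheduling */
}
#endif
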
Patrik Flykt4344e272019-03-08 14:19:05 -07001242void z_impl_k_wakeup(k_tid_t thread)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001243{
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001244 SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread);
1245
Patrik Flykt4344e272019-03-08 14:19:05 -07001246 if (z_is_thread_pending(thread)) {
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001247 return;
1248 }
1249
Patrik Flykt4344e272019-03-08 14:19:05 -07001250 if (z_abort_thread_timeout(thread) < 0) {
Andrew Boied2b89222019-11-08 10:44:22 -08001251 /* Might have just been sleeping forever */
1252 if (thread->base.thread_state != _THREAD_SUSPENDED) {
1253 return;
1254 }
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001255 }
1256
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001257 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
Peter Mitsis51ae9932024-02-20 11:50:54 -05001258
Andy Ross4521e0c2019-03-22 10:30:19 -07001259 z_mark_thread_as_not_suspended(thread);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001260
Peter Mitsis9ff52212024-03-01 14:44:26 -05001261 if (thread_active_elsewhere(thread) == NULL) {
Peter Mitsis51ae9932024-02-20 11:50:54 -05001262 ready_thread(thread);
1263 }
Andy Ross5737b5c2020-02-04 13:52:09 -08001264
Peter Mitsis51ae9932024-02-20 11:50:54 -05001265 if (arch_is_in_isr()) {
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001266 k_spin_unlock(&_sched_spinlock, key);
Peter Mitsis51ae9932024-02-20 11:50:54 -05001267 } else {
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001268 z_reschedule(&_sched_spinlock, key);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001269 }
1270}
1271
Andrew Boie468190a2017-09-29 14:00:48 -07001272#ifdef CONFIG_USERSPACE
Andy Ross65649742019-08-06 13:34:31 -07001273static inline void z_vrfy_k_wakeup(k_tid_t thread)
1274{
Anas Nashifa08bfeb2023-09-27 11:20:28 +00001275 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
Andy Ross65649742019-08-06 13:34:31 -07001276 z_impl_k_wakeup(thread);
1277}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001278#include <zephyr/syscalls/k_wakeup_mrsh.c>
Simon Heinbcd1d192024-03-08 12:00:10 +01001279#endif /* CONFIG_USERSPACE */
Andrew Boie468190a2017-09-29 14:00:48 -07001280
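/*
 * Illustrative sketch: pairing k_sleep(K_FOREVER) with k_wakeup() as a
 * minimal park/unpark signal.  handle_event() and the interrupt wiring
 * are assumptions.  k_wakeup() is usable from ISR context: as above, it
 * only unlocks instead of rescheduling in that case.
 */
#if 0 /* example only */
static k_tid_t parked_thread;	/* set before the interrupt is enabled */

static void parked_entry(void *p1, void *p2, void *p3)
{
	parked_thread = k_current_get();
	for (;;) {
		(void)k_sleep(K_FOREVER);
		handle_event();	/* hypothetical */
	}
}

static void event_isr(const void *arg)
{
	k_wakeup(parked_thread);
}
#endif
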
Daniel Leung0a50ff32023-09-25 11:56:10 -07001281k_tid_t z_impl_k_sched_current_thread_query(void)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001282{
Andy Rosseefd3da2020-02-06 13:39:52 -08001283#ifdef CONFIG_SMP
1284 /* In SMP, _current is a field read from _current_cpu, which
1285 * can race with preemption before it is read. We must lock
1286 * local interrupts when reading it.
1287 */
1288 unsigned int k = arch_irq_lock();
Simon Heinbcd1d192024-03-08 12:00:10 +01001289#endif /* CONFIG_SMP */
Andy Rosseefd3da2020-02-06 13:39:52 -08001290
1291 k_tid_t ret = _current_cpu->current;
1292
1293#ifdef CONFIG_SMP
1294 arch_irq_unlock(k);
Simon Heinbcd1d192024-03-08 12:00:10 +01001295#endif /* CONFIG_SMP */
Andy Rosseefd3da2020-02-06 13:39:52 -08001296 return ret;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001297}
1298
Andrew Boie76c04a22017-09-27 14:45:10 -07001299#ifdef CONFIG_USERSPACE
Daniel Leung0a50ff32023-09-25 11:56:10 -07001300static inline k_tid_t z_vrfy_k_sched_current_thread_query(void)
Andy Ross65649742019-08-06 13:34:31 -07001301{
Daniel Leung0a50ff32023-09-25 11:56:10 -07001302 return z_impl_k_sched_current_thread_query();
Andy Ross65649742019-08-06 13:34:31 -07001303}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001304#include <zephyr/syscalls/k_sched_current_thread_query_mrsh.c>
Simon Heinbcd1d192024-03-08 12:00:10 +01001305#endif /* CONFIG_USERSPACE */
Andrew Boie76c04a22017-09-27 14:45:10 -07001306
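/*
 * Illustrative note (hedged): this syscall is what a user-mode
 * k_current_get() falls back to when the current thread pointer is not
 * cached in thread-local storage; application code normally just calls
 * k_current_get().
 */
#if 0 /* example only */
static bool running_in(k_tid_t tid)
{
	return k_current_get() == tid;
}
#endif
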
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001307static inline void unpend_all(_wait_q_t *wait_q)
1308{
1309 struct k_thread *thread;
1310
Hess Nathan20b55422024-05-02 14:02:20 +02001311 for (thread = z_waitq_head(wait_q); thread != NULL; thread = z_waitq_head(wait_q)) {
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001312 unpend_thread_no_timeout(thread);
1313 (void)z_abort_thread_timeout(thread);
1314 arch_thread_return_value_set(thread, 0);
1315 ready_thread(thread);
1316 }
1317}
1318
Anas Nashifa6ce4222024-02-22 14:10:17 -05001319#ifdef CONFIG_THREAD_ABORT_HOOK
1320extern void thread_abort_hook(struct k_thread *thread);
Simon Heinbcd1d192024-03-08 12:00:10 +01001321#endif /* CONFIG_THREAD_ABORT_HOOK */
Chen Peng10f63d112021-09-06 13:59:40 +08001322
Peter Mitsise1db1ce2023-08-14 14:06:52 -04001323/**
1324 * @brief Dequeues the specified thread
1325 *
1326 * Dequeues the specified thread and move it into the specified new state.
1327 *
1328 * @param thread Identify the thread to halt
Peter Mitsise7986eb2023-08-14 16:41:05 -04001329 * @param thread Identifies the thread to halt
Peter Mitsise1db1ce2023-08-14 14:06:52 -04001330 */
1331static void halt_thread(struct k_thread *thread, uint8_t new_state)
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001332{
Andy Rossf0fd54c2024-03-26 08:38:01 -04001333 bool dummify = false;
1334
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001335 /* We hold the lock, and the thread is known not to be running
1336 * anywhere.
1337 */
Peter Mitsise1db1ce2023-08-14 14:06:52 -04001338 if ((thread->base.thread_state & new_state) == 0U) {
1339 thread->base.thread_state |= new_state;
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001340 if (z_is_thread_queued(thread)) {
Andy Rossc230fb32021-09-23 16:41:30 -07001341 dequeue_thread(thread);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001342 }
Peter Mitsise7986eb2023-08-14 16:41:05 -04001343
1344 if (new_state == _THREAD_DEAD) {
1345 if (thread->base.pended_on != NULL) {
1346 unpend_thread_no_timeout(thread);
1347 }
1348 (void)z_abort_thread_timeout(thread);
1349 unpend_all(&thread->join_queue);
Andy Rossf0fd54c2024-03-26 08:38:01 -04001350
1351 /* Edge case: aborting _current from within an
1352 * ISR that preempted it requires clearing the
1353 * _current pointer so the upcoming context
1354 * switch doesn't clobber the now-freed
1355 * memory
1356 */
1357 if (thread == _current && arch_is_in_isr()) {
1358 dummify = true;
1359 }
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001360 }
Peter Mitsise7986eb2023-08-14 16:41:05 -04001361#ifdef CONFIG_SMP
1362 unpend_all(&thread->halt_queue);
Simon Heinbcd1d192024-03-08 12:00:10 +01001363#endif /* CONFIG_SMP */
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001364 update_cache(1);
1365
Peter Mitsise7986eb2023-08-14 16:41:05 -04001366 if (new_state == _THREAD_SUSPENDED) {
Andy Ross47ab6632024-04-19 15:08:55 -07001367 clear_halting(thread);
Peter Mitsise7986eb2023-08-14 16:41:05 -04001368 return;
1369 }
1370
Grant Ramsay45701e62023-08-14 09:41:52 +12001371#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
1372 arch_float_disable(thread);
Simon Heinbcd1d192024-03-08 12:00:10 +01001373#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
Grant Ramsay45701e62023-08-14 09:41:52 +12001374
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001375 SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread);
1376
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001377 z_thread_monitor_exit(thread);
Anas Nashifa6ce4222024-02-22 14:10:17 -05001378#ifdef CONFIG_THREAD_ABORT_HOOK
1379 thread_abort_hook(thread);
Simon Heinbcd1d192024-03-08 12:00:10 +01001380#endif /* CONFIG_THREAD_ABORT_HOOK */
Chen Peng10f63d112021-09-06 13:59:40 +08001381
Peter Mitsis6df8efe2023-05-11 14:06:46 -04001382#ifdef CONFIG_OBJ_CORE_THREAD
Peter Mitsise6f10902023-06-01 12:16:40 -04001383#ifdef CONFIG_OBJ_CORE_STATS_THREAD
1384 k_obj_core_stats_deregister(K_OBJ_CORE(thread));
Simon Heinbcd1d192024-03-08 12:00:10 +01001385#endif /* CONFIG_OBJ_CORE_STATS_THREAD */
Peter Mitsis6df8efe2023-05-11 14:06:46 -04001386 k_obj_core_unlink(K_OBJ_CORE(thread));
Simon Heinbcd1d192024-03-08 12:00:10 +01001387#endif /* CONFIG_OBJ_CORE_THREAD */
Peter Mitsis6df8efe2023-05-11 14:06:46 -04001388
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001389#ifdef CONFIG_USERSPACE
1390 z_mem_domain_exit_thread(thread);
Anas Nashif70cf96b2023-09-27 10:45:48 +00001391 k_thread_perms_all_clear(thread);
Anas Nashif7a18c2b2023-09-27 10:45:18 +00001392 k_object_uninit(thread->stack_obj);
1393 k_object_uninit(thread);
Simon Heinbcd1d192024-03-08 12:00:10 +01001394#endif /* CONFIG_USERSPACE */
Daniel Leung378131c2024-03-26 11:54:31 -07001395
1396#ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
1397 k_thread_abort_cleanup(thread);
1398#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
Andy Rossf0fd54c2024-03-26 08:38:01 -04001399
1400 /* Do this "set _current to dummy" step last so that
1401 * subsystems above can rely on _current being
1402 * unchanged. Disabled for posix as that arch
1403 * continues to use the _current pointer in its swap
Andy Rossdec022a2024-04-29 12:50:41 -07001404 * code. Note that we must leave a non-null switch
1405 * handle for any threads spinning in join() (this can
1406 * never be used, as our thread is flagged dead, but
1407 * it must not be NULL otherwise join can deadlock).
Andy Rossf0fd54c2024-03-26 08:38:01 -04001408 */
1409 if (dummify && !IS_ENABLED(CONFIG_ARCH_POSIX)) {
Andy Rossdec022a2024-04-29 12:50:41 -07001410#ifdef CONFIG_USE_SWITCH
1411 _current->switch_handle = _current;
1412#endif
Andy Rossfd340eb2024-04-19 15:03:09 -07001413 z_dummy_thread_init(&_thread_dummy);
Andy Rossdec022a2024-04-29 12:50:41 -07001414
Andy Rossf0fd54c2024-03-26 08:38:01 -04001415 }
Andy Ross47ab6632024-04-19 15:08:55 -07001416
1417 /* Finally update the halting thread state, on which
1418 * other CPUs might be spinning (see
1419 * thread_halt_spin()).
1420 */
1421 clear_halting(thread);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001422 }
1423}
1424
1425void z_thread_abort(struct k_thread *thread)
1426{
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001427 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001428
Anas Nashif87910122024-02-22 22:24:36 -05001429 if (z_is_thread_essential(thread)) {
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001430 k_spin_unlock(&_sched_spinlock, key);
Andy Rossfb613592022-05-19 12:55:28 -07001431 __ASSERT(false, "aborting essential thread %p", thread);
1432 k_panic();
1433 return;
1434 }
1435
Anas Nashif3f4f3f62021-03-29 17:13:47 -04001436 if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001437 k_spin_unlock(&_sched_spinlock, key);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001438 return;
1439 }
1440
Peter Mitsise7986eb2023-08-14 16:41:05 -04001441 z_thread_halt(thread, key, true);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001442}
1443
1444#if !defined(CONFIG_ARCH_HAS_THREAD_ABORT)
1445void z_impl_k_thread_abort(struct k_thread *thread)
1446{
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001447 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread);
1448
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001449 z_thread_abort(thread);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001450
Andy Rossdec022a2024-04-29 12:50:41 -07001451 __ASSERT_NO_MSG((thread->base.thread_state & _THREAD_DEAD) != 0);
1452
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001453 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001454}
Simon Heinbcd1d192024-03-08 12:00:10 +01001455#endif /* !CONFIG_ARCH_HAS_THREAD_ABORT */
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001456
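/*
 * Illustrative sketch: stopping a worker from another thread.  Per the
 * assertion above, when k_thread_abort() returns the target is marked
 * dead; aborting an already-dead thread is a no-op, and a thread
 * aborting itself never returns.
 */
#if 0 /* example only */
void stop_worker(k_tid_t tid)
{
	k_thread_abort(tid);
	/* The target is now dead and will never run again */
}
#endif
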
1457int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
1458{
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001459 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
Hess Nathan7659cfd2024-04-29 16:31:47 +02001460 int ret;
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001461
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001462 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, join, thread, timeout);
1463
Anas Nashif3f4f3f62021-03-29 17:13:47 -04001464 if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
Andy Rossa08e23f2023-05-26 09:39:16 -07001465 z_sched_switch_spin(thread);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001466 ret = 0;
1467 } else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
1468 ret = -EBUSY;
Anas Nashif3f4f3f62021-03-29 17:13:47 -04001469 } else if ((thread == _current) ||
1470 (thread->base.pended_on == &_current->join_queue)) {
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001471 ret = -EDEADLK;
1472 } else {
1473 __ASSERT(!arch_is_in_isr(), "cannot join in ISR");
1474 add_to_waitq_locked(_current, &thread->join_queue);
1475 add_thread_timeout(_current, timeout);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001476
1477 SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout);
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001478 ret = z_swap(&_sched_spinlock, key);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001479 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);
1480
1481 return ret;
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001482 }
1483
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001484 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);
1485
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001486 k_spin_unlock(&_sched_spinlock, key);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001487 return ret;
1488}
1489
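/*
 * Illustrative sketch: reaping a short-lived worker with a bounded
 * join.  Stack size, priority and names are assumptions.  A timed-out
 * join yields -EAGAIN; joining an already-dead thread returns 0 at once
 * via the _THREAD_DEAD fast path above.
 */
#if 0 /* example only */
K_THREAD_STACK_DEFINE(job_stack, 1024);
static struct k_thread job;

static void job_entry(void *p1, void *p2, void *p3)
{
	/* ... do the work, then return to terminate ... */
}

int run_job(void)
{
	k_tid_t tid = k_thread_create(&job, job_stack,
				      K_THREAD_STACK_SIZEOF(job_stack),
				      job_entry, NULL, NULL, NULL,
				      K_PRIO_PREEMPT(8), 0, K_NO_WAIT);

	return k_thread_join(tid, K_MSEC(100));
}
#endif
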
Andrew Boie322816e2020-02-20 16:33:06 -08001490#ifdef CONFIG_USERSPACE
1491/* Special case: don't oops if the thread is uninitialized. This is because
1492 * the initialization bit does double-duty for thread objects; if false, means
1493 * the thread object is truly uninitialized, or the thread ran and exited for
1494 * some reason.
1495 *
1496 * Return true in this case indicating we should just do nothing and return
1497 * success to the caller.
1498 */
1499static bool thread_obj_validate(struct k_thread *thread)
1500{
Anas Nashifc25d0802023-09-27 10:49:28 +00001501 struct k_object *ko = k_object_find(thread);
Anas Nashif21254b22023-09-27 10:50:26 +00001502 int ret = k_object_validate(ko, K_OBJ_THREAD, _OBJ_INIT_TRUE);
Andrew Boie322816e2020-02-20 16:33:06 -08001503
1504 switch (ret) {
1505 case 0:
1506 return false;
1507 case -EINVAL:
1508 return true;
1509 default:
1510#ifdef CONFIG_LOG
Anas Nashif3ab35662023-09-27 10:51:23 +00001511 k_object_dump_error(ret, thread, ko, K_OBJ_THREAD);
Simon Heinbcd1d192024-03-08 12:00:10 +01001512#endif /* CONFIG_LOG */
Anas Nashifa08bfeb2023-09-27 11:20:28 +00001513 K_OOPS(K_SYSCALL_VERIFY_MSG(ret, "access denied"));
Andrew Boie322816e2020-02-20 16:33:06 -08001514 }
Enjia Mai53ca7092021-01-15 17:09:58 +08001515 CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
Andrew Boie322816e2020-02-20 16:33:06 -08001516}
1517
Andy Ross78327382020-03-05 15:18:14 -08001518static inline int z_vrfy_k_thread_join(struct k_thread *thread,
1519 k_timeout_t timeout)
Andrew Boie322816e2020-02-20 16:33:06 -08001520{
1521 if (thread_obj_validate(thread)) {
1522 return 0;
1523 }
1524
1525 return z_impl_k_thread_join(thread, timeout);
1526}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001527#include <zephyr/syscalls/k_thread_join_mrsh.c>
Andrew Boiea4c91902020-03-24 16:09:24 -07001528
1529static inline void z_vrfy_k_thread_abort(k_tid_t thread)
1530{
1531 if (thread_obj_validate(thread)) {
1532 return;
1533 }
1534
Anas Nashif87910122024-02-22 22:24:36 -05001535 K_OOPS(K_SYSCALL_VERIFY_MSG(!z_is_thread_essential(thread),
Andrew Boiea4c91902020-03-24 16:09:24 -07001536 "aborting essential thread %p", thread));
1537
1538 z_impl_k_thread_abort((struct k_thread *)thread);
1539}
Yong Cong Sinbbe5e1e2024-01-24 17:35:04 +08001540#include <zephyr/syscalls/k_thread_abort_mrsh.c>
Andrew Boie322816e2020-02-20 16:33:06 -08001541#endif /* CONFIG_USERSPACE */
Peter Bigot0259c862021-01-12 13:45:32 -06001542
1543/*
1544 * future scheduler.h API implementations
1545 */
1546bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data)
1547{
1548 struct k_thread *thread;
1549 bool ret = false;
1550
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001551 K_SPINLOCK(&_sched_spinlock) {
Peter Bigot0259c862021-01-12 13:45:32 -06001552 thread = _priq_wait_best(&wait_q->waitq);
1553
1554 if (thread != NULL) {
1555 z_thread_return_value_set_with_data(thread,
1556 swap_retval,
1557 swap_data);
1558 unpend_thread_no_timeout(thread);
1559 (void)z_abort_thread_timeout(thread);
1560 ready_thread(thread);
1561 ret = true;
1562 }
1563 }
1564
1565 return ret;
1566}
1567
1568int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
1569 _wait_q_t *wait_q, k_timeout_t timeout, void **data)
1570{
1571 int ret = z_pend_curr(lock, key, wait_q, timeout);
1572
1573 if (data != NULL) {
1574 *data = _current->base.swap_data;
1575 }
1576 return ret;
1577}
Peter Mitsisca583392023-01-05 11:50:21 -05001578
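/*
 * Illustrative sketch of how a kernel IPC primitive could sit on top of
 * z_sched_wake()/z_sched_wait() above.  The simple_event type and its
 * functions are assumptions for the example, not Zephyr APIs; a
 * statically allocated (zero-initialized) spinlock needs no explicit
 * init.
 */
#if 0 /* example only */
struct simple_event {
	struct k_spinlock lock;
	_wait_q_t waitq;
};

static void simple_event_init(struct simple_event *evt)
{
	z_waitq_init(&evt->waitq);
}

static int simple_event_wait(struct simple_event *evt, k_timeout_t timeout)
{
	k_spinlock_key_t key = k_spin_lock(&evt->lock);

	/* Drops the lock, pends the caller, and returns the waker's
	 * retval (0 here) or -EAGAIN on timeout.
	 */
	return z_sched_wait(&evt->lock, key, &evt->waitq, timeout, NULL);
}

static void simple_event_signal(struct simple_event *evt)
{
	K_SPINLOCK(&evt->lock) {
		(void)z_sched_wake(&evt->waitq, 0, NULL);
	}
}
#endif
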
1579int z_sched_waitq_walk(_wait_q_t *wait_q,
1580 int (*func)(struct k_thread *, void *), void *data)
1581{
1582 struct k_thread *thread;
1583 int status = 0;
1584
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001585 K_SPINLOCK(&_sched_spinlock) {
Peter Mitsisca583392023-01-05 11:50:21 -05001586 _WAIT_Q_FOR_EACH(wait_q, thread) {
1587
1588 /*
1589 * Invoke the callback function on each waiting thread
1590 * for as long as there are both waiting threads AND
1591 * it returns 0.
1592 */
1593
1594 status = func(thread, data);
1595 if (status != 0) {
1596 break;
1597 }
1598 }
1599 }
1600
1601 return status;
1602}
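/*
 * Illustrative sketch of a walk callback for the helper above: count
 * the waiters on a wait queue without waking them.  Returning 0 keeps
 * the iteration going; a nonzero return stops it early.
 */
#if 0 /* example only */
static int count_one(struct k_thread *thread, void *data)
{
	ARG_UNUSED(thread);

	*(int *)data += 1;
	return 0;
}

static int count_waiters(_wait_q_t *wait_q)
{
	int count = 0;

	(void)z_sched_waitq_walk(wait_q, count_one, &count);
	return count;
}
#endif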