/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/spinlock.h>
#include <wait_q.h>
#include <kthread.h>
#include <priority_q.h>
#include <kswap.h>
#include <kernel_arch_func.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <stdbool.h>
#include <kernel_internal.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/timing/timing.h>
#include <zephyr/sys/util.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

struct k_spinlock _sched_spinlock;

static void update_cache(int preempt_ok);
static void halt_thread(struct k_thread *thread, uint8_t new_state);
static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q);

static inline int is_preempt(struct k_thread *thread)
{
	/* explanation in kernel_structs.h */
	return thread->base.preempt <= _PREEMPT_THRESHOLD;
}

BUILD_ASSERT(CONFIG_NUM_COOP_PRIORITIES >= CONFIG_NUM_METAIRQ_PRIORITIES,
	     "You need to provide at least as many CONFIG_NUM_COOP_PRIORITIES as "
	     "CONFIG_NUM_METAIRQ_PRIORITIES as Meta IRQs are just a special class of cooperative "
	     "threads.");

static inline int is_metairq(struct k_thread *thread)
{
#if CONFIG_NUM_METAIRQ_PRIORITIES > 0
	return (thread->base.prio - K_HIGHEST_THREAD_PRIO)
		< CONFIG_NUM_METAIRQ_PRIORITIES;
#else
	ARG_UNUSED(thread);
	return 0;
#endif
}
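
/* Illustration (not part of the upstream code, assuming the usual definition
 * K_HIGHEST_THREAD_PRIO == -CONFIG_NUM_COOP_PRIORITIES): with
 * CONFIG_NUM_COOP_PRIORITIES=16 and CONFIG_NUM_METAIRQ_PRIORITIES=2, the two
 * most negative priorities (-16 and -15) are classified as meta-IRQ threads
 * by is_metairq(); every other priority is an ordinary thread.
 */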

#if CONFIG_ASSERT
static inline bool is_thread_dummy(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_DUMMY) != 0U;
}
#endif

/*
 * Return value same as e.g. memcmp
 * > 0 -> thread 1 priority  > thread 2 priority
 * = 0 -> thread 1 priority == thread 2 priority
 * < 0 -> thread 1 priority  < thread 2 priority
 * Do not rely on the actual value returned aside from the above.
 * (Again, like memcmp.)
 */
int32_t z_sched_prio_cmp(struct k_thread *thread_1,
			 struct k_thread *thread_2)
{
	/* `prio` is <32b, so the below cannot overflow. */
	int32_t b1 = thread_1->base.prio;
	int32_t b2 = thread_2->base.prio;

	if (b1 != b2) {
		return b2 - b1;
	}

#ifdef CONFIG_SCHED_DEADLINE
	/* If we assume all deadlines live within the same "half" of
	 * the 32 bit modulus space (this is a documented API rule),
	 * then the latest deadline in the queue minus the earliest is
	 * guaranteed to be (2's complement) non-negative.  We can
	 * leverage that to compare the values without having to check
	 * the current time.
	 */
	uint32_t d1 = thread_1->base.prio_deadline;
	uint32_t d2 = thread_2->base.prio_deadline;

	if (d1 != d2) {
		/* Sooner deadline means higher effective priority.
		 * Doing the calculation with unsigned types and casting
		 * to signed isn't perfect, but at least reduces this
		 * from UB on overflow to impdef.
		 */
		return (int32_t) (d2 - d1);
	}
#endif
	return 0;
}
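
/* Worked example (illustrative only): with Zephyr's convention that a
 * numerically lower prio value is a higher priority, b1 == -2 and b2 == 5
 * gives 5 - (-2) == 7 > 0, i.e. thread_1 has the higher priority.  For the
 * CONFIG_SCHED_DEADLINE tie-break, wraparound is absorbed by the unsigned
 * subtraction: d1 == 0xFFFFFFF0 (just before wrap) and d2 == 0x10 (just
 * after wrap) yields (int32_t)(d2 - d1) == +0x20, so thread_1, which holds
 * the sooner deadline, still compares as higher priority.
 */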

static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
					 int preempt_ok)
{
	/* Preemption is OK if it's being explicitly allowed by
	 * software state (e.g. the thread called k_yield())
	 */
	if (preempt_ok != 0) {
		return true;
	}

	__ASSERT(_current != NULL, "");

	/* Or if we're pended/suspended/dummy (duh) */
	if (z_is_thread_prevented_from_running(_current)) {
		return true;
	}

	/* Edge case on ARM where a thread can be pended out of an
	 * interrupt handler before the "synchronous" swap starts
	 * context switching.  Platforms with atomic swap can never
	 * hit this.
	 */
	if (IS_ENABLED(CONFIG_SWAP_NONATOMIC)
	    && z_is_thread_timeout_active(thread)) {
		return true;
	}

	/* Otherwise we have to be running a preemptible thread or
	 * switching to a metairq
	 */
	if (is_preempt(_current) || is_metairq(thread)) {
		return true;
	}

	return false;
}

#ifdef CONFIG_SCHED_CPU_MASK
static ALWAYS_INLINE struct k_thread *_priq_dumb_mask_best(sys_dlist_t *pq)
{
	/* With masks enabled we need to be prepared to walk the list
	 * looking for one we can run
	 */
	struct k_thread *thread;

	SYS_DLIST_FOR_EACH_CONTAINER(pq, thread, base.qnode_dlist) {
		if ((thread->base.cpu_mask & BIT(_current_cpu->id)) != 0) {
			return thread;
		}
	}
	return NULL;
}
#endif

#if defined(CONFIG_SCHED_DUMB) || defined(CONFIG_WAITQ_DUMB)
static ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq,
					  struct k_thread *thread)
{
	struct k_thread *t;

	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	SYS_DLIST_FOR_EACH_CONTAINER(pq, t, base.qnode_dlist) {
		if (z_sched_prio_cmp(thread, t) > 0) {
			sys_dlist_insert(&t->base.qnode_dlist,
					 &thread->base.qnode_dlist);
			return;
		}
	}

	sys_dlist_append(pq, &thread->base.qnode_dlist);
}
#endif

static ALWAYS_INLINE void *thread_runq(struct k_thread *thread)
{
#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	int cpu, m = thread->base.cpu_mask;

	/* Edge case: it's legal per the API to "make runnable" a
	 * thread with all CPUs masked off (i.e. one that isn't
	 * actually runnable!).  Sort of a wart in the API and maybe
	 * we should address this in docs/assertions instead to avoid
	 * the extra test.
	 */
	cpu = m == 0 ? 0 : u32_count_trailing_zeros(m);

	return &_kernel.cpus[cpu].ready_q.runq;
#else
	ARG_UNUSED(thread);
	return &_kernel.ready_q.runq;
#endif
}
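
/* Illustrative note (not upstream): with CONFIG_SCHED_CPU_MASK_PIN_ONLY a
 * thread is expected to be pinned to a single CPU, so the lowest set bit of
 * cpu_mask selects the per-CPU run queue.  For example a mask of 0x4
 * (BIT(2)) maps to _kernel.cpus[2].ready_q.runq, while the all-zero edge
 * case described above falls back to CPU 0's queue.
 */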

static ALWAYS_INLINE void *curr_cpu_runq(void)
{
#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	return &arch_curr_cpu()->ready_q.runq;
#else
	return &_kernel.ready_q.runq;
#endif
}

static ALWAYS_INLINE void runq_add(struct k_thread *thread)
{
	_priq_run_add(thread_runq(thread), thread);
}

static ALWAYS_INLINE void runq_remove(struct k_thread *thread)
{
	_priq_run_remove(thread_runq(thread), thread);
}

static ALWAYS_INLINE struct k_thread *runq_best(void)
{
	return _priq_run_best(curr_cpu_runq());
}

/* _current is never in the run queue until context switch on
 * SMP configurations, see z_requeue_current()
 */
static inline bool should_queue_thread(struct k_thread *thread)
{
	return !IS_ENABLED(CONFIG_SMP) || thread != _current;
}

static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_QUEUED;
	if (should_queue_thread(thread)) {
		runq_add(thread);
	}
#ifdef CONFIG_SMP
	if (thread == _current) {
		/* adding current to the end of the queue means "yield" */
		_current_cpu->swap_ok = true;
	}
#endif
}

static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_QUEUED;
	if (should_queue_thread(thread)) {
		runq_remove(thread);
	}
}

static void signal_pending_ipi(void)
{
	/* Synchronization note: you might think we need to lock these
	 * two steps, but an IPI is idempotent.  It's OK if we do it
	 * twice.  All we require is that if a CPU sees the flag true,
	 * it is guaranteed to send the IPI, and if a core sets
	 * pending_ipi, the IPI will be sent the next time through
	 * this code.
	 */
#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
	if (arch_num_cpus() > 1) {
		if (_kernel.pending_ipi) {
			_kernel.pending_ipi = false;
			arch_sched_ipi();
		}
	}
#endif
}

#ifdef CONFIG_SMP
/* Called out of z_swap() when CONFIG_SMP.  The current thread can
 * never live in the run queue until we are inexorably on the context
 * switch path on SMP, otherwise there is a deadlock condition where a
 * set of CPUs pick a cycle of threads to run and wait for them all to
 * context switch forever.
 */
void z_requeue_current(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		runq_add(thread);
	}
	signal_pending_ipi();
}

/* Return true if the thread is aborting, else false */
static inline bool is_aborting(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_ABORTING) != 0U;
}

/* Return true if the thread is aborting or suspending, else false */
static inline bool is_halting(struct k_thread *thread)
{
	return (thread->base.thread_state &
		(_THREAD_ABORTING | _THREAD_SUSPENDING)) != 0U;
}
#endif

/* Clear the halting bits (_THREAD_ABORTING and _THREAD_SUSPENDING) */
static inline void clear_halting(struct k_thread *thread)
{
	thread->base.thread_state &= ~(_THREAD_ABORTING | _THREAD_SUSPENDING);
}

static ALWAYS_INLINE struct k_thread *next_up(void)
{
#ifdef CONFIG_SMP
	if (is_halting(_current)) {
		halt_thread(_current, is_aborting(_current) ?
				      _THREAD_DEAD : _THREAD_SUSPENDED);
	}
#endif

	struct k_thread *thread = runq_best();

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && \
	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
	/* MetaIRQs must always attempt to return back to a
	 * cooperative thread they preempted and not whatever happens
	 * to be highest priority now. The cooperative thread was
	 * promised it wouldn't be preempted (by non-metairq threads)!
	 */
	struct k_thread *mirqp = _current_cpu->metairq_preempted;

	if (mirqp != NULL && (thread == NULL || !is_metairq(thread))) {
		if (!z_is_thread_prevented_from_running(mirqp)) {
			thread = mirqp;
		} else {
			_current_cpu->metairq_preempted = NULL;
		}
	}
#endif

#ifndef CONFIG_SMP
	/* In uniprocessor mode, we can leave the current thread in
	 * the queue (actually we have to, otherwise the assembly
	 * context switch code for all architectures would be
	 * responsible for putting it back in z_swap and ISR return!),
	 * which makes this choice simple.
	 */
	return (thread != NULL) ? thread : _current_cpu->idle_thread;
#else
	/* Under SMP, the "cache" mechanism for selecting the next
	 * thread doesn't work, so we have more work to do to test
	 * _current against the best choice from the queue.  Here, the
	 * thread selected above represents "the best thread that is
	 * not current".
	 *
	 * Subtle note on "queued": in SMP mode, _current does not
	 * live in the queue, so this isn't exactly the same thing as
	 * "ready", it means "is _current already added back to the
	 * queue such that we don't want to re-add it".
	 */
	bool queued = z_is_thread_queued(_current);
	bool active = !z_is_thread_prevented_from_running(_current);

	if (thread == NULL) {
		thread = _current_cpu->idle_thread;
	}

	if (active) {
		int32_t cmp = z_sched_prio_cmp(_current, thread);

		/* Ties only switch if state says we yielded */
		if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) {
			thread = _current;
		}

		if (!should_preempt(thread, _current_cpu->swap_ok)) {
			thread = _current;
		}
	}

	/* Put _current back into the queue */
	if (thread != _current && active &&
	    !z_is_idle_thread_object(_current) && !queued) {
		queue_thread(_current);
	}

	/* Take the new _current out of the queue */
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}

	_current_cpu->swap_ok = false;
	return thread;
#endif
}

static void move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}
	queue_thread(thread);
	update_cache(thread == _current);
}

static void flag_ipi(void)
{
#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
	if (arch_num_cpus() > 1) {
		_kernel.pending_ipi = true;
	}
#endif
}

#ifdef CONFIG_TIMESLICING

static int slice_ticks = DIV_ROUND_UP(CONFIG_TIMESLICE_SIZE * Z_HZ_ticks, Z_HZ_ms);
static int slice_max_prio = CONFIG_TIMESLICE_PRIORITY;
static struct _timeout slice_timeouts[CONFIG_MP_MAX_NUM_CPUS];
static bool slice_expired[CONFIG_MP_MAX_NUM_CPUS];

#ifdef CONFIG_SWAP_NONATOMIC
/* If z_swap() isn't atomic, then it's possible for a timer interrupt
 * to try to timeslice away _current after it has already pended
 * itself but before the corresponding context switch.  Treat that as
 * a noop condition in z_time_slice().
 */
static struct k_thread *pending_current;
#endif

static inline int slice_time(struct k_thread *thread)
{
	int ret = slice_ticks;

#ifdef CONFIG_TIMESLICE_PER_THREAD
	if (thread->base.slice_ticks != 0) {
		ret = thread->base.slice_ticks;
	}
#else
	ARG_UNUSED(thread);
#endif
	return ret;
}

static inline bool sliceable(struct k_thread *thread)
{
	bool ret = is_preempt(thread)
		&& slice_time(thread) != 0
		&& !z_is_prio_higher(thread->base.prio, slice_max_prio)
		&& !z_is_thread_prevented_from_running(thread)
		&& !z_is_idle_thread_object(thread);

#ifdef CONFIG_TIMESLICE_PER_THREAD
	ret |= thread->base.slice_ticks != 0;
#endif

	return ret;
}

static void slice_timeout(struct _timeout *timeout)
{
	int cpu = ARRAY_INDEX(slice_timeouts, timeout);

	slice_expired[cpu] = true;

	/* We need an IPI if we just handled a timeslice expiration
	 * for a different CPU.  Ideally this would be able to target
	 * the specific core, but that's not part of the API yet.
	 */
	if (IS_ENABLED(CONFIG_SMP) && cpu != _current_cpu->id) {
		flag_ipi();
	}
}

void z_reset_time_slice(struct k_thread *thread)
{
	int cpu = _current_cpu->id;

	z_abort_timeout(&slice_timeouts[cpu]);
	slice_expired[cpu] = false;
	if (sliceable(thread)) {
		z_add_timeout(&slice_timeouts[cpu], slice_timeout,
			      K_TICKS(slice_time(thread) - 1));
	}
}

void k_sched_time_slice_set(int32_t slice, int prio)
{
	K_SPINLOCK(&_sched_spinlock) {
		slice_ticks = k_ms_to_ticks_ceil32(slice);
		slice_max_prio = prio;
		z_reset_time_slice(_current);
	}
}
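
/* Usage sketch (illustrative only): enable 10 ms round-robin slicing for
 * preemptible threads whose priority is no higher than 0:
 *
 *	k_sched_time_slice_set(10, 0);
 *
 * Passing a slice of 0 effectively disables global slicing again, since
 * sliceable() above requires a non-zero slice_time().
 */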

#ifdef CONFIG_TIMESLICE_PER_THREAD
void k_thread_time_slice_set(struct k_thread *thread, int32_t thread_slice_ticks,
			     k_thread_timeslice_fn_t expired, void *data)
{
	K_SPINLOCK(&_sched_spinlock) {
		thread->base.slice_ticks = thread_slice_ticks;
		thread->base.slice_expired = expired;
		thread->base.slice_data = data;
	}
}
#endif
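
/* Usage sketch (illustrative; "budget_expired" and "my_thread" are
 * hypothetical names): give one thread a 100-tick budget and a callback
 * that fires when the budget is used up:
 *
 *	static void budget_expired(struct k_thread *thread, void *data)
 *	{
 *		...account for CPU usage, maybe throttle the thread...
 *	}
 *
 *	k_thread_time_slice_set(&my_thread, 100, budget_expired, NULL);
 *
 * As z_time_slice() below shows, the callback runs with the scheduler
 * lock dropped and matches the (struct k_thread *, void *) call made there.
 */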

/* Called out of each timer interrupt */
void z_time_slice(void)
{
	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
	struct k_thread *curr = _current;

#ifdef CONFIG_SWAP_NONATOMIC
	if (pending_current == curr) {
		z_reset_time_slice(curr);
		k_spin_unlock(&_sched_spinlock, key);
		return;
	}
	pending_current = NULL;
#endif

	if (slice_expired[_current_cpu->id] && sliceable(curr)) {
#ifdef CONFIG_TIMESLICE_PER_THREAD
		if (curr->base.slice_expired) {
			k_spin_unlock(&_sched_spinlock, key);
			curr->base.slice_expired(curr, curr->base.slice_data);
			key = k_spin_lock(&_sched_spinlock);
		}
#endif
		if (!z_is_thread_prevented_from_running(curr)) {
			move_thread_to_end_of_prio_q(curr);
		}
		z_reset_time_slice(curr);
	}
	k_spin_unlock(&_sched_spinlock, key);
}
#endif

/* Track cooperative threads preempted by metairqs so we can return to
 * them specifically.  Called at the moment a new thread has been
 * selected to run.
 */
static void update_metairq_preempt(struct k_thread *thread)
{
#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && \
	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
	if (is_metairq(thread) && !is_metairq(_current) &&
	    !is_preempt(_current)) {
		/* Record new preemption */
		_current_cpu->metairq_preempted = _current;
	} else if (!is_metairq(thread) && !z_is_idle_thread_object(thread)) {
		/* Returning from existing preemption */
		_current_cpu->metairq_preempted = NULL;
	}
#else
	ARG_UNUSED(thread);
#endif
}

static void update_cache(int preempt_ok)
{
#ifndef CONFIG_SMP
	struct k_thread *thread = next_up();

	if (should_preempt(thread, preempt_ok)) {
#ifdef CONFIG_TIMESLICING
		if (thread != _current) {
			z_reset_time_slice(thread);
		}
#endif
		update_metairq_preempt(thread);
		_kernel.ready_q.cache = thread;
	} else {
		_kernel.ready_q.cache = _current;
	}

#else
	/* The way this works is that the CPU record keeps its
	 * "cooperative swapping is OK" flag until the next reschedule
	 * call or context switch.  It doesn't need to be tracked per
	 * thread because if the thread gets preempted for whatever
	 * reason the scheduler will make the same decision anyway.
	 */
	_current_cpu->swap_ok = preempt_ok;
#endif
}

static bool thread_active_elsewhere(struct k_thread *thread)
{
	/* True if the thread is currently running on another CPU.
	 * There are more scalable designs to answer this question in
	 * constant time, but this is fine for now.
	 */
#ifdef CONFIG_SMP
	int currcpu = _current_cpu->id;

	unsigned int num_cpus = arch_num_cpus();

	for (int i = 0; i < num_cpus; i++) {
		if ((i != currcpu) &&
		    (_kernel.cpus[i].current == thread)) {
			return true;
		}
	}
#endif
	ARG_UNUSED(thread);
	return false;
}

static void ready_thread(struct k_thread *thread)
{
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(thread));
#endif

	/* If the thread is queued already, do not try to add it to the
	 * run queue again
	 */
	if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) {
		SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_ready, thread);

		queue_thread(thread);
		update_cache(0);
		flag_ipi();
	}
}

void z_ready_thread(struct k_thread *thread)
{
	K_SPINLOCK(&_sched_spinlock) {
		if (!thread_active_elsewhere(thread)) {
			ready_thread(thread);
		}
	}
}

void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	K_SPINLOCK(&_sched_spinlock) {
		move_thread_to_end_of_prio_q(thread);
	}
}

void z_sched_start(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

	if (z_has_thread_started(thread)) {
		k_spin_unlock(&_sched_spinlock, key);
		return;
	}

	z_mark_thread_as_started(thread);
	ready_thread(thread);
	z_reschedule(&_sched_spinlock, key);
}

/**
 * @brief Halt a thread
 *
 * If the target thread is running on another CPU, flag it as needing to
 * abort and send an IPI (if supported) to force a schedule point and wait
 * until the target thread is switched out (ISRs will spin to wait and threads
 * will block to wait). If the target thread is not running on another CPU,
 * then it is safe to act immediately.
 *
 * Upon entry to this routine, the scheduler lock is already held. It is
 * released before this routine returns.
 *
 * @param thread Thread to suspend or abort
 * @param key Current key for _sched_spinlock
 * @param terminate True if aborting thread, false if suspending thread
 */
static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
			  bool terminate)
{
#ifdef CONFIG_SMP
	if (is_halting(_current) && arch_is_in_isr()) {
		/* Another CPU (in an ISR) or thread is waiting for the
		 * current thread to halt. Halt it now to help avoid a
		 * potential deadlock.
		 */
		halt_thread(_current,
			    is_aborting(_current) ? _THREAD_DEAD
						  : _THREAD_SUSPENDED);
	}

	bool active = thread_active_elsewhere(thread);

	if (active) {
		/* It's running somewhere else, flag and poke */
		thread->base.thread_state |= (terminate ? _THREAD_ABORTING
							: _THREAD_SUSPENDING);

		/* We might spin to wait, so a true synchronous IPI is needed
		 * here, not deferred!
		 */
#ifdef CONFIG_SCHED_IPI_SUPPORTED
		arch_sched_ipi();
#endif
	}

	if (is_halting(thread) && (thread != _current)) {
		if (arch_is_in_isr()) {
			/* ISRs can only spin waiting another CPU */
			k_spin_unlock(&_sched_spinlock, key);
			while (is_halting(thread)) {
			}

			/* Now we know it's halting, but not necessarily
			 * halted (suspended or aborted). Wait for the switch
			 * to happen!
			 */
			key = k_spin_lock(&_sched_spinlock);
			z_sched_switch_spin(thread);
			k_spin_unlock(&_sched_spinlock, key);
		} else if (active) {
			/* Threads can wait on a queue */
			add_to_waitq_locked(_current, terminate ?
						      &thread->join_queue :
						      &thread->halt_queue);
			z_swap(&_sched_spinlock, key);
		}
		return; /* lock has been released */
	}
#endif
	halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
	if ((thread == _current) && !arch_is_in_isr()) {
		z_swap(&_sched_spinlock, key);
		__ASSERT(!terminate, "aborted _current back from dead");
	} else {
		k_spin_unlock(&_sched_spinlock, key);
	}
}

void z_impl_k_thread_suspend(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, suspend, thread);

	(void)z_abort_thread_timeout(thread);

	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

	if ((thread->base.thread_state & _THREAD_SUSPENDED) != 0U) {

		/* The target thread is already suspended. Nothing to do. */

		k_spin_unlock(&_sched_spinlock, key);
		return;
	}

	z_thread_halt(thread, key, false);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, suspend, thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_suspend(struct k_thread *thread)
{
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_suspend(thread);
}
#include <syscalls/k_thread_suspend_mrsh.c>
#endif

void z_impl_k_thread_resume(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, resume, thread);

	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

	/* Do not try to resume a thread that was not suspended */
	if (!z_is_thread_suspended(thread)) {
		k_spin_unlock(&_sched_spinlock, key);
		return;
	}

	z_mark_thread_as_not_suspended(thread);
	ready_thread(thread);

	z_reschedule(&_sched_spinlock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, resume, thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_resume(struct k_thread *thread)
{
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_resume(thread);
}
#include <syscalls/k_thread_resume_mrsh.c>
#endif
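
/* Usage sketch (illustrative; "my_thread" is a hypothetical thread object
 * defined elsewhere):
 *
 *	k_thread_suspend(&my_thread);   // halted until explicitly resumed
 *	...
 *	k_thread_resume(&my_thread);    // make it schedulable again
 *
 * k_thread_resume() is a no-op when the target was never suspended, as the
 * z_is_thread_suspended() check above shows.
 */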

static _wait_q_t *pended_on_thread(struct k_thread *thread)
{
	__ASSERT_NO_MSG(thread->base.pended_on);

	return thread->base.pended_on;
}

static void unready_thread(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}
	update_cache(thread == _current);
}

/* _sched_spinlock must be held */
static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q)
{
	unready_thread(thread);
	z_mark_thread_as_pending(thread);

	SYS_PORT_TRACING_FUNC(k_thread, sched_pend, thread);

	if (wait_q != NULL) {
		thread->base.pended_on = wait_q;
		z_priq_wait_add(&wait_q->waitq, thread);
	}
}

static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
{
	if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		z_add_thread_timeout(thread, timeout);
	}
}

static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q,
			k_timeout_t timeout)
{
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(wait_q == NULL || arch_mem_coherent(wait_q));
#endif
	add_to_waitq_locked(thread, wait_q);
	add_thread_timeout(thread, timeout);
}

void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
		   k_timeout_t timeout)
{
	__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
	K_SPINLOCK(&_sched_spinlock) {
		pend_locked(thread, wait_q, timeout);
	}
}

static inline void unpend_thread_no_timeout(struct k_thread *thread)
{
	_priq_wait_remove(&pended_on_thread(thread)->waitq, thread);
	z_mark_thread_as_not_pending(thread);
	thread->base.pended_on = NULL;
}

ALWAYS_INLINE void z_unpend_thread_no_timeout(struct k_thread *thread)
{
	K_SPINLOCK(&_sched_spinlock) {
		if (thread->base.pended_on != NULL) {
			unpend_thread_no_timeout(thread);
		}
	}
}

void z_sched_wake_thread(struct k_thread *thread, bool is_timeout)
{
	K_SPINLOCK(&_sched_spinlock) {
		bool killed = (thread->base.thread_state &
			       (_THREAD_DEAD | _THREAD_ABORTING));

#ifdef CONFIG_EVENTS
		bool do_nothing = thread->no_wake_on_timeout && is_timeout;

		thread->no_wake_on_timeout = false;

		if (do_nothing) {
			continue;
		}
#endif

		if (!killed) {
			/* The thread is not being killed */
			if (thread->base.pended_on != NULL) {
				unpend_thread_no_timeout(thread);
			}
			z_mark_thread_as_started(thread);
			if (is_timeout) {
				z_mark_thread_as_not_suspended(thread);
			}
			ready_thread(thread);
		}
	}
}

#ifdef CONFIG_SYS_CLOCK_EXISTS
/* Timeout handler for *_thread_timeout() APIs */
void z_thread_timeout(struct _timeout *timeout)
{
	struct k_thread *thread = CONTAINER_OF(timeout,
					       struct k_thread, base.timeout);

	z_sched_wake_thread(thread, true);
}
#endif

int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
		_wait_q_t *wait_q, k_timeout_t timeout)
{
#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
	pending_current = _current;
#endif
	__ASSERT_NO_MSG(sizeof(_sched_spinlock) == 0 || lock != &_sched_spinlock);

	/* We do a "lock swap" prior to calling z_swap(), such that
	 * the caller's lock gets released as desired.  But we ensure
	 * that we hold the scheduler lock and leave local interrupts
	 * masked until we reach the context switch.  z_swap() itself
	 * has similar code; the duplication is because it's a legacy
	 * API that doesn't expect to be called with scheduler lock
	 * held.
	 */
	(void) k_spin_lock(&_sched_spinlock);
	pend_locked(_current, wait_q, timeout);
	k_spin_release(lock);
	return z_swap(&_sched_spinlock, key);
}
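
/* Caller pattern sketch (illustrative; "my_lock" and "my_waitq" are
 * hypothetical objects owned by a synchronization primitive):
 *
 *	k_spinlock_key_t key = k_spin_lock(&my_lock);
 *	...update the primitive's state...
 *	return z_pend_curr(&my_lock, key, &my_waitq, K_MSEC(100));
 *
 * The primitive's own lock is released inside z_pend_curr() via
 * k_spin_release(), while the scheduler lock stays held across the pend
 * and the subsequent z_swap().
 */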

struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
{
	struct k_thread *thread = NULL;

	K_SPINLOCK(&_sched_spinlock) {
		thread = _priq_wait_best(&wait_q->waitq);

		if (thread != NULL) {
			unpend_thread_no_timeout(thread);
		}
	}

	return thread;
}

struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
{
	struct k_thread *thread = NULL;

	K_SPINLOCK(&_sched_spinlock) {
		thread = _priq_wait_best(&wait_q->waitq);

		if (thread != NULL) {
			unpend_thread_no_timeout(thread);
			(void)z_abort_thread_timeout(thread);
		}
	}

	return thread;
}

void z_unpend_thread(struct k_thread *thread)
{
	z_unpend_thread_no_timeout(thread);
	(void)z_abort_thread_timeout(thread);
}

/* Priority set utility that does no rescheduling, it just changes the
 * run queue state, returning true if a reschedule is needed later.
 */
bool z_thread_prio_set(struct k_thread *thread, int prio)
{
	bool need_sched = 0;

	K_SPINLOCK(&_sched_spinlock) {
		need_sched = z_is_thread_ready(thread);

		if (need_sched) {
			/* Don't requeue on SMP if it's the running thread */
			if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) {
				dequeue_thread(thread);
				thread->base.prio = prio;
				queue_thread(thread);
			} else {
				thread->base.prio = prio;
			}
			update_cache(1);
		} else {
			thread->base.prio = prio;
		}
	}

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_priority_set, thread, prio);

	return need_sched;
}

static inline bool resched(uint32_t key)
{
#ifdef CONFIG_SMP
	_current_cpu->swap_ok = 0;
#endif

	return arch_irq_unlocked(key) && !arch_is_in_isr();
}

/*
 * Check if the next ready thread is the same as the current thread
 * and save the trip if true.
 */
static inline bool need_swap(void)
{
	/* the SMP case will be handled in C based z_swap() */
#ifdef CONFIG_SMP
	return true;
#else
	struct k_thread *new_thread;

	/* Check if the next ready thread is the same as the current thread */
	new_thread = _kernel.ready_q.cache;
	return new_thread != _current;
#endif
}

void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
{
	if (resched(key.key) && need_swap()) {
		z_swap(lock, key);
	} else {
		k_spin_unlock(lock, key);
		signal_pending_ipi();
	}
}

void z_reschedule_irqlock(uint32_t key)
{
	if (resched(key) && need_swap()) {
		z_swap_irqlock(key);
	} else {
		irq_unlock(key);
		signal_pending_ipi();
	}
}

void k_sched_lock(void)
{
	K_SPINLOCK(&_sched_spinlock) {
		SYS_PORT_TRACING_FUNC(k_thread, sched_lock);

		z_sched_lock();
	}
}

void k_sched_unlock(void)
{
	K_SPINLOCK(&_sched_spinlock) {
		__ASSERT(_current->base.sched_locked != 0U, "");
		__ASSERT(!arch_is_in_isr(), "");

		++_current->base.sched_locked;
		update_cache(0);
	}

	LOG_DBG("scheduler unlocked (%p:%d)",
		_current, _current->base.sched_locked);

	SYS_PORT_TRACING_FUNC(k_thread, sched_unlock);

	z_reschedule_unlocked();
}
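
/* Usage sketch (illustrative): from thread context,
 *
 *	k_sched_lock();
 *	...section that must not be preempted by other threads...
 *	k_sched_unlock();
 *
 * The increment of sched_locked above pairs with the corresponding
 * decrement in z_sched_lock(); the assertion documents that unlocking is
 * only legal while the counter is non-zero.
 */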

struct k_thread *z_swap_next_thread(void)
{
#ifdef CONFIG_SMP
	struct k_thread *ret = next_up();

	if (ret == _current) {
		/* When not swapping, have to signal IPIs here.  In
		 * the context switch case it must happen later, after
		 * _current gets requeued.
		 */
		signal_pending_ipi();
	}
	return ret;
#else
	return _kernel.ready_q.cache;
#endif
}

#ifdef CONFIG_USE_SWITCH
/* Just a wrapper around _current = xxx with tracing */
static inline void set_current(struct k_thread *new_thread)
{
	z_thread_mark_switched_out();
	_current_cpu->current = new_thread;
}
1092
Nicolas Pitrec9e3e0d2022-03-15 22:36:20 -04001093/**
1094 * @brief Determine next thread to execute upon completion of an interrupt
1095 *
1096 * Thread preemption is performed by context switching after the completion
1097 * of a non-recursed interrupt. This function determines which thread to
1098 * switch to if any. This function accepts as @p interrupted either:
1099 *
1100 * - The handle for the interrupted thread in which case the thread's context
1101 * must already be fully saved and ready to be picked up by a different CPU.
1102 *
1103 * - NULL if more work is required to fully save the thread's state after
1104 * it is known that a new thread is to be scheduled. It is up to the caller
1105 * to store the handle resulting from the thread that is being switched out
1106 * in that thread's "switch_handle" field after its
1107 * context has fully been saved, following the same requirements as with
1108 * the @ref arch_switch() function.
1109 *
1110 * If a new thread needs to be scheduled then its handle is returned.
1111 * Otherwise the same value provided as @p interrupted is returned back.
1112 * Those handles are the same opaque types used by the @ref arch_switch()
1113 * function.
1114 *
1115 * @warning
1116 * The @ref _current value may have changed after this call and not refer
1117 * to the interrupted thread anymore. It might be necessary to make a local
1118 * copy before calling this function.
1119 *
1120 * @param interrupted Handle for the thread that was interrupted or NULL.
1121 * @retval Handle for the next thread to execute, or @p interrupted when
1122 * no new thread is to be scheduled.
1123 */
Patrik Flykt4344e272019-03-08 14:19:05 -07001124void *z_get_next_switch_handle(void *interrupted)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001125{
Andrew Boieae0d1b22019-03-29 16:25:27 -07001126 z_check_stack_sentinel();
1127
Andy Rosseace1df2018-05-30 11:23:02 -07001128#ifdef CONFIG_SMP
Andy Rossdd432212021-02-05 08:15:02 -08001129 void *ret = NULL;
1130
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001131 K_SPINLOCK(&_sched_spinlock) {
Andy Rossf6d32ab2020-05-13 15:34:04 +00001132 struct k_thread *old_thread = _current, *new_thread;
Andy Rosseace1df2018-05-30 11:23:02 -07001133
Andy Ross4ff45712021-02-08 08:28:54 -08001134 if (IS_ENABLED(CONFIG_SMP)) {
1135 old_thread->switch_handle = NULL;
1136 }
Andy Rossf6d32ab2020-05-13 15:34:04 +00001137 new_thread = next_up();
1138
Andy Ross40d12c12021-09-27 08:22:43 -07001139 z_sched_usage_switch(new_thread);
1140
Andy Rossf6d32ab2020-05-13 15:34:04 +00001141 if (old_thread != new_thread) {
1142 update_metairq_preempt(new_thread);
Andy Rossb89e4272023-05-26 09:12:51 -07001143 z_sched_switch_spin(new_thread);
Andy Rossf6d32ab2020-05-13 15:34:04 +00001144 arch_cohere_stacks(old_thread, interrupted, new_thread);
Andy Ross11a050b2019-11-13 09:41:52 -08001145
Andy Rosseace1df2018-05-30 11:23:02 -07001146 _current_cpu->swap_ok = 0;
Andy Rossf6d32ab2020-05-13 15:34:04 +00001147 set_current(new_thread);
1148
Andy Ross3e696892021-11-30 18:26:26 -08001149#ifdef CONFIG_TIMESLICING
1150 z_reset_time_slice(new_thread);
1151#endif
1152
Danny Oerndrupc9d78402019-12-13 11:24:56 +01001153#ifdef CONFIG_SPIN_VALIDATE
Andy Ross8c1bdda2019-02-20 10:07:31 -08001154 /* Changed _current! Update the spinlock
Anas Nashif6df44052021-04-30 09:58:20 -04001155 * bookkeeping so the validation doesn't get
Andy Ross8c1bdda2019-02-20 10:07:31 -08001156 * confused when the "wrong" thread tries to
1157 * release the lock.
1158 */
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001159 z_spin_lock_set_owner(&_sched_spinlock);
Andy Ross8c1bdda2019-02-20 10:07:31 -08001160#endif
Andy Ross4ff45712021-02-08 08:28:54 -08001161
1162 /* A queued (runnable) old/current thread
1163 * needs to be added back to the run queue
1164 * here, and atomically with its switch handle
1165 * being set below. This is safe now, as we
1166 * will not return into it.
1167 */
1168 if (z_is_thread_queued(old_thread)) {
Andy Ross387fdd22021-09-23 18:44:40 -07001169 runq_add(old_thread);
Andy Ross4ff45712021-02-08 08:28:54 -08001170 }
Andy Rosseace1df2018-05-30 11:23:02 -07001171 }
Andy Rossf6d32ab2020-05-13 15:34:04 +00001172 old_thread->switch_handle = interrupted;
Andy Rossdd432212021-02-05 08:15:02 -08001173 ret = new_thread->switch_handle;
Andy Ross4ff45712021-02-08 08:28:54 -08001174 if (IS_ENABLED(CONFIG_SMP)) {
1175 /* Active threads MUST have a null here */
1176 new_thread->switch_handle = NULL;
1177 }
Benjamin Walshb8c21602016-12-23 19:34:41 -05001178 }
Andy Rossb4e9ef02022-04-06 10:10:17 -07001179 signal_pending_ipi();
Andy Rossdd432212021-02-05 08:15:02 -08001180 return ret;
Andy Rosseace1df2018-05-30 11:23:02 -07001181#else
Andy Ross40d12c12021-09-27 08:22:43 -07001182 z_sched_usage_switch(_kernel.ready_q.cache);
Andy Rossf6d32ab2020-05-13 15:34:04 +00001183 _current->switch_handle = interrupted;
Andy Ross6b84ab32021-02-18 10:15:23 -08001184 set_current(_kernel.ready_q.cache);
Andy Ross1acd8c22018-05-03 14:51:49 -07001185 return _current->switch_handle;
Andy Rossdd432212021-02-05 08:15:02 -08001186#endif
Andy Ross1acd8c22018-05-03 14:51:49 -07001187}
Benjamin Walshb12a8e02016-12-14 15:24:12 -05001188#endif
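/*
 * Illustrative sketch only (kept as a comment, not compiled): the shape of
 * the call an architecture's interrupt-exit path is expected to make on
 * @ref arch_switch() based ports.  The restore step is described in prose
 * because it is arch-specific (usually assembly) in practice.
 *
 *	void *next = z_get_next_switch_handle(interrupted_handle);
 *
 *	if (next != interrupted_handle) {
 *		... restore CPU state from the handle in "next" ...
 *	} else {
 *		... return into the interrupted thread as usual ...
 *	}
 */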
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001189
Patrik Flykt4344e272019-03-08 14:19:05 -07001190int z_unpend_all(_wait_q_t *wait_q)
Andy Ross4ca0e072018-05-10 09:45:42 -07001191{
Andy Rossccf3bf72018-05-10 11:10:34 -07001192 int need_sched = 0;
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001193 struct k_thread *thread;
Andy Ross4ca0e072018-05-10 09:45:42 -07001194
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001195 while ((thread = z_waitq_head(wait_q)) != NULL) {
1196 z_unpend_thread(thread);
1197 z_ready_thread(thread);
Andy Ross4ca0e072018-05-10 09:45:42 -07001198 need_sched = 1;
1199 }
Andy Rossccf3bf72018-05-10 11:10:34 -07001200
1201 return need_sched;
Andy Ross4ca0e072018-05-10 09:45:42 -07001202}
1203
Anas Nashif477a04a2024-02-28 08:15:15 -05001204void init_ready_q(struct _ready_q *ready_q)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001205{
Andy Rossb155d062021-09-24 13:49:14 -07001206#if defined(CONFIG_SCHED_SCALABLE)
Anas Nashif477a04a2024-02-28 08:15:15 -05001207 ready_q->runq = (struct _priq_rb) {
Andy Ross1acd8c22018-05-03 14:51:49 -07001208 .tree = {
Patrik Flykt4344e272019-03-08 14:19:05 -07001209 .lessthan_fn = z_priq_rb_lessthan,
Andy Ross1acd8c22018-05-03 14:51:49 -07001210 }
1211 };
Andy Rossb155d062021-09-24 13:49:14 -07001212#elif defined(CONFIG_SCHED_MULTIQ)
Andy Ross9f06a352018-06-28 10:38:14 -07001213 for (int i = 0; i < ARRAY_SIZE(_kernel.ready_q.runq.queues); i++) {
Anas Nashif477a04a2024-02-28 08:15:15 -05001214 sys_dlist_init(&ready_q->runq.queues[i]);
Andy Ross9f06a352018-06-28 10:38:14 -07001215 }
Andy Rossb155d062021-09-24 13:49:14 -07001216#else
Anas Nashif477a04a2024-02-28 08:15:15 -05001217 sys_dlist_init(&ready_q->runq);
Andy Ross9f06a352018-06-28 10:38:14 -07001218#endif
Andy Rossb155d062021-09-24 13:49:14 -07001219}
1220
1221void z_sched_init(void)
1222{
Andy Rossb11e7962021-09-24 10:57:39 -07001223#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
Nicolas Pitre907eea02023-03-16 17:54:25 -04001224 for (int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
Andy Rossb11e7962021-09-24 10:57:39 -07001225 init_ready_q(&_kernel.cpus[i].ready_q);
1226 }
1227#else
Andy Rossb155d062021-09-24 13:49:14 -07001228 init_ready_q(&_kernel.ready_q);
Andy Rossb11e7962021-09-24 10:57:39 -07001229#endif
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001230}
1231
Patrik Flykt4344e272019-03-08 14:19:05 -07001232int z_impl_k_thread_priority_get(k_tid_t thread)
Allan Stephens399d0ad2016-10-07 13:41:34 -05001233{
Benjamin Walshf6ca7de2016-11-08 10:36:50 -05001234 return thread->base.prio;
Allan Stephens399d0ad2016-10-07 13:41:34 -05001235}
1236
Andrew Boie76c04a22017-09-27 14:45:10 -07001237#ifdef CONFIG_USERSPACE
Andy Ross65649742019-08-06 13:34:31 -07001238static inline int z_vrfy_k_thread_priority_get(k_tid_t thread)
1239{
Anas Nashifa08bfeb2023-09-27 11:20:28 +00001240 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
Andy Ross65649742019-08-06 13:34:31 -07001241 return z_impl_k_thread_priority_get(thread);
1242}
1243#include <syscalls/k_thread_priority_get_mrsh.c>
Andrew Boie76c04a22017-09-27 14:45:10 -07001244#endif
1245
Anas Nashif25c87db2021-03-29 10:54:23 -04001246void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001247{
Benjamin Walsh3cc2ba92016-11-08 15:44:05 -05001248 /*
1249 * Use NULL, since we cannot know what the entry point is (we do not
1250 * keep track of it) and idle cannot change its priority.
1251 */
Patrik Flykt4344e272019-03-08 14:19:05 -07001252 Z_ASSERT_VALID_PRIO(prio, NULL);
Andrew Boie4f77c2a2019-11-07 12:43:29 -08001253 __ASSERT(!arch_is_in_isr(), "");
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001254
Anas Nashif868f0992024-02-24 11:37:56 -05001255 bool need_sched = z_thread_prio_set((struct k_thread *)thread, prio);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001256
Anas Nashif5e591c32024-02-24 10:37:06 -05001257 flag_ipi();
1258 if (need_sched && _current->base.sched_locked == 0U) {
1259 z_reschedule_unlocked();
1260 }
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001261}
1262
Andrew Boie468190a2017-09-29 14:00:48 -07001263#ifdef CONFIG_USERSPACE
Andy Ross65649742019-08-06 13:34:31 -07001264static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio)
Andrew Boie468190a2017-09-29 14:00:48 -07001265{
Anas Nashifa08bfeb2023-09-27 11:20:28 +00001266 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1267 K_OOPS(K_SYSCALL_VERIFY_MSG(_is_valid_prio(prio, NULL),
Andy Ross65649742019-08-06 13:34:31 -07001268 "invalid thread priority %d", prio));
Anas Nashif5e591c32024-02-24 10:37:06 -05001269#ifndef CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY
Anas Nashifa08bfeb2023-09-27 11:20:28 +00001270 K_OOPS(K_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio,
Andrew Boie8345e5e2018-05-04 15:57:57 -07001271 "thread priority may only be downgraded (%d < %d)",
1272 prio, thread->base.prio));
Anas Nashif5e591c32024-02-24 10:37:06 -05001273#endif
Andy Ross65649742019-08-06 13:34:31 -07001274 z_impl_k_thread_priority_set(thread, prio);
Andrew Boie468190a2017-09-29 14:00:48 -07001275}
Andy Ross65649742019-08-06 13:34:31 -07001276#include <syscalls/k_thread_priority_set_mrsh.c>
Andrew Boie468190a2017-09-29 14:00:48 -07001277#endif
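/*
 * Usage sketch (illustrative, not compiled here): temporarily boosting a
 * worker from a supervisor-mode thread and restoring it afterwards.
 * "worker_tid" is a hypothetical application thread ID; note that the
 * user-mode syscall above normally only allows lowering a priority.
 *
 *	int old_prio = k_thread_priority_get(worker_tid);
 *
 *	k_thread_priority_set(worker_tid, K_PRIO_PREEMPT(1));
 *	... latency-sensitive phase ...
 *	k_thread_priority_set(worker_tid, old_prio);
 */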
1278
Andy Ross4a2e50f2018-05-15 11:06:25 -07001279#ifdef CONFIG_SCHED_DEADLINE
Patrik Flykt4344e272019-03-08 14:19:05 -07001280void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
Andy Ross4a2e50f2018-05-15 11:06:25 -07001281{
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001282 struct k_thread *thread = tid;
Andy Rossf2280d12024-03-08 08:42:08 -08001283 int32_t newdl = k_cycle_get_32() + deadline;
Andy Ross4a2e50f2018-05-15 11:06:25 -07001284
Andy Rossf2280d12024-03-08 08:42:08 -08001285	/* The prio_deadline field changes the sorting order, so we can't
1286 * change it while the thread is in the run queue (dlists
1287 * actually are benign as long as we requeue it before we
1288 * release the lock, but an rbtree will blow up if we break
1289 * sorting!)
1290 */
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001291 K_SPINLOCK(&_sched_spinlock) {
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001292 if (z_is_thread_queued(thread)) {
Andy Rossc230fb32021-09-23 16:41:30 -07001293 dequeue_thread(thread);
Andy Rossf2280d12024-03-08 08:42:08 -08001294 thread->base.prio_deadline = newdl;
Andy Rossc230fb32021-09-23 16:41:30 -07001295 queue_thread(thread);
Andy Rossf2280d12024-03-08 08:42:08 -08001296 } else {
1297 thread->base.prio_deadline = newdl;
Andy Ross4a2e50f2018-05-15 11:06:25 -07001298 }
1299 }
1300}
1301
1302#ifdef CONFIG_USERSPACE
Andy Ross075c94f2019-08-13 11:34:34 -07001303static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline)
Andy Ross4a2e50f2018-05-15 11:06:25 -07001304{
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001305 struct k_thread *thread = tid;
Andy Ross4a2e50f2018-05-15 11:06:25 -07001306
Anas Nashifa08bfeb2023-09-27 11:20:28 +00001307 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1308 K_OOPS(K_SYSCALL_VERIFY_MSG(deadline > 0,
Andy Ross4a2e50f2018-05-15 11:06:25 -07001309 "invalid thread deadline %d",
1310 (int)deadline));
1311
Patrik Flykt4344e272019-03-08 14:19:05 -07001312 z_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
Andy Ross4a2e50f2018-05-15 11:06:25 -07001313}
Andy Ross075c94f2019-08-13 11:34:34 -07001314#include <syscalls/k_thread_deadline_set_mrsh.c>
Andy Ross4a2e50f2018-05-15 11:06:25 -07001315#endif
1316#endif
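/*
 * Usage sketch (illustrative): with CONFIG_SCHED_DEADLINE the deadline is
 * expressed in timer cycles relative to "now", so an EDF thread typically
 * refreshes it at the start of every work cycle.  The 10 ms period below
 * is an arbitrary example value.
 *
 *	for (;;) {
 *		k_thread_deadline_set(k_current_get(),
 *				      k_ms_to_cyc_ceil32(10));
 *		... do one cycle of work, then wait for the next trigger ...
 *	}
 */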
1317
Jordan Yates1ef647f2022-03-26 09:55:23 +10001318bool k_can_yield(void)
1319{
1320 return !(k_is_pre_kernel() || k_is_in_isr() ||
1321 z_is_idle_thread_object(_current));
1322}
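/*
 * Usage sketch (illustrative): code shared between thread and ISR context
 * can yield opportunistically without tripping the ISR/pre-kernel/idle
 * restrictions of k_yield().
 *
 *	if (k_can_yield()) {
 *		k_yield();
 *	}
 */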
1323
Patrik Flykt4344e272019-03-08 14:19:05 -07001324void z_impl_k_yield(void)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001325{
Andrew Boie4f77c2a2019-11-07 12:43:29 -08001326 __ASSERT(!arch_is_in_isr(), "");
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001327
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001328 SYS_PORT_TRACING_FUNC(k_thread, yield);
1329
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001330 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
James Harris6543e062021-03-01 10:14:13 -08001331
Andy Ross851d14a2021-05-13 15:46:43 -07001332 if (!IS_ENABLED(CONFIG_SMP) ||
1333 z_is_thread_queued(_current)) {
Andy Rossc230fb32021-09-23 16:41:30 -07001334 dequeue_thread(_current);
Andy Ross1acd8c22018-05-03 14:51:49 -07001335 }
Andy Rossc230fb32021-09-23 16:41:30 -07001336 queue_thread(_current);
Andy Ross851d14a2021-05-13 15:46:43 -07001337 update_cache(1);
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001338 z_swap(&_sched_spinlock, key);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001339}
1340
Andrew Boie468190a2017-09-29 14:00:48 -07001341#ifdef CONFIG_USERSPACE
Andy Ross65649742019-08-06 13:34:31 -07001342static inline void z_vrfy_k_yield(void)
1343{
1344 z_impl_k_yield();
1345}
1346#include <syscalls/k_yield_mrsh.c>
Andrew Boie468190a2017-09-29 14:00:48 -07001347#endif
1348
Flavio Ceolin7a815d52020-10-19 21:37:22 -07001349static int32_t z_tick_sleep(k_ticks_t ticks)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001350{
Flavio Ceolin9a160972020-11-16 10:40:46 -08001351 uint32_t expected_wakeup_ticks;
Carles Cufi9849df82016-12-02 15:31:08 +01001352
Andrew Boie4f77c2a2019-11-07 12:43:29 -08001353 __ASSERT(!arch_is_in_isr(), "");
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001354
Gerard Marull-Paretas737d7992022-11-23 13:42:04 +01001355 LOG_DBG("thread %p for %lu ticks", _current, (unsigned long)ticks);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001356
Gerson Fernando Budkeb8188e52023-10-16 20:15:31 +02001357#ifdef CONFIG_MULTITHREADING
Benjamin Walsh5596f782016-12-09 19:57:17 -05001358	/* a wait of 0 ticks is treated as a 'yield' */
Charles E. Youseb1863032019-05-08 13:22:46 -07001359 if (ticks == 0) {
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001360 k_yield();
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001361 return 0;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001362 }
Gerson Fernando Budkeb8188e52023-10-16 20:15:31 +02001363#endif
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001364
Lauren Murphy4c85b462021-05-25 17:49:28 -05001365 if (Z_TICK_ABS(ticks) <= 0) {
1366 expected_wakeup_ticks = ticks + sys_clock_tick_get_32();
1367 } else {
1368 expected_wakeup_ticks = Z_TICK_ABS(ticks);
1369 }
Andy Rossd27d4e62019-02-05 15:36:01 -08001370
Gerson Fernando Budkeb8188e52023-10-16 20:15:31 +02001371#ifdef CONFIG_MULTITHREADING
1372 k_timeout_t timeout = Z_TIMEOUT_TICKS(ticks);
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001373 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001374
Andy Rossdff6b712019-02-25 21:17:29 -08001375#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
1376 pending_current = _current;
1377#endif
Andrew Boiea8775ab2020-09-05 12:53:42 -07001378 unready_thread(_current);
Andy Ross78327382020-03-05 15:18:14 -08001379 z_add_thread_timeout(_current, timeout);
Andy Ross4521e0c2019-03-22 10:30:19 -07001380 z_mark_thread_as_suspended(_current);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001381
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001382 (void)z_swap(&_sched_spinlock, key);
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001383
Andy Ross4521e0c2019-03-22 10:30:19 -07001384 __ASSERT(!z_is_thread_state_set(_current, _THREAD_SUSPENDED), "");
1385
Anas Nashif5c90ceb2021-03-13 08:19:53 -05001386 ticks = (k_ticks_t)expected_wakeup_ticks - sys_clock_tick_get_32();
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001387 if (ticks > 0) {
Charles E. Youseb1863032019-05-08 13:22:46 -07001388 return ticks;
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001389 }
Gerson Fernando Budkeb8188e52023-10-16 20:15:31 +02001390#else
1391	/* busy wait to remain time-coherent, since subsystems may depend on it */
1392 z_impl_k_busy_wait(k_ticks_to_us_ceil32(expected_wakeup_ticks));
Benjamin Walshb12a8e02016-12-14 15:24:12 -05001393#endif
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001394
1395 return 0;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001396}
1397
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001398int32_t z_impl_k_sleep(k_timeout_t timeout)
Charles E. Youseb1863032019-05-08 13:22:46 -07001399{
Andy Ross78327382020-03-05 15:18:14 -08001400 k_ticks_t ticks;
Charles E. Youseb1863032019-05-08 13:22:46 -07001401
Peter Bigot8162e582019-12-12 16:07:07 -06001402 __ASSERT(!arch_is_in_isr(), "");
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001403
1404 SYS_PORT_TRACING_FUNC_ENTER(k_thread, sleep, timeout);
Peter Bigot8162e582019-12-12 16:07:07 -06001405
Anas Nashifd2c71792020-10-17 07:52:17 -04001406 /* in case of K_FOREVER, we suspend */
Andy Ross78327382020-03-05 15:18:14 -08001407 if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
Gerson Fernando Budkeb8188e52023-10-16 20:15:31 +02001408#ifdef CONFIG_MULTITHREADING
Andrew Boied2b89222019-11-08 10:44:22 -08001409 k_thread_suspend(_current);
Gerson Fernando Budkeb8188e52023-10-16 20:15:31 +02001410#else
1411	/* In single-threaded builds, just wait for an interrupt to save power */
1412 k_cpu_idle();
1413#endif
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001414 SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, (int32_t) K_TICKS_FOREVER);
1415
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001416 return (int32_t) K_TICKS_FOREVER;
Andrew Boied2b89222019-11-08 10:44:22 -08001417 }
1418
Andy Ross78327382020-03-05 15:18:14 -08001419 ticks = timeout.ticks;
Andy Ross78327382020-03-05 15:18:14 -08001420
Charles E. Youseb1863032019-05-08 13:22:46 -07001421 ticks = z_tick_sleep(ticks);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001422
Peter Mitsisa3e5af92023-12-05 13:40:19 -05001423 int32_t ret = k_ticks_to_ms_ceil64(ticks);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001424
1425 SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, ret);
1426
1427 return ret;
Charles E. Youseb1863032019-05-08 13:22:46 -07001428}
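/*
 * Usage sketch (illustrative): the return value is the time remaining when
 * the sleep ends early (e.g. via k_wakeup()), and 0 when the full timeout
 * elapsed.  K_FOREVER suspends the caller instead of arming a timeout.
 *
 *	int32_t left_ms = k_sleep(K_MSEC(500));
 *
 *	if (left_ms > 0) {
 *		... woken early, left_ms milliseconds were still pending ...
 *	}
 */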
1429
Andrew Boie76c04a22017-09-27 14:45:10 -07001430#ifdef CONFIG_USERSPACE
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001431static inline int32_t z_vrfy_k_sleep(k_timeout_t timeout)
Andrew Boie76c04a22017-09-27 14:45:10 -07001432{
Andy Ross78327382020-03-05 15:18:14 -08001433 return z_impl_k_sleep(timeout);
Charles E. Yousea5678312019-05-09 16:46:46 -07001434}
Andy Ross65649742019-08-06 13:34:31 -07001435#include <syscalls/k_sleep_mrsh.c>
Charles E. Yousea5678312019-05-09 16:46:46 -07001436#endif
1437
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001438int32_t z_impl_k_usleep(int us)
Charles E. Yousea5678312019-05-09 16:46:46 -07001439{
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001440 int32_t ticks;
Charles E. Yousea5678312019-05-09 16:46:46 -07001441
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001442 SYS_PORT_TRACING_FUNC_ENTER(k_thread, usleep, us);
1443
Andy Ross88924062019-10-03 11:43:10 -07001444 ticks = k_us_to_ticks_ceil64(us);
Charles E. Yousea5678312019-05-09 16:46:46 -07001445 ticks = z_tick_sleep(ticks);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001446
Peter Mitsisa3e5af92023-12-05 13:40:19 -05001447 int32_t ret = k_ticks_to_us_ceil64(ticks);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001448
Peter Mitsisa3e5af92023-12-05 13:40:19 -05001449 SYS_PORT_TRACING_FUNC_EXIT(k_thread, usleep, us, ret);
1450
1451 return ret;
Charles E. Yousea5678312019-05-09 16:46:46 -07001452}
1453
1454#ifdef CONFIG_USERSPACE
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001455static inline int32_t z_vrfy_k_usleep(int us)
Charles E. Yousea5678312019-05-09 16:46:46 -07001456{
1457 return z_impl_k_usleep(us);
Andrew Boie76c04a22017-09-27 14:45:10 -07001458}
Andy Ross65649742019-08-06 13:34:31 -07001459#include <syscalls/k_usleep_mrsh.c>
Andrew Boie76c04a22017-09-27 14:45:10 -07001460#endif
1461
Patrik Flykt4344e272019-03-08 14:19:05 -07001462void z_impl_k_wakeup(k_tid_t thread)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001463{
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001464 SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread);
1465
Patrik Flykt4344e272019-03-08 14:19:05 -07001466 if (z_is_thread_pending(thread)) {
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001467 return;
1468 }
1469
Patrik Flykt4344e272019-03-08 14:19:05 -07001470 if (z_abort_thread_timeout(thread) < 0) {
Andrew Boied2b89222019-11-08 10:44:22 -08001471 /* Might have just been sleeping forever */
1472 if (thread->base.thread_state != _THREAD_SUSPENDED) {
1473 return;
1474 }
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001475 }
1476
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001477 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
Peter Mitsis51ae9932024-02-20 11:50:54 -05001478
Andy Ross4521e0c2019-03-22 10:30:19 -07001479 z_mark_thread_as_not_suspended(thread);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001480
Peter Mitsis51ae9932024-02-20 11:50:54 -05001481 if (!thread_active_elsewhere(thread)) {
1482 ready_thread(thread);
1483 }
Andy Ross5737b5c2020-02-04 13:52:09 -08001484
Peter Mitsis51ae9932024-02-20 11:50:54 -05001485 if (arch_is_in_isr()) {
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001486 k_spin_unlock(&_sched_spinlock, key);
Peter Mitsis51ae9932024-02-20 11:50:54 -05001487 } else {
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001488 z_reschedule(&_sched_spinlock, key);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001489 }
1490}
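/*
 * Usage sketch (illustrative): waking a thread that is blocked in k_sleep()
 * from another thread or from an ISR.  "sleeper_tid" is a hypothetical
 * thread ID; the sleeper observes the early wake-up through k_sleep()'s
 * return value (see above).
 *
 *	k_wakeup(sleeper_tid);
 */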
1491
Enjia Mai7ac40aa2020-05-28 11:29:50 +08001492#ifdef CONFIG_TRACE_SCHED_IPI
1493extern void z_trace_sched_ipi(void);
1494#endif
1495
Andy Ross42ed12a2019-02-19 16:03:39 -08001496#ifdef CONFIG_SMP
Andy Ross42ed12a2019-02-19 16:03:39 -08001497void z_sched_ipi(void)
1498{
Daniel Leungadac4cb2020-01-09 18:55:07 -08001499 /* NOTE: When adding code to this, make sure this is called
1500	 * at an appropriate location when !CONFIG_SCHED_IPI_SUPPORTED.
1501 */
Enjia Mai7ac40aa2020-05-28 11:29:50 +08001502#ifdef CONFIG_TRACE_SCHED_IPI
1503 z_trace_sched_ipi();
1504#endif
Andy Rossc5c3ad92023-03-07 08:29:31 -08001505
1506#ifdef CONFIG_TIMESLICING
Nicolas Pitre5879d2d2023-03-09 22:45:18 -05001507 if (sliceable(_current)) {
Andy Rossc5c3ad92023-03-07 08:29:31 -08001508 z_time_slice();
1509 }
1510#endif
Andy Ross42ed12a2019-02-19 16:03:39 -08001511}
Andy Ross42ed12a2019-02-19 16:03:39 -08001512#endif
1513
Andrew Boie468190a2017-09-29 14:00:48 -07001514#ifdef CONFIG_USERSPACE
Andy Ross65649742019-08-06 13:34:31 -07001515static inline void z_vrfy_k_wakeup(k_tid_t thread)
1516{
Anas Nashifa08bfeb2023-09-27 11:20:28 +00001517 K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
Andy Ross65649742019-08-06 13:34:31 -07001518 z_impl_k_wakeup(thread);
1519}
1520#include <syscalls/k_wakeup_mrsh.c>
Andrew Boie468190a2017-09-29 14:00:48 -07001521#endif
1522
Daniel Leung0a50ff32023-09-25 11:56:10 -07001523k_tid_t z_impl_k_sched_current_thread_query(void)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001524{
Andy Rosseefd3da2020-02-06 13:39:52 -08001525#ifdef CONFIG_SMP
1526 /* In SMP, _current is a field read from _current_cpu, which
1527 * can race with preemption before it is read. We must lock
1528 * local interrupts when reading it.
1529 */
1530 unsigned int k = arch_irq_lock();
1531#endif
1532
1533 k_tid_t ret = _current_cpu->current;
1534
1535#ifdef CONFIG_SMP
1536 arch_irq_unlock(k);
1537#endif
1538 return ret;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001539}
1540
Andrew Boie76c04a22017-09-27 14:45:10 -07001541#ifdef CONFIG_USERSPACE
Daniel Leung0a50ff32023-09-25 11:56:10 -07001542static inline k_tid_t z_vrfy_k_sched_current_thread_query(void)
Andy Ross65649742019-08-06 13:34:31 -07001543{
Daniel Leung0a50ff32023-09-25 11:56:10 -07001544 return z_impl_k_sched_current_thread_query();
Andy Ross65649742019-08-06 13:34:31 -07001545}
Daniel Leung0a50ff32023-09-25 11:56:10 -07001546#include <syscalls/k_sched_current_thread_query_mrsh.c>
Andrew Boie76c04a22017-09-27 14:45:10 -07001547#endif
1548
Patrik Flykt4344e272019-03-08 14:19:05 -07001549int z_impl_k_is_preempt_thread(void)
Benjamin Walsh445830d2016-11-10 15:54:27 -05001550{
Andrew Boie4f77c2a2019-11-07 12:43:29 -08001551 return !arch_is_in_isr() && is_preempt(_current);
Benjamin Walsh445830d2016-11-10 15:54:27 -05001552}
Andrew Boie468190a2017-09-29 14:00:48 -07001553
1554#ifdef CONFIG_USERSPACE
Andy Ross65649742019-08-06 13:34:31 -07001555static inline int z_vrfy_k_is_preempt_thread(void)
1556{
1557 return z_impl_k_is_preempt_thread();
1558}
1559#include <syscalls/k_is_preempt_thread_mrsh.c>
Andrew Boie468190a2017-09-29 14:00:48 -07001560#endif
Andy Rossab46b1b2019-01-30 15:00:42 -08001561
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001562static inline void unpend_all(_wait_q_t *wait_q)
1563{
1564 struct k_thread *thread;
1565
1566 while ((thread = z_waitq_head(wait_q)) != NULL) {
1567 unpend_thread_no_timeout(thread);
1568 (void)z_abort_thread_timeout(thread);
1569 arch_thread_return_value_set(thread, 0);
1570 ready_thread(thread);
1571 }
1572}
1573
Anas Nashifa6ce4222024-02-22 14:10:17 -05001574#ifdef CONFIG_THREAD_ABORT_HOOK
1575extern void thread_abort_hook(struct k_thread *thread);
Chen Peng10f63d112021-09-06 13:59:40 +08001576#endif
1577
Peter Mitsise1db1ce2023-08-14 14:06:52 -04001578/**
1579 * @brief Dequeues the specified thread
1580 *
1581 * Dequeues the specified thread and moves it into the specified new state.
1582 *
1583 * @param thread Identifies the thread to halt
Peter Mitsise7986eb2023-08-14 16:41:05 -04001584 * @param new_state New thread state (_THREAD_DEAD or _THREAD_SUSPENDED)
Peter Mitsise1db1ce2023-08-14 14:06:52 -04001585 */
1586static void halt_thread(struct k_thread *thread, uint8_t new_state)
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001587{
1588 /* We hold the lock, and the thread is known not to be running
1589 * anywhere.
1590 */
Peter Mitsise1db1ce2023-08-14 14:06:52 -04001591 if ((thread->base.thread_state & new_state) == 0U) {
1592 thread->base.thread_state |= new_state;
Peter Mitsise7986eb2023-08-14 16:41:05 -04001593 clear_halting(thread);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001594 if (z_is_thread_queued(thread)) {
Andy Rossc230fb32021-09-23 16:41:30 -07001595 dequeue_thread(thread);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001596 }
Peter Mitsise7986eb2023-08-14 16:41:05 -04001597
1598 if (new_state == _THREAD_DEAD) {
1599 if (thread->base.pended_on != NULL) {
1600 unpend_thread_no_timeout(thread);
1601 }
1602 (void)z_abort_thread_timeout(thread);
1603 unpend_all(&thread->join_queue);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001604 }
Peter Mitsise7986eb2023-08-14 16:41:05 -04001605#ifdef CONFIG_SMP
1606 unpend_all(&thread->halt_queue);
1607#endif
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001608 update_cache(1);
1609
Peter Mitsise7986eb2023-08-14 16:41:05 -04001610 if (new_state == _THREAD_SUSPENDED) {
1611 return;
1612 }
1613
Grant Ramsay45701e62023-08-14 09:41:52 +12001614#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
1615 arch_float_disable(thread);
1616#endif
1617
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001618 SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread);
1619
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001620 z_thread_monitor_exit(thread);
Anas Nashifa6ce4222024-02-22 14:10:17 -05001621#ifdef CONFIG_THREAD_ABORT_HOOK
1622 thread_abort_hook(thread);
Chen Peng10f63d112021-09-06 13:59:40 +08001623#endif
1624
Peter Mitsis6df8efe2023-05-11 14:06:46 -04001625#ifdef CONFIG_OBJ_CORE_THREAD
Peter Mitsise6f10902023-06-01 12:16:40 -04001626#ifdef CONFIG_OBJ_CORE_STATS_THREAD
1627 k_obj_core_stats_deregister(K_OBJ_CORE(thread));
1628#endif
Peter Mitsis6df8efe2023-05-11 14:06:46 -04001629 k_obj_core_unlink(K_OBJ_CORE(thread));
1630#endif
1631
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001632#ifdef CONFIG_USERSPACE
1633 z_mem_domain_exit_thread(thread);
Anas Nashif70cf96b2023-09-27 10:45:48 +00001634 k_thread_perms_all_clear(thread);
Anas Nashif7a18c2b2023-09-27 10:45:18 +00001635 k_object_uninit(thread->stack_obj);
1636 k_object_uninit(thread);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001637#endif
1638 }
1639}
1640
1641void z_thread_abort(struct k_thread *thread)
1642{
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001643 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001644
Anas Nashif87910122024-02-22 22:24:36 -05001645 if (z_is_thread_essential(thread)) {
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001646 k_spin_unlock(&_sched_spinlock, key);
Andy Rossfb613592022-05-19 12:55:28 -07001647 __ASSERT(false, "aborting essential thread %p", thread);
1648 k_panic();
1649 return;
1650 }
1651
Anas Nashif3f4f3f62021-03-29 17:13:47 -04001652 if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001653 k_spin_unlock(&_sched_spinlock, key);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001654 return;
1655 }
1656
Peter Mitsise7986eb2023-08-14 16:41:05 -04001657 z_thread_halt(thread, key, true);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001658}
1659
1660#if !defined(CONFIG_ARCH_HAS_THREAD_ABORT)
1661void z_impl_k_thread_abort(struct k_thread *thread)
1662{
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001663 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread);
1664
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001665 z_thread_abort(thread);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001666
1667 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001668}
1669#endif
1670
1671int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
1672{
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001673 k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001674 int ret = 0;
1675
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001676 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, join, thread, timeout);
1677
Anas Nashif3f4f3f62021-03-29 17:13:47 -04001678 if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
Andy Rossa08e23f2023-05-26 09:39:16 -07001679 z_sched_switch_spin(thread);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001680 ret = 0;
1681 } else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
1682 ret = -EBUSY;
Anas Nashif3f4f3f62021-03-29 17:13:47 -04001683 } else if ((thread == _current) ||
1684 (thread->base.pended_on == &_current->join_queue)) {
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001685 ret = -EDEADLK;
1686 } else {
1687 __ASSERT(!arch_is_in_isr(), "cannot join in ISR");
1688 add_to_waitq_locked(_current, &thread->join_queue);
1689 add_thread_timeout(_current, timeout);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001690
1691 SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout);
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001692 ret = z_swap(&_sched_spinlock, key);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001693 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);
1694
1695 return ret;
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001696 }
1697
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001698 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);
1699
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001700 k_spin_unlock(&_sched_spinlock, key);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001701 return ret;
1702}
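/*
 * Usage sketch (illustrative): joining a worker with a bounded wait and
 * falling back to an abort.  "worker" is a hypothetical struct k_thread
 * owned by the application; a non-zero return means the thread did not
 * terminate within the timeout (or the call was rejected, e.g. -EDEADLK).
 *
 *	if (k_thread_join(&worker, K_MSEC(100)) != 0) {
 *		k_thread_abort(&worker);
 *	}
 */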
1703
Andrew Boie322816e2020-02-20 16:33:06 -08001704#ifdef CONFIG_USERSPACE
1705/* Special case: don't oops if the thread is uninitialized. This is because
1706 * the initialization bit does double-duty for thread objects; if false, it means
1707 * the thread object is truly uninitialized, or the thread ran and exited for
1708 * some reason.
1709 *
1710 * Return true in this case, indicating we should just do nothing and return
1711 * success to the caller.
1712 */
1713static bool thread_obj_validate(struct k_thread *thread)
1714{
Anas Nashifc25d0802023-09-27 10:49:28 +00001715 struct k_object *ko = k_object_find(thread);
Anas Nashif21254b22023-09-27 10:50:26 +00001716 int ret = k_object_validate(ko, K_OBJ_THREAD, _OBJ_INIT_TRUE);
Andrew Boie322816e2020-02-20 16:33:06 -08001717
1718 switch (ret) {
1719 case 0:
1720 return false;
1721 case -EINVAL:
1722 return true;
1723 default:
1724#ifdef CONFIG_LOG
Anas Nashif3ab35662023-09-27 10:51:23 +00001725 k_object_dump_error(ret, thread, ko, K_OBJ_THREAD);
Andrew Boie322816e2020-02-20 16:33:06 -08001726#endif
Anas Nashifa08bfeb2023-09-27 11:20:28 +00001727 K_OOPS(K_SYSCALL_VERIFY_MSG(ret, "access denied"));
Andrew Boie322816e2020-02-20 16:33:06 -08001728 }
Enjia Mai53ca7092021-01-15 17:09:58 +08001729 CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
Andrew Boie322816e2020-02-20 16:33:06 -08001730}
1731
Andy Ross78327382020-03-05 15:18:14 -08001732static inline int z_vrfy_k_thread_join(struct k_thread *thread,
1733 k_timeout_t timeout)
Andrew Boie322816e2020-02-20 16:33:06 -08001734{
1735 if (thread_obj_validate(thread)) {
1736 return 0;
1737 }
1738
1739 return z_impl_k_thread_join(thread, timeout);
1740}
1741#include <syscalls/k_thread_join_mrsh.c>
Andrew Boiea4c91902020-03-24 16:09:24 -07001742
1743static inline void z_vrfy_k_thread_abort(k_tid_t thread)
1744{
1745 if (thread_obj_validate(thread)) {
1746 return;
1747 }
1748
Anas Nashif87910122024-02-22 22:24:36 -05001749 K_OOPS(K_SYSCALL_VERIFY_MSG(!z_is_thread_essential(thread),
Andrew Boiea4c91902020-03-24 16:09:24 -07001750 "aborting essential thread %p", thread));
1751
1752 z_impl_k_thread_abort((struct k_thread *)thread);
1753}
1754#include <syscalls/k_thread_abort_mrsh.c>
Andrew Boie322816e2020-02-20 16:33:06 -08001755#endif /* CONFIG_USERSPACE */
Peter Bigot0259c862021-01-12 13:45:32 -06001756
1757/*
1758 * future scheduler.h API implementations
1759 */
1760bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data)
1761{
1762 struct k_thread *thread;
1763 bool ret = false;
1764
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001765 K_SPINLOCK(&_sched_spinlock) {
Peter Bigot0259c862021-01-12 13:45:32 -06001766 thread = _priq_wait_best(&wait_q->waitq);
1767
1768 if (thread != NULL) {
1769 z_thread_return_value_set_with_data(thread,
1770 swap_retval,
1771 swap_data);
1772 unpend_thread_no_timeout(thread);
1773 (void)z_abort_thread_timeout(thread);
1774 ready_thread(thread);
1775 ret = true;
1776 }
1777 }
1778
1779 return ret;
1780}
1781
1782int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
1783 _wait_q_t *wait_q, k_timeout_t timeout, void **data)
1784{
1785 int ret = z_pend_curr(lock, key, wait_q, timeout);
1786
1787 if (data != NULL) {
1788 *data = _current->base.swap_data;
1789 }
1790 return ret;
1791}
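/*
 * Illustrative sketch (not compiled here) of the intended pairing of these
 * helpers inside a kernel synchronization primitive; "my_obj", its spinlock
 * and wait queue are hypothetical.
 *
 *	Consumer side, called with my_obj->lock held via "key":
 *		void *data;
 *		int ret = z_sched_wait(&my_obj->lock, key, &my_obj->waitq,
 *				       K_FOREVER, &data);
 *
 *	Producer side, handing "payload" to one waiter (if any):
 *		(void)z_sched_wake(&my_obj->waitq, 0, payload);
 */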
Peter Mitsisca583392023-01-05 11:50:21 -05001792
1793int z_sched_waitq_walk(_wait_q_t *wait_q,
1794 int (*func)(struct k_thread *, void *), void *data)
1795{
1796 struct k_thread *thread;
1797 int status = 0;
1798
Anas Nashif0d8da5f2024-03-06 15:59:36 -05001799 K_SPINLOCK(&_sched_spinlock) {
Peter Mitsisca583392023-01-05 11:50:21 -05001800 _WAIT_Q_FOR_EACH(wait_q, thread) {
1801
1802 /*
1803 * Invoke the callback function on each waiting thread
1804 * for as long as there are both waiting threads AND
1805 * it returns 0.
1806 */
1807
1808 status = func(thread, data);
1809 if (status != 0) {
1810 break;
1811 }
1812 }
1813 }
1814
1815 return status;
1816}
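/*
 * Illustrative sketch: a walk callback that records the first waiter
 * matching a hypothetical predicate and stops the iteration by returning
 * non-zero.  The walk runs under _sched_spinlock, so the callback must be
 * short and must not reschedule.
 *
 *	static int find_first_match(struct k_thread *thread, void *data)
 *	{
 *		struct k_thread **out = data;
 *
 *		if (hypothetical_match(thread)) {
 *			*out = thread;
 *			return 1;
 *		}
 *		return 0;
 *	}
 *
 *	struct k_thread *match = NULL;
 *	(void)z_sched_waitq_walk(&wait_q, find_first_match, &match);
 */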