/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/spinlock.h>
#include <zephyr/kernel/sched_priq.h>
#include <zephyr/wait_q.h>
#include <kswap.h>
#include <kernel_arch_func.h>
#include <zephyr/syscall_handler.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <stdbool.h>
#include <kernel_internal.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/timing/timing.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#if defined(CONFIG_SCHED_DUMB)
#define _priq_run_add z_priq_dumb_add
#define _priq_run_remove z_priq_dumb_remove
# if defined(CONFIG_SCHED_CPU_MASK)
#  define _priq_run_best _priq_dumb_mask_best
# else
#  define _priq_run_best z_priq_dumb_best
# endif
#elif defined(CONFIG_SCHED_SCALABLE)
#define _priq_run_add z_priq_rb_add
#define _priq_run_remove z_priq_rb_remove
#define _priq_run_best z_priq_rb_best
#elif defined(CONFIG_SCHED_MULTIQ)
#define _priq_run_add z_priq_mq_add
#define _priq_run_remove z_priq_mq_remove
#define _priq_run_best z_priq_mq_best
static ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq,
					struct k_thread *thread);
static ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq,
					   struct k_thread *thread);
#endif

#if defined(CONFIG_WAITQ_SCALABLE)
#define z_priq_wait_add z_priq_rb_add
#define _priq_wait_remove z_priq_rb_remove
#define _priq_wait_best z_priq_rb_best
#elif defined(CONFIG_WAITQ_DUMB)
#define z_priq_wait_add z_priq_dumb_add
#define _priq_wait_remove z_priq_dumb_remove
#define _priq_wait_best z_priq_dumb_best
#endif

struct k_spinlock sched_spinlock;

static void update_cache(int preempt_ok);
static void end_thread(struct k_thread *thread);


static inline int is_preempt(struct k_thread *thread)
{
	/* explanation in kernel_struct.h */
	return thread->base.preempt <= _PREEMPT_THRESHOLD;
}

static inline int is_metairq(struct k_thread *thread)
{
#if CONFIG_NUM_METAIRQ_PRIORITIES > 0
	return (thread->base.prio - K_HIGHEST_THREAD_PRIO)
		< CONFIG_NUM_METAIRQ_PRIORITIES;
#else
	return 0;
#endif
}
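
/* For illustration (example values, not a requirement of the API): with
 * CONFIG_NUM_METAIRQ_PRIORITIES == 2, the check above treats the two
 * highest (most negative) priorities, K_HIGHEST_THREAD_PRIO and
 * K_HIGHEST_THREAD_PRIO + 1, as meta-IRQ priorities.
 */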

#if CONFIG_ASSERT
static inline bool is_thread_dummy(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_DUMMY) != 0U;
}
#endif

/*
 * Return value same as e.g. memcmp
 * > 0 -> thread 1 priority  > thread 2 priority
 * = 0 -> thread 1 priority == thread 2 priority
 * < 0 -> thread 1 priority  < thread 2 priority
 * Do not rely on the actual value returned aside from the above.
 * (Again, like memcmp.)
 */
int32_t z_sched_prio_cmp(struct k_thread *thread_1,
			 struct k_thread *thread_2)
{
	/* `prio` is <32b, so the below cannot overflow. */
	int32_t b1 = thread_1->base.prio;
	int32_t b2 = thread_2->base.prio;

	if (b1 != b2) {
		return b2 - b1;
	}

#ifdef CONFIG_SCHED_DEADLINE
	/* If we assume all deadlines live within the same "half" of
	 * the 32 bit modulus space (this is a documented API rule),
	 * then the latest deadline in the queue minus the earliest is
	 * guaranteed to be (2's complement) non-negative.  We can
	 * leverage that to compare the values without having to check
	 * the current time.
	 */
	uint32_t d1 = thread_1->base.prio_deadline;
	uint32_t d2 = thread_2->base.prio_deadline;

	if (d1 != d2) {
		/* Sooner deadline means higher effective priority.
		 * Doing the calculation with unsigned types and casting
		 * to signed isn't perfect, but at least reduces this
		 * from UB on overflow to impdef.
		 */
		return (int32_t) (d2 - d1);
	}
#endif
	return 0;
}
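
/* Worked example of the wraparound-safe deadline comparison above
 * (illustrative values only): with d1 = 0xfffffff0 and d2 = 0x00000010,
 * the unsigned difference d2 - d1 is 0x20, so the cast yields +32 and
 * thread_1, whose deadline falls just before the wrap (i.e. sooner),
 * compares as higher priority, matching the memcmp-style contract.
 */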

static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
					 int preempt_ok)
{
	/* Preemption is OK if it's being explicitly allowed by
	 * software state (e.g. the thread called k_yield())
	 */
	if (preempt_ok != 0) {
		return true;
	}

	__ASSERT(_current != NULL, "");

	/* Or if we're pended/suspended/dummy (duh) */
	if (z_is_thread_prevented_from_running(_current)) {
		return true;
	}

	/* Edge case on ARM where a thread can be pended out of an
	 * interrupt handler before the "synchronous" swap starts
	 * context switching.  Platforms with atomic swap can never
	 * hit this.
	 */
	if (IS_ENABLED(CONFIG_SWAP_NONATOMIC)
	    && z_is_thread_timeout_active(thread)) {
		return true;
	}

	/* Otherwise we have to be running a preemptible thread or
	 * switching to a metairq
	 */
	if (is_preempt(_current) || is_metairq(thread)) {
		return true;
	}

	return false;
}

#ifdef CONFIG_SCHED_CPU_MASK
static ALWAYS_INLINE struct k_thread *_priq_dumb_mask_best(sys_dlist_t *pq)
{
	/* With masks enabled we need to be prepared to walk the list
	 * looking for one we can run
	 */
	struct k_thread *thread;

	SYS_DLIST_FOR_EACH_CONTAINER(pq, thread, base.qnode_dlist) {
		if ((thread->base.cpu_mask & BIT(_current_cpu->id)) != 0) {
			return thread;
		}
	}
	return NULL;
}
#endif

static ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq,
					  struct k_thread *thread)
{
	struct k_thread *t;

	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	SYS_DLIST_FOR_EACH_CONTAINER(pq, t, base.qnode_dlist) {
		if (z_sched_prio_cmp(thread, t) > 0) {
			sys_dlist_insert(&t->base.qnode_dlist,
					 &thread->base.qnode_dlist);
			return;
		}
	}

	sys_dlist_append(pq, &thread->base.qnode_dlist);
}
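
/* Note on ordering: the insertion above places the new thread ahead of
 * the first strictly lower-priority entry it finds, so the dlist stays
 * sorted by priority while equal-priority threads keep FIFO order (an
 * existing equal-priority thread is never displaced).
 */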

static ALWAYS_INLINE void *thread_runq(struct k_thread *thread)
{
#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	int cpu, m = thread->base.cpu_mask;

	/* Edge case: it's legal per the API to "make runnable" a
	 * thread with all CPUs masked off (i.e. one that isn't
	 * actually runnable!).  Sort of a wart in the API and maybe
	 * we should address this in docs/assertions instead to avoid
	 * the extra test.
	 */
	cpu = m == 0 ? 0 : u32_count_trailing_zeros(m);

	return &_kernel.cpus[cpu].ready_q.runq;
#else
	return &_kernel.ready_q.runq;
#endif
}

static ALWAYS_INLINE void *curr_cpu_runq(void)
{
#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	return &arch_curr_cpu()->ready_q.runq;
#else
	return &_kernel.ready_q.runq;
#endif
}

static ALWAYS_INLINE void runq_add(struct k_thread *thread)
{
	_priq_run_add(thread_runq(thread), thread);
}

static ALWAYS_INLINE void runq_remove(struct k_thread *thread)
{
	_priq_run_remove(thread_runq(thread), thread);
}

static ALWAYS_INLINE struct k_thread *runq_best(void)
{
	return _priq_run_best(curr_cpu_runq());
}
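
/* Summary of the helpers above: with CONFIG_SCHED_CPU_MASK_PIN_ONLY each
 * CPU owns a private run queue and a thread is queued on the single CPU
 * named by its cpu_mask; otherwise every CPU shares the one global
 * _kernel.ready_q.runq, so thread_runq() and curr_cpu_runq() resolve to
 * the same list.
 */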

/* _current is never in the run queue until context switch on
 * SMP configurations, see z_requeue_current()
 */
static inline bool should_queue_thread(struct k_thread *th)
{
	return !IS_ENABLED(CONFIG_SMP) || th != _current;
}

static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_QUEUED;
	if (should_queue_thread(thread)) {
		runq_add(thread);
	}
#ifdef CONFIG_SMP
	if (thread == _current) {
		/* add current to end of queue means "yield" */
		_current_cpu->swap_ok = true;
	}
#endif
}

static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_QUEUED;
	if (should_queue_thread(thread)) {
		runq_remove(thread);
	}
}

static void signal_pending_ipi(void)
{
	/* Synchronization note: you might think we need to lock these
	 * two steps, but an IPI is idempotent.  It's OK if we do it
	 * twice.  All we require is that if a CPU sees the flag true,
	 * it is guaranteed to send the IPI, and if a core sets
	 * pending_ipi, the IPI will be sent the next time through
	 * this code.
	 */
#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
	if (CONFIG_MP_NUM_CPUS > 1) {
		if (_kernel.pending_ipi) {
			_kernel.pending_ipi = false;
			arch_sched_ipi();
		}
	}
#endif
}

#ifdef CONFIG_SMP
/* Called out of z_swap() when CONFIG_SMP.  The current thread can
 * never live in the run queue until we are inexorably on the context
 * switch path on SMP, otherwise there is a deadlock condition where a
 * set of CPUs pick a cycle of threads to run and wait for them all to
 * context switch forever.
 */
void z_requeue_current(struct k_thread *curr)
{
	if (z_is_thread_queued(curr)) {
		runq_add(curr);
	}
	signal_pending_ipi();
}

static inline bool is_aborting(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_ABORTING) != 0U;
}
#endif

static ALWAYS_INLINE struct k_thread *next_up(void)
{
	struct k_thread *thread = runq_best();

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && (CONFIG_NUM_COOP_PRIORITIES > 0)
	/* MetaIRQs must always attempt to return back to a
	 * cooperative thread they preempted and not whatever happens
	 * to be highest priority now. The cooperative thread was
	 * promised it wouldn't be preempted (by non-metairq threads)!
	 */
	struct k_thread *mirqp = _current_cpu->metairq_preempted;

	if (mirqp != NULL && (thread == NULL || !is_metairq(thread))) {
		if (!z_is_thread_prevented_from_running(mirqp)) {
			thread = mirqp;
		} else {
			_current_cpu->metairq_preempted = NULL;
		}
	}
#endif

#ifndef CONFIG_SMP
	/* In uniprocessor mode, we can leave the current thread in
	 * the queue (actually we have to, otherwise the assembly
	 * context switch code for all architectures would be
	 * responsible for putting it back in z_swap and ISR return!),
	 * which makes this choice simple.
	 */
	return (thread != NULL) ? thread : _current_cpu->idle_thread;
#else
	/* Under SMP, the "cache" mechanism for selecting the next
	 * thread doesn't work, so we have more work to do to test
	 * _current against the best choice from the queue.  Here, the
	 * thread selected above represents "the best thread that is
	 * not current".
	 *
	 * Subtle note on "queued": in SMP mode, _current does not
	 * live in the queue, so this isn't exactly the same thing as
	 * "ready", it means "is _current already added back to the
	 * queue such that we don't want to re-add it".
	 */
	if (is_aborting(_current)) {
		end_thread(_current);
	}

	int queued = z_is_thread_queued(_current);
	int active = !z_is_thread_prevented_from_running(_current);

	if (thread == NULL) {
		thread = _current_cpu->idle_thread;
	}

	if (active) {
		int32_t cmp = z_sched_prio_cmp(_current, thread);

		/* Ties only switch if state says we yielded */
		if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) {
			thread = _current;
		}

		if (!should_preempt(thread, _current_cpu->swap_ok)) {
			thread = _current;
		}
	}

	/* Put _current back into the queue */
	if (thread != _current && active &&
	    !z_is_idle_thread_object(_current) && !queued) {
		queue_thread(_current);
	}

	/* Take the new _current out of the queue */
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}

	_current_cpu->swap_ok = false;
	return thread;
#endif
}

static void move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}
	queue_thread(thread);
	update_cache(thread == _current);
}

#ifdef CONFIG_TIMESLICING

static int slice_ticks;
static int slice_max_prio;

static inline int slice_time(struct k_thread *curr)
{
	int ret = slice_ticks;

#ifdef CONFIG_TIMESLICE_PER_THREAD
	if (curr->base.slice_ticks != 0) {
		ret = curr->base.slice_ticks;
	}
#endif
	return ret;
}

#ifdef CONFIG_SWAP_NONATOMIC
/* If z_swap() isn't atomic, then it's possible for a timer interrupt
 * to try to timeslice away _current after it has already pended
 * itself but before the corresponding context switch.  Treat that as
 * a noop condition in z_time_slice().
 */
static struct k_thread *pending_current;
#endif

void z_reset_time_slice(struct k_thread *curr)
{
	/* Add the elapsed time since the last announced tick to the
	 * slice count, as we'll see those "expired" ticks arrive in a
	 * FUTURE z_time_slice() call.
	 */
	if (slice_time(curr) != 0) {
		_current_cpu->slice_ticks = slice_time(curr) + sys_clock_elapsed();
		z_set_timeout_expiry(slice_time(curr), false);
	}
}

void k_sched_time_slice_set(int32_t slice, int prio)
{
	LOCKED(&sched_spinlock) {
		_current_cpu->slice_ticks = 0;
		slice_ticks = k_ms_to_ticks_ceil32(slice);
		if (IS_ENABLED(CONFIG_TICKLESS_KERNEL) && slice > 0) {
			/* It's not possible to reliably set a 1-tick
			 * timeout if ticks aren't regular.
			 */
			slice_ticks = MAX(2, slice_ticks);
		}
		slice_max_prio = prio;
		z_reset_time_slice(_current);
	}
}
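
/* Usage sketch (illustrative, not part of this file): a call such as
 * k_sched_time_slice_set(20, 0) converts 20 ms to ticks and round-robins
 * preemptible threads whose priority is not higher than 0, per the
 * sliceable() check below; k_sched_time_slice_set(0, 0) effectively
 * disables global slicing since slice_time() then returns 0.
 */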

#ifdef CONFIG_TIMESLICE_PER_THREAD
void k_thread_time_slice_set(struct k_thread *th, int32_t slice_ticks,
			     k_thread_timeslice_fn_t expired, void *data)
{
	LOCKED(&sched_spinlock) {
		th->base.slice_ticks = slice_ticks;
		th->base.slice_expired = expired;
		th->base.slice_data = data;
	}
}
#endif

static inline bool sliceable(struct k_thread *thread)
{
	bool ret = is_preempt(thread)
		&& !z_is_thread_prevented_from_running(thread)
		&& !z_is_prio_higher(thread->base.prio, slice_max_prio)
		&& !z_is_idle_thread_object(thread);

#ifdef CONFIG_TIMESLICE_PER_THREAD
	ret |= thread->base.slice_ticks != 0;
#endif

	return ret;
}

static k_spinlock_key_t slice_expired_locked(k_spinlock_key_t sched_lock_key)
{
	struct k_thread *curr = _current;

#ifdef CONFIG_TIMESLICE_PER_THREAD
	if (curr->base.slice_expired) {
		k_spin_unlock(&sched_spinlock, sched_lock_key);
		curr->base.slice_expired(curr, curr->base.slice_data);
		sched_lock_key = k_spin_lock(&sched_spinlock);
	}
#endif
	if (!z_is_thread_prevented_from_running(curr)) {
		move_thread_to_end_of_prio_q(curr);
	}
	z_reset_time_slice(curr);

	return sched_lock_key;
}

/* Called out of each timer interrupt */
void z_time_slice(int ticks)
{
	/* Hold sched_spinlock, so that activity on another CPU
	 * (like a call to k_thread_abort() at just the wrong time)
	 * won't affect the correctness of the decisions made here.
	 * Also prevents any nested interrupts from changing
	 * thread state to avoid similar issues, since this would
	 * normally run with IRQs enabled.
	 */
	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

#ifdef CONFIG_SWAP_NONATOMIC
	if (pending_current == _current) {
		z_reset_time_slice(_current);
		k_spin_unlock(&sched_spinlock, key);
		return;
	}
	pending_current = NULL;
#endif

	if (slice_time(_current) && sliceable(_current)) {
		if (ticks >= _current_cpu->slice_ticks) {
			/* Note: this will (if so enabled) internally
			 * drop and reacquire the scheduler lock
			 * around the callback!  Don't put anything
			 * after this line that requires
			 * synchronization.
			 */
			key = slice_expired_locked(key);
		} else {
			_current_cpu->slice_ticks -= ticks;
		}
	} else {
		_current_cpu->slice_ticks = 0;
	}
	k_spin_unlock(&sched_spinlock, key);
}
#endif

/* Track cooperative threads preempted by metairqs so we can return to
 * them specifically.  Called at the moment a new thread has been
 * selected to run.
 */
static void update_metairq_preempt(struct k_thread *thread)
{
#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && (CONFIG_NUM_COOP_PRIORITIES > 0)
	if (is_metairq(thread) && !is_metairq(_current) &&
	    !is_preempt(_current)) {
		/* Record new preemption */
		_current_cpu->metairq_preempted = _current;
	} else if (!is_metairq(thread) && !z_is_idle_thread_object(thread)) {
		/* Returning from existing preemption */
		_current_cpu->metairq_preempted = NULL;
	}
#endif
}

static void update_cache(int preempt_ok)
{
#ifndef CONFIG_SMP
	struct k_thread *thread = next_up();

	if (should_preempt(thread, preempt_ok)) {
#ifdef CONFIG_TIMESLICING
		if (thread != _current) {
			z_reset_time_slice(thread);
		}
#endif
		update_metairq_preempt(thread);
		_kernel.ready_q.cache = thread;
	} else {
		_kernel.ready_q.cache = _current;
	}

#else
	/* The way this works is that the CPU record keeps its
	 * "cooperative swapping is OK" flag until the next reschedule
	 * call or context switch.  It doesn't need to be tracked per
	 * thread because if the thread gets preempted for whatever
	 * reason the scheduler will make the same decision anyway.
	 */
	_current_cpu->swap_ok = preempt_ok;
#endif
}

static bool thread_active_elsewhere(struct k_thread *thread)
{
	/* True if the thread is currently running on another CPU.
	 * There are more scalable designs to answer this question in
	 * constant time, but this is fine for now.
	 */
#ifdef CONFIG_SMP
	int currcpu = _current_cpu->id;

	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
		if ((i != currcpu) &&
		    (_kernel.cpus[i].current == thread)) {
			return true;
		}
	}
#endif
	return false;
}

static void flag_ipi(void)
{
#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
	if (CONFIG_MP_NUM_CPUS > 1) {
		_kernel.pending_ipi = true;
	}
#endif
}
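
/* IPI handshake in this file: flag_ipi() merely latches _kernel.pending_ipi
 * when a scheduling decision may affect another CPU; the flag is consumed
 * and arch_sched_ipi() actually fired by signal_pending_ipi() on the next
 * reschedule or context-switch path, which is safe because the IPI is
 * idempotent (see the note in signal_pending_ipi()).
 */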

static void ready_thread(struct k_thread *thread)
{
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(thread));
#endif

	/* If the thread is already queued, do not try to add it to the
	 * run queue again
	 */
	if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) {
		SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_ready, thread);

		queue_thread(thread);
		update_cache(0);
		flag_ipi();
	}
}

void z_ready_thread(struct k_thread *thread)
{
	LOCKED(&sched_spinlock) {
		if (!thread_active_elsewhere(thread)) {
			ready_thread(thread);
		}
	}
}

void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	LOCKED(&sched_spinlock) {
		move_thread_to_end_of_prio_q(thread);
	}
}

void z_sched_start(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

	if (z_has_thread_started(thread)) {
		k_spin_unlock(&sched_spinlock, key);
		return;
	}

	z_mark_thread_as_started(thread);
	ready_thread(thread);
	z_reschedule(&sched_spinlock, key);
}

void z_impl_k_thread_suspend(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, suspend, thread);

	(void)z_abort_thread_timeout(thread);

	LOCKED(&sched_spinlock) {
		if (z_is_thread_queued(thread)) {
			dequeue_thread(thread);
		}
		z_mark_thread_as_suspended(thread);
		update_cache(thread == _current);
	}

	if (thread == _current) {
		z_reschedule_unlocked();
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, suspend, thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_suspend(struct k_thread *thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_suspend(thread);
}
#include <syscalls/k_thread_suspend_mrsh.c>
#endif

void z_impl_k_thread_resume(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, resume, thread);

	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

	/* Do not try to resume a thread that was not suspended */
	if (!z_is_thread_suspended(thread)) {
		k_spin_unlock(&sched_spinlock, key);
		return;
	}

	z_mark_thread_as_not_suspended(thread);
	ready_thread(thread);

	z_reschedule(&sched_spinlock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, resume, thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_resume(struct k_thread *thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_resume(thread);
}
#include <syscalls/k_thread_resume_mrsh.c>
#endif

static _wait_q_t *pended_on_thread(struct k_thread *thread)
{
	__ASSERT_NO_MSG(thread->base.pended_on);

	return thread->base.pended_on;
}

static void unready_thread(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}
	update_cache(thread == _current);
}

/* sched_spinlock must be held */
static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q)
{
	unready_thread(thread);
	z_mark_thread_as_pending(thread);

	SYS_PORT_TRACING_FUNC(k_thread, sched_pend, thread);

	if (wait_q != NULL) {
		thread->base.pended_on = wait_q;
		z_priq_wait_add(&wait_q->waitq, thread);
	}
}

static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
{
	if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		z_add_thread_timeout(thread, timeout);
	}
}

static void pend(struct k_thread *thread, _wait_q_t *wait_q,
		 k_timeout_t timeout)
{
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(wait_q == NULL || arch_mem_coherent(wait_q));
#endif

	LOCKED(&sched_spinlock) {
		add_to_waitq_locked(thread, wait_q);
	}

	add_thread_timeout(thread, timeout);
}

void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
		   k_timeout_t timeout)
{
	__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
	pend(thread, wait_q, timeout);
}

static inline void unpend_thread_no_timeout(struct k_thread *thread)
{
	_priq_wait_remove(&pended_on_thread(thread)->waitq, thread);
	z_mark_thread_as_not_pending(thread);
	thread->base.pended_on = NULL;
}

ALWAYS_INLINE void z_unpend_thread_no_timeout(struct k_thread *thread)
{
	LOCKED(&sched_spinlock) {
		unpend_thread_no_timeout(thread);
	}
}

#ifdef CONFIG_SYS_CLOCK_EXISTS
/* Timeout handler for *_thread_timeout() APIs */
void z_thread_timeout(struct _timeout *timeout)
{
	struct k_thread *thread = CONTAINER_OF(timeout,
					       struct k_thread, base.timeout);

	LOCKED(&sched_spinlock) {
		bool killed = ((thread->base.thread_state & _THREAD_DEAD) ||
			       (thread->base.thread_state & _THREAD_ABORTING));

		if (!killed) {
			if (thread->base.pended_on != NULL) {
				unpend_thread_no_timeout(thread);
			}
			z_mark_thread_as_started(thread);
			z_mark_thread_as_not_suspended(thread);
			ready_thread(thread);
		}
	}
}
#endif

int z_pend_curr_irqlock(uint32_t key, _wait_q_t *wait_q, k_timeout_t timeout)
{
	pend(_current, wait_q, timeout);

#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
	pending_current = _current;

	int ret = z_swap_irqlock(key);
	LOCKED(&sched_spinlock) {
		if (pending_current == _current) {
			pending_current = NULL;
		}
	}
	return ret;
#else
	return z_swap_irqlock(key);
#endif
}

int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
		_wait_q_t *wait_q, k_timeout_t timeout)
{
#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
	pending_current = _current;
#endif
	pend(_current, wait_q, timeout);
	return z_swap(lock, key);
}

struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
{
	struct k_thread *thread = NULL;

	LOCKED(&sched_spinlock) {
		thread = _priq_wait_best(&wait_q->waitq);

		if (thread != NULL) {
			unpend_thread_no_timeout(thread);
		}
	}

	return thread;
}

struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
{
	struct k_thread *thread = NULL;

	LOCKED(&sched_spinlock) {
		thread = _priq_wait_best(&wait_q->waitq);

		if (thread != NULL) {
			unpend_thread_no_timeout(thread);
			(void)z_abort_thread_timeout(thread);
		}
	}

	return thread;
}

void z_unpend_thread(struct k_thread *thread)
{
	z_unpend_thread_no_timeout(thread);
	(void)z_abort_thread_timeout(thread);
}

/* Priority set utility that does no rescheduling, it just changes the
 * run queue state, returning true if a reschedule is needed later.
 */
bool z_set_prio(struct k_thread *thread, int prio)
{
	bool need_sched = 0;

	LOCKED(&sched_spinlock) {
		need_sched = z_is_thread_ready(thread);

		if (need_sched) {
			/* Don't requeue on SMP if it's the running thread */
			if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) {
				dequeue_thread(thread);
				thread->base.prio = prio;
				queue_thread(thread);
			} else {
				thread->base.prio = prio;
			}
			update_cache(1);
		} else {
			thread->base.prio = prio;
		}
	}

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_priority_set, thread, prio);

	return need_sched;
}

void z_thread_priority_set(struct k_thread *thread, int prio)
{
	bool need_sched = z_set_prio(thread, prio);

	flag_ipi();

	if (need_sched && _current->base.sched_locked == 0U) {
		z_reschedule_unlocked();
	}
}

static inline bool resched(uint32_t key)
{
#ifdef CONFIG_SMP
	_current_cpu->swap_ok = 0;
#endif

	return arch_irq_unlocked(key) && !arch_is_in_isr();
}

/*
 * Check if the next ready thread is the same as the current thread
 * and save the trip if true.
 */
static inline bool need_swap(void)
{
	/* the SMP case will be handled in C based z_swap() */
#ifdef CONFIG_SMP
	return true;
#else
	struct k_thread *new_thread;

	/* Check if the next ready thread is the same as the current thread */
	new_thread = _kernel.ready_q.cache;
	return new_thread != _current;
#endif
}

void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
{
	if (resched(key.key) && need_swap()) {
		z_swap(lock, key);
	} else {
		k_spin_unlock(lock, key);
		signal_pending_ipi();
	}
}

void z_reschedule_irqlock(uint32_t key)
{
	if (resched(key)) {
		z_swap_irqlock(key);
	} else {
		irq_unlock(key);
		signal_pending_ipi();
	}
}

void k_sched_lock(void)
{
	LOCKED(&sched_spinlock) {
		SYS_PORT_TRACING_FUNC(k_thread, sched_lock);

		z_sched_lock();
	}
}

void k_sched_unlock(void)
{
	LOCKED(&sched_spinlock) {
		__ASSERT(_current->base.sched_locked != 0U, "");
		__ASSERT(!arch_is_in_isr(), "");

		++_current->base.sched_locked;
		update_cache(0);
	}

	LOG_DBG("scheduler unlocked (%p:%d)",
		_current, _current->base.sched_locked);

	SYS_PORT_TRACING_FUNC(k_thread, sched_unlock);

	z_reschedule_unlocked();
}

struct k_thread *z_swap_next_thread(void)
{
#ifdef CONFIG_SMP
	struct k_thread *ret = next_up();

	if (ret == _current) {
		/* When not swapping, have to signal IPIs here.  In
		 * the context switch case it must happen later, after
		 * _current gets requeued.
		 */
		signal_pending_ipi();
	}
	return ret;
#else
	return _kernel.ready_q.cache;
#endif
}

#ifdef CONFIG_USE_SWITCH
/* Just a wrapper around _current = xxx with tracing */
static inline void set_current(struct k_thread *new_thread)
{
	z_thread_mark_switched_out();
	_current_cpu->current = new_thread;
}

/**
 * @brief Determine next thread to execute upon completion of an interrupt
 *
 * Thread preemption is performed by context switching after the completion
 * of a non-recursed interrupt. This function determines which thread to
 * switch to if any. This function accepts as @p interrupted either:
 *
 * - The handle for the interrupted thread in which case the thread's context
 *   must already be fully saved and ready to be picked up by a different CPU.
 *
 * - NULL if more work is required to fully save the thread's state after
 *   it is known that a new thread is to be scheduled. It is up to the caller
 *   to store the handle resulting from the thread that is being switched out
 *   in that thread's "switch_handle" field after its
 *   context has fully been saved, following the same requirements as with
 *   the @ref arch_switch() function.
 *
 * If a new thread needs to be scheduled then its handle is returned.
 * Otherwise the same value provided as @p interrupted is returned back.
 * Those handles are the same opaque types used by the @ref arch_switch()
 * function.
 *
 * @warning
 * The @ref _current value may have changed after this call and not refer
 * to the interrupted thread anymore. It might be necessary to make a local
 * copy before calling this function.
 *
 * @param interrupted Handle for the thread that was interrupted or NULL.
 * @retval Handle for the next thread to execute, or @p interrupted when
 *         no new thread is to be scheduled.
 */
void *z_get_next_switch_handle(void *interrupted)
{
	z_check_stack_sentinel();

#ifdef CONFIG_SMP
	void *ret = NULL;

	LOCKED(&sched_spinlock) {
		struct k_thread *old_thread = _current, *new_thread;

		if (IS_ENABLED(CONFIG_SMP)) {
			old_thread->switch_handle = NULL;
		}
		new_thread = next_up();

		z_sched_usage_switch(new_thread);

		if (old_thread != new_thread) {
			update_metairq_preempt(new_thread);
			wait_for_switch(new_thread);
			arch_cohere_stacks(old_thread, interrupted, new_thread);

			_current_cpu->swap_ok = 0;
			set_current(new_thread);

#ifdef CONFIG_TIMESLICING
			z_reset_time_slice(new_thread);
#endif

#ifdef CONFIG_SPIN_VALIDATE
			/* Changed _current!  Update the spinlock
			 * bookkeeping so the validation doesn't get
			 * confused when the "wrong" thread tries to
			 * release the lock.
			 */
			z_spin_lock_set_owner(&sched_spinlock);
#endif

			/* A queued (runnable) old/current thread
			 * needs to be added back to the run queue
			 * here, and atomically with its switch handle
			 * being set below.  This is safe now, as we
			 * will not return into it.
			 */
			if (z_is_thread_queued(old_thread)) {
				runq_add(old_thread);
			}
		}
		old_thread->switch_handle = interrupted;
		ret = new_thread->switch_handle;
		if (IS_ENABLED(CONFIG_SMP)) {
			/* Active threads MUST have a null here */
			new_thread->switch_handle = NULL;
		}
	}
	signal_pending_ipi();
	return ret;
#else
	z_sched_usage_switch(_kernel.ready_q.cache);
	_current->switch_handle = interrupted;
	set_current(_kernel.ready_q.cache);
	return _current->switch_handle;
#endif
}
#endif

void z_priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread)
{
	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	sys_dlist_remove(&thread->base.qnode_dlist);
}

struct k_thread *z_priq_dumb_best(sys_dlist_t *pq)
{
	struct k_thread *thread = NULL;
	sys_dnode_t *n = sys_dlist_peek_head(pq);

	if (n != NULL) {
		thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
	}
	return thread;
}

bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b)
{
	struct k_thread *thread_a, *thread_b;
	int32_t cmp;

	thread_a = CONTAINER_OF(a, struct k_thread, base.qnode_rb);
	thread_b = CONTAINER_OF(b, struct k_thread, base.qnode_rb);

	cmp = z_sched_prio_cmp(thread_a, thread_b);

	if (cmp > 0) {
		return true;
	} else if (cmp < 0) {
		return false;
	} else {
		return thread_a->base.order_key < thread_b->base.order_key
			? 1 : 0;
	}
}
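
/* Note: for equal base priorities (cmp == 0) the tie is broken by
 * order_key, which z_priq_rb_add() below assigns in insertion order, so
 * the red/black queue preserves FIFO ordering among same-priority
 * threads just as the dumb list does.
 */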

void z_priq_rb_add(struct _priq_rb *pq, struct k_thread *thread)
{
	struct k_thread *t;

	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	thread->base.order_key = pq->next_order_key++;

	/* Renumber at wraparound.  This is tiny code, and in practice
	 * will almost never be hit on real systems.  BUT on very
	 * long-running systems where a priq never completely empties
	 * AND that contains very large numbers of threads, it can be
	 * a latency glitch to loop over all the threads like this.
	 */
	if (!pq->next_order_key) {
		RB_FOR_EACH_CONTAINER(&pq->tree, t, base.qnode_rb) {
			t->base.order_key = pq->next_order_key++;
		}
	}

	rb_insert(&pq->tree, &thread->base.qnode_rb);
}

void z_priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread)
{
	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	rb_remove(&pq->tree, &thread->base.qnode_rb);

	if (!pq->tree.root) {
		pq->next_order_key = 0;
	}
}

struct k_thread *z_priq_rb_best(struct _priq_rb *pq)
{
	struct k_thread *thread = NULL;
	struct rbnode *n = rb_get_min(&pq->tree);

	if (n != NULL) {
		thread = CONTAINER_OF(n, struct k_thread, base.qnode_rb);
	}
	return thread;
}

#ifdef CONFIG_SCHED_MULTIQ
# if (K_LOWEST_THREAD_PRIO - K_HIGHEST_THREAD_PRIO) > 31
# error Too many priorities for multiqueue scheduler (max 32)
# endif

static ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq,
					struct k_thread *thread)
{
	int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;

	sys_dlist_append(&pq->queues[priority_bit], &thread->base.qnode_dlist);
	pq->bitmask |= BIT(priority_bit);
}
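
/* The multiq variant keeps one dlist per priority level: priority_bit 0
 * corresponds to K_HIGHEST_THREAD_PRIO and the bitmask records which
 * per-priority lists are non-empty, so z_priq_mq_best() below can find
 * the highest-priority runnable thread with a single __builtin_ctz().
 */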
1216
Peter Mitsisf8b76f32021-11-29 09:52:11 -05001217static ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq,
1218 struct k_thread *thread)
Andy Ross9f06a352018-06-28 10:38:14 -07001219{
1220 int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;
1221
1222 sys_dlist_remove(&thread->base.qnode_dlist);
1223 if (sys_dlist_is_empty(&pq->queues[priority_bit])) {
Flavio Ceolina9962032019-02-26 10:14:04 -08001224 pq->bitmask &= ~BIT(priority_bit);
Andy Ross9f06a352018-06-28 10:38:14 -07001225 }
1226}
Jeremy Bettisfb1c36f2021-12-20 16:24:30 -07001227#endif
Andy Ross9f06a352018-06-28 10:38:14 -07001228
Patrik Flykt4344e272019-03-08 14:19:05 -07001229struct k_thread *z_priq_mq_best(struct _priq_mq *pq)
Andy Ross9f06a352018-06-28 10:38:14 -07001230{
1231 if (!pq->bitmask) {
1232 return NULL;
1233 }
1234
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001235 struct k_thread *thread = NULL;
Andy Ross9f06a352018-06-28 10:38:14 -07001236 sys_dlist_t *l = &pq->queues[__builtin_ctz(pq->bitmask)];
Flavio Ceolin26be3352018-11-15 15:03:32 -08001237 sys_dnode_t *n = sys_dlist_peek_head(l);
Andy Ross9f06a352018-06-28 10:38:14 -07001238
Peter A. Bigot692e1032019-01-03 23:36:28 -06001239 if (n != NULL) {
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001240 thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
Peter A. Bigot692e1032019-01-03 23:36:28 -06001241 }
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001242 return thread;
Andy Ross9f06a352018-06-28 10:38:14 -07001243}
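/*
 * Illustrative sketch (kept out of the build): the multiq scheduler keeps
 * one dlist per priority plus a bitmask of non-empty lists, so selecting
 * the best thread reduces to a count-trailing-zeros on the mask, as in
 * z_priq_mq_best() above.  "ready_mask" and "best_priority" are invented
 * names used only for illustration.
 */
#if 0 /* illustrative sketch only, not built */
static uint32_t ready_mask;

static int best_priority(void)
{
	if (ready_mask == 0U) {
		return -1;	/* no queue has a ready thread */
	}

	/* Bit 0 corresponds to K_HIGHEST_THREAD_PRIO, so the lowest set
	 * bit identifies the best non-empty queue.
	 */
	return __builtin_ctz(ready_mask);
}
#endif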
1244
Patrik Flykt4344e272019-03-08 14:19:05 -07001245int z_unpend_all(_wait_q_t *wait_q)
Andy Ross4ca0e072018-05-10 09:45:42 -07001246{
Andy Rossccf3bf72018-05-10 11:10:34 -07001247 int need_sched = 0;
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001248 struct k_thread *thread;
Andy Ross4ca0e072018-05-10 09:45:42 -07001249
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001250 while ((thread = z_waitq_head(wait_q)) != NULL) {
1251 z_unpend_thread(thread);
1252 z_ready_thread(thread);
Andy Ross4ca0e072018-05-10 09:45:42 -07001253 need_sched = 1;
1254 }
Andy Rossccf3bf72018-05-10 11:10:34 -07001255
1256 return need_sched;
Andy Ross4ca0e072018-05-10 09:45:42 -07001257}
1258
Andy Rossb155d062021-09-24 13:49:14 -07001259void init_ready_q(struct _ready_q *rq)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001260{
Andy Rossb155d062021-09-24 13:49:14 -07001261#if defined(CONFIG_SCHED_SCALABLE)
1262 rq->runq = (struct _priq_rb) {
Andy Ross1acd8c22018-05-03 14:51:49 -07001263 .tree = {
Patrik Flykt4344e272019-03-08 14:19:05 -07001264 .lessthan_fn = z_priq_rb_lessthan,
Andy Ross1acd8c22018-05-03 14:51:49 -07001265 }
1266 };
Andy Rossb155d062021-09-24 13:49:14 -07001267#elif defined(CONFIG_SCHED_MULTIQ)
Andy Ross9f06a352018-06-28 10:38:14 -07001268	for (int i = 0; i < ARRAY_SIZE(rq->runq.queues); i++) {
Andy Rossb155d062021-09-24 13:49:14 -07001269 sys_dlist_init(&rq->runq.queues[i]);
Andy Ross9f06a352018-06-28 10:38:14 -07001270 }
Andy Rossb155d062021-09-24 13:49:14 -07001271#else
1272 sys_dlist_init(&rq->runq);
Andy Ross9f06a352018-06-28 10:38:14 -07001273#endif
Andy Rossb155d062021-09-24 13:49:14 -07001274}
1275
1276void z_sched_init(void)
1277{
Andy Rossb11e7962021-09-24 10:57:39 -07001278#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
1279 for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
1280 init_ready_q(&_kernel.cpus[i].ready_q);
1281 }
1282#else
Andy Rossb155d062021-09-24 13:49:14 -07001283 init_ready_q(&_kernel.ready_q);
Andy Rossb11e7962021-09-24 10:57:39 -07001284#endif
Piotr Zięcik4a39b9e2018-07-26 14:56:39 +02001285
1286#ifdef CONFIG_TIMESLICING
1287 k_sched_time_slice_set(CONFIG_TIMESLICE_SIZE,
1288 CONFIG_TIMESLICE_PRIORITY);
1289#endif
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001290}
1291
Patrik Flykt4344e272019-03-08 14:19:05 -07001292int z_impl_k_thread_priority_get(k_tid_t thread)
Allan Stephens399d0ad2016-10-07 13:41:34 -05001293{
Benjamin Walshf6ca7de2016-11-08 10:36:50 -05001294 return thread->base.prio;
Allan Stephens399d0ad2016-10-07 13:41:34 -05001295}
1296
Andrew Boie76c04a22017-09-27 14:45:10 -07001297#ifdef CONFIG_USERSPACE
Andy Ross65649742019-08-06 13:34:31 -07001298static inline int z_vrfy_k_thread_priority_get(k_tid_t thread)
1299{
1300 Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1301 return z_impl_k_thread_priority_get(thread);
1302}
1303#include <syscalls/k_thread_priority_get_mrsh.c>
Andrew Boie76c04a22017-09-27 14:45:10 -07001304#endif
1305
Anas Nashif25c87db2021-03-29 10:54:23 -04001306void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001307{
Benjamin Walsh3cc2ba92016-11-08 15:44:05 -05001308 /*
1309 * Use NULL, since we cannot know what the entry point is (we do not
1310 * keep track of it) and idle cannot change its priority.
1311 */
Patrik Flykt4344e272019-03-08 14:19:05 -07001312 Z_ASSERT_VALID_PRIO(prio, NULL);
Andrew Boie4f77c2a2019-11-07 12:43:29 -08001313 __ASSERT(!arch_is_in_isr(), "");
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001314
Anas Nashif25c87db2021-03-29 10:54:23 -04001315 struct k_thread *th = (struct k_thread *)thread;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001316
Anas Nashif25c87db2021-03-29 10:54:23 -04001317 z_thread_priority_set(th, prio);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001318}
1319
Andrew Boie468190a2017-09-29 14:00:48 -07001320#ifdef CONFIG_USERSPACE
Andy Ross65649742019-08-06 13:34:31 -07001321static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio)
Andrew Boie468190a2017-09-29 14:00:48 -07001322{
Andrew Boie8345e5e2018-05-04 15:57:57 -07001323 Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1324 Z_OOPS(Z_SYSCALL_VERIFY_MSG(_is_valid_prio(prio, NULL),
Andy Ross65649742019-08-06 13:34:31 -07001325 "invalid thread priority %d", prio));
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001326 Z_OOPS(Z_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio,
Andrew Boie8345e5e2018-05-04 15:57:57 -07001327 "thread priority may only be downgraded (%d < %d)",
1328 prio, thread->base.prio));
Andrew Boie5008fed2017-10-08 10:11:24 -07001329
Andy Ross65649742019-08-06 13:34:31 -07001330 z_impl_k_thread_priority_set(thread, prio);
Andrew Boie468190a2017-09-29 14:00:48 -07001331}
Andy Ross65649742019-08-06 13:34:31 -07001332#include <syscalls/k_thread_priority_set_mrsh.c>
Andrew Boie468190a2017-09-29 14:00:48 -07001333#endif
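/*
 * Illustrative sketch (kept out of the build): the verifier above only
 * lets user mode lower a thread's priority (move it to a numerically
 * larger value).  "demote_worker" and "worker_tid" are invented names
 * used only for illustration, and the sketch assumes prio + 1 is still a
 * valid application priority.
 */
#if 0 /* illustrative sketch only, not built */
static void demote_worker(k_tid_t worker_tid)
{
	int prio = k_thread_priority_get(worker_tid);

	/* Downgrade by one level; boosting from user mode would oops. */
	k_thread_priority_set(worker_tid, prio + 1);
}
#endif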
1334
Andy Ross4a2e50f2018-05-15 11:06:25 -07001335#ifdef CONFIG_SCHED_DEADLINE
Patrik Flykt4344e272019-03-08 14:19:05 -07001336void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
Andy Ross4a2e50f2018-05-15 11:06:25 -07001337{
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001338 struct k_thread *thread = tid;
Andy Ross4a2e50f2018-05-15 11:06:25 -07001339
Patrik Flyktcf2d5792019-02-12 15:50:46 -07001340 LOCKED(&sched_spinlock) {
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001341 thread->base.prio_deadline = k_cycle_get_32() + deadline;
1342 if (z_is_thread_queued(thread)) {
Andy Rossc230fb32021-09-23 16:41:30 -07001343 dequeue_thread(thread);
1344 queue_thread(thread);
Andy Ross4a2e50f2018-05-15 11:06:25 -07001345 }
1346 }
1347}
1348
1349#ifdef CONFIG_USERSPACE
Andy Ross075c94f2019-08-13 11:34:34 -07001350static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline)
Andy Ross4a2e50f2018-05-15 11:06:25 -07001351{
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001352 struct k_thread *thread = tid;
Andy Ross4a2e50f2018-05-15 11:06:25 -07001353
1354 Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1355 Z_OOPS(Z_SYSCALL_VERIFY_MSG(deadline > 0,
1356 "invalid thread deadline %d",
1357 (int)deadline));
1358
Patrik Flykt4344e272019-03-08 14:19:05 -07001359 z_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
Andy Ross4a2e50f2018-05-15 11:06:25 -07001360}
Andy Ross075c94f2019-08-13 11:34:34 -07001361#include <syscalls/k_thread_deadline_set_mrsh.c>
Andy Ross4a2e50f2018-05-15 11:06:25 -07001362#endif
1363#endif
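/*
 * Illustrative sketch (kept out of the build): with CONFIG_SCHED_DEADLINE
 * the deadline is given in timer cycles relative to "now" and only
 * reorders threads that share the same static priority.  "frame_thread"
 * and "start_frame" are invented names used only for illustration.
 */
#if 0 /* illustrative sketch only, not built */
static void start_frame(k_tid_t frame_thread)
{
	/* Among equal-priority threads, the earliest deadline runs first;
	 * ask for completion within roughly 2 ms.
	 */
	k_thread_deadline_set(frame_thread, (int)k_ms_to_cyc_ceil32(2));
}
#endif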
1364
Jordan Yates1ef647f2022-03-26 09:55:23 +10001365bool k_can_yield(void)
1366{
1367 return !(k_is_pre_kernel() || k_is_in_isr() ||
1368 z_is_idle_thread_object(_current));
1369}
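/*
 * Illustrative sketch (kept out of the build): k_can_yield() lets shared
 * code yield opportunistically without tripping the ISR, pre-kernel or
 * idle-thread restrictions checked above.  "cooperative_point" is an
 * invented name used only for illustration.
 */
#if 0 /* illustrative sketch only, not built */
static void cooperative_point(void)
{
	if (k_can_yield()) {
		k_yield();
	}
}
#endif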
1370
Patrik Flykt4344e272019-03-08 14:19:05 -07001371void z_impl_k_yield(void)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001372{
Andrew Boie4f77c2a2019-11-07 12:43:29 -08001373 __ASSERT(!arch_is_in_isr(), "");
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001374
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001375 SYS_PORT_TRACING_FUNC(k_thread, yield);
1376
Andy Ross851d14a2021-05-13 15:46:43 -07001377 k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
James Harris6543e062021-03-01 10:14:13 -08001378
Andy Ross851d14a2021-05-13 15:46:43 -07001379 if (!IS_ENABLED(CONFIG_SMP) ||
1380 z_is_thread_queued(_current)) {
Andy Rossc230fb32021-09-23 16:41:30 -07001381 dequeue_thread(_current);
Andy Ross1acd8c22018-05-03 14:51:49 -07001382 }
Andy Rossc230fb32021-09-23 16:41:30 -07001383 queue_thread(_current);
Andy Ross851d14a2021-05-13 15:46:43 -07001384 update_cache(1);
1385 z_swap(&sched_spinlock, key);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001386}
1387
Andrew Boie468190a2017-09-29 14:00:48 -07001388#ifdef CONFIG_USERSPACE
Andy Ross65649742019-08-06 13:34:31 -07001389static inline void z_vrfy_k_yield(void)
1390{
1391 z_impl_k_yield();
1392}
1393#include <syscalls/k_yield_mrsh.c>
Andrew Boie468190a2017-09-29 14:00:48 -07001394#endif
1395
Flavio Ceolin7a815d52020-10-19 21:37:22 -07001396static int32_t z_tick_sleep(k_ticks_t ticks)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001397{
Benjamin Walshb12a8e02016-12-14 15:24:12 -05001398#ifdef CONFIG_MULTITHREADING
Flavio Ceolin9a160972020-11-16 10:40:46 -08001399 uint32_t expected_wakeup_ticks;
Carles Cufi9849df82016-12-02 15:31:08 +01001400
Andrew Boie4f77c2a2019-11-07 12:43:29 -08001401 __ASSERT(!arch_is_in_isr(), "");
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001402
Flavio Ceolin7a815d52020-10-19 21:37:22 -07001403#ifndef CONFIG_TIMEOUT_64BIT
1404 /* LOG subsys does not handle 64-bit values
1405 * https://github.com/zephyrproject-rtos/zephyr/issues/26246
1406 */
1407 LOG_DBG("thread %p for %u ticks", _current, ticks);
1408#endif
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001409
Benjamin Walsh5596f782016-12-09 19:57:17 -05001410	/* wait of 0 ticks is treated as a 'yield' */
Charles E. Youseb1863032019-05-08 13:22:46 -07001411 if (ticks == 0) {
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001412 k_yield();
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001413 return 0;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001414 }
1415
Andy Rosse9566392020-12-18 11:12:39 -08001416 k_timeout_t timeout = Z_TIMEOUT_TICKS(ticks);
Lauren Murphy4c85b462021-05-25 17:49:28 -05001417 if (Z_TICK_ABS(ticks) <= 0) {
1418 expected_wakeup_ticks = ticks + sys_clock_tick_get_32();
1419 } else {
1420 expected_wakeup_ticks = Z_TICK_ABS(ticks);
1421 }
Andy Rossd27d4e62019-02-05 15:36:01 -08001422
Andrew Boiea8775ab2020-09-05 12:53:42 -07001423 k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001424
Andy Rossdff6b712019-02-25 21:17:29 -08001425#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
1426 pending_current = _current;
1427#endif
Andrew Boiea8775ab2020-09-05 12:53:42 -07001428 unready_thread(_current);
Andy Ross78327382020-03-05 15:18:14 -08001429 z_add_thread_timeout(_current, timeout);
Andy Ross4521e0c2019-03-22 10:30:19 -07001430 z_mark_thread_as_suspended(_current);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001431
Andrew Boiea8775ab2020-09-05 12:53:42 -07001432 (void)z_swap(&sched_spinlock, key);
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001433
Andy Ross4521e0c2019-03-22 10:30:19 -07001434 __ASSERT(!z_is_thread_state_set(_current, _THREAD_SUSPENDED), "");
1435
Anas Nashif5c90ceb2021-03-13 08:19:53 -05001436 ticks = (k_ticks_t)expected_wakeup_ticks - sys_clock_tick_get_32();
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001437 if (ticks > 0) {
Charles E. Youseb1863032019-05-08 13:22:46 -07001438 return ticks;
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001439 }
Benjamin Walshb12a8e02016-12-14 15:24:12 -05001440#endif
Piotr Zięcik7700eb22018-10-25 17:45:08 +02001441
1442 return 0;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001443}
1444
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001445int32_t z_impl_k_sleep(k_timeout_t timeout)
Charles E. Youseb1863032019-05-08 13:22:46 -07001446{
Andy Ross78327382020-03-05 15:18:14 -08001447 k_ticks_t ticks;
Charles E. Youseb1863032019-05-08 13:22:46 -07001448
Peter Bigot8162e582019-12-12 16:07:07 -06001449 __ASSERT(!arch_is_in_isr(), "");
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001450
1451 SYS_PORT_TRACING_FUNC_ENTER(k_thread, sleep, timeout);
Peter Bigot8162e582019-12-12 16:07:07 -06001452
Anas Nashifd2c71792020-10-17 07:52:17 -04001453 /* in case of K_FOREVER, we suspend */
Andy Ross78327382020-03-05 15:18:14 -08001454 if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
Andrew Boied2b89222019-11-08 10:44:22 -08001455 k_thread_suspend(_current);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001456
1457 SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, (int32_t) K_TICKS_FOREVER);
1458
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001459 return (int32_t) K_TICKS_FOREVER;
Andrew Boied2b89222019-11-08 10:44:22 -08001460 }
1461
Andy Ross78327382020-03-05 15:18:14 -08001462 ticks = timeout.ticks;
Andy Ross78327382020-03-05 15:18:14 -08001463
Charles E. Youseb1863032019-05-08 13:22:46 -07001464 ticks = z_tick_sleep(ticks);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001465
1466 int32_t ret = k_ticks_to_ms_floor64(ticks);
1467
1468 SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, ret);
1469
1470 return ret;
Charles E. Youseb1863032019-05-08 13:22:46 -07001471}
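/*
 * Illustrative sketch (kept out of the build): k_sleep() returns zero when
 * the full timeout elapsed and the remaining time in milliseconds when the
 * sleep was cut short by k_wakeup().  "sleep_and_report" is an invented
 * name used only for illustration.
 */
#if 0 /* illustrative sketch only, not built */
static void sleep_and_report(void)
{
	int32_t left = k_sleep(K_MSEC(100));

	if (left > 0) {
		printk("woken early, %d ms of sleep remained\n", left);
	}
}
#endif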
1472
Andrew Boie76c04a22017-09-27 14:45:10 -07001473#ifdef CONFIG_USERSPACE
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001474static inline int32_t z_vrfy_k_sleep(k_timeout_t timeout)
Andrew Boie76c04a22017-09-27 14:45:10 -07001475{
Andy Ross78327382020-03-05 15:18:14 -08001476 return z_impl_k_sleep(timeout);
Charles E. Yousea5678312019-05-09 16:46:46 -07001477}
Andy Ross65649742019-08-06 13:34:31 -07001478#include <syscalls/k_sleep_mrsh.c>
Charles E. Yousea5678312019-05-09 16:46:46 -07001479#endif
1480
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001481int32_t z_impl_k_usleep(int us)
Charles E. Yousea5678312019-05-09 16:46:46 -07001482{
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001483 int32_t ticks;
Charles E. Yousea5678312019-05-09 16:46:46 -07001484
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001485 SYS_PORT_TRACING_FUNC_ENTER(k_thread, usleep, us);
1486
Andy Ross88924062019-10-03 11:43:10 -07001487 ticks = k_us_to_ticks_ceil64(us);
Charles E. Yousea5678312019-05-09 16:46:46 -07001488 ticks = z_tick_sleep(ticks);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001489
1490 SYS_PORT_TRACING_FUNC_EXIT(k_thread, usleep, us, k_ticks_to_us_floor64(ticks));
1491
Andy Ross88924062019-10-03 11:43:10 -07001492 return k_ticks_to_us_floor64(ticks);
Charles E. Yousea5678312019-05-09 16:46:46 -07001493}
1494
1495#ifdef CONFIG_USERSPACE
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001496static inline int32_t z_vrfy_k_usleep(int us)
Charles E. Yousea5678312019-05-09 16:46:46 -07001497{
1498 return z_impl_k_usleep(us);
Andrew Boie76c04a22017-09-27 14:45:10 -07001499}
Andy Ross65649742019-08-06 13:34:31 -07001500#include <syscalls/k_usleep_mrsh.c>
Andrew Boie76c04a22017-09-27 14:45:10 -07001501#endif
1502
Patrik Flykt4344e272019-03-08 14:19:05 -07001503void z_impl_k_wakeup(k_tid_t thread)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001504{
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001505 SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread);
1506
Patrik Flykt4344e272019-03-08 14:19:05 -07001507 if (z_is_thread_pending(thread)) {
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001508 return;
1509 }
1510
Patrik Flykt4344e272019-03-08 14:19:05 -07001511 if (z_abort_thread_timeout(thread) < 0) {
Andrew Boied2b89222019-11-08 10:44:22 -08001512 /* Might have just been sleeping forever */
1513 if (thread->base.thread_state != _THREAD_SUSPENDED) {
1514 return;
1515 }
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001516 }
1517
Andy Ross4521e0c2019-03-22 10:30:19 -07001518 z_mark_thread_as_not_suspended(thread);
Patrik Flykt4344e272019-03-08 14:19:05 -07001519 z_ready_thread(thread);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001520
Andy Ross3267cd32022-04-06 09:58:20 -07001521 flag_ipi();
Andy Ross5737b5c2020-02-04 13:52:09 -08001522
Andrew Boie4f77c2a2019-11-07 12:43:29 -08001523 if (!arch_is_in_isr()) {
Patrik Flykt4344e272019-03-08 14:19:05 -07001524 z_reschedule_unlocked();
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001525 }
1526}
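/*
 * Illustrative sketch (kept out of the build): k_wakeup() only affects a
 * thread blocked in k_sleep(); as the early return above shows, a thread
 * pended on a kernel object is left alone.  "cancel_sleep" and
 * "sleeper_tid" are invented names used only for illustration.
 */
#if 0 /* illustrative sketch only, not built */
static void cancel_sleep(k_tid_t sleeper_tid)
{
	/* Harmless if the target is not actually sleeping */
	k_wakeup(sleeper_tid);
}
#endif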
1527
Enjia Mai7ac40aa2020-05-28 11:29:50 +08001528#ifdef CONFIG_TRACE_SCHED_IPI
1529extern void z_trace_sched_ipi(void);
1530#endif
1531
Andy Ross42ed12a2019-02-19 16:03:39 -08001532#ifdef CONFIG_SMP
Andy Ross42ed12a2019-02-19 16:03:39 -08001533void z_sched_ipi(void)
1534{
Daniel Leungadac4cb2020-01-09 18:55:07 -08001535 /* NOTE: When adding code to this, make sure this is called
1536	 * at the appropriate location when !CONFIG_SCHED_IPI_SUPPORTED.
1537 */
Enjia Mai7ac40aa2020-05-28 11:29:50 +08001538#ifdef CONFIG_TRACE_SCHED_IPI
1539 z_trace_sched_ipi();
1540#endif
Andy Ross42ed12a2019-02-19 16:03:39 -08001541}
Andy Ross42ed12a2019-02-19 16:03:39 -08001542#endif
1543
Andrew Boie468190a2017-09-29 14:00:48 -07001544#ifdef CONFIG_USERSPACE
Andy Ross65649742019-08-06 13:34:31 -07001545static inline void z_vrfy_k_wakeup(k_tid_t thread)
1546{
1547 Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1548 z_impl_k_wakeup(thread);
1549}
1550#include <syscalls/k_wakeup_mrsh.c>
Andrew Boie468190a2017-09-29 14:00:48 -07001551#endif
1552
Andrew Boief07df422020-11-06 13:11:12 -08001553k_tid_t z_impl_z_current_get(void)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001554{
Andy Rosseefd3da2020-02-06 13:39:52 -08001555#ifdef CONFIG_SMP
1556 /* In SMP, _current is a field read from _current_cpu, which
1557 * can race with preemption before it is read. We must lock
1558 * local interrupts when reading it.
1559 */
1560 unsigned int k = arch_irq_lock();
1561#endif
1562
1563 k_tid_t ret = _current_cpu->current;
1564
1565#ifdef CONFIG_SMP
1566 arch_irq_unlock(k);
1567#endif
1568 return ret;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001569}
1570
Andrew Boie76c04a22017-09-27 14:45:10 -07001571#ifdef CONFIG_USERSPACE
Andrew Boief07df422020-11-06 13:11:12 -08001572static inline k_tid_t z_vrfy_z_current_get(void)
Andy Ross65649742019-08-06 13:34:31 -07001573{
Andrew Boief07df422020-11-06 13:11:12 -08001574 return z_impl_z_current_get();
Andy Ross65649742019-08-06 13:34:31 -07001575}
Andrew Boief07df422020-11-06 13:11:12 -08001576#include <syscalls/z_current_get_mrsh.c>
Andrew Boie76c04a22017-09-27 14:45:10 -07001577#endif
1578
Patrik Flykt4344e272019-03-08 14:19:05 -07001579int z_impl_k_is_preempt_thread(void)
Benjamin Walsh445830d2016-11-10 15:54:27 -05001580{
Andrew Boie4f77c2a2019-11-07 12:43:29 -08001581 return !arch_is_in_isr() && is_preempt(_current);
Benjamin Walsh445830d2016-11-10 15:54:27 -05001582}
Andrew Boie468190a2017-09-29 14:00:48 -07001583
1584#ifdef CONFIG_USERSPACE
Andy Ross65649742019-08-06 13:34:31 -07001585static inline int z_vrfy_k_is_preempt_thread(void)
1586{
1587 return z_impl_k_is_preempt_thread();
1588}
1589#include <syscalls/k_is_preempt_thread_mrsh.c>
Andrew Boie468190a2017-09-29 14:00:48 -07001590#endif
Andy Rossab46b1b2019-01-30 15:00:42 -08001591
1592#ifdef CONFIG_SCHED_CPU_MASK
1593# ifdef CONFIG_SMP
1594/* Right now we use a single byte for this mask */
Oleg Zhurakivskyyb1e1f642020-03-12 17:16:00 +02001595BUILD_ASSERT(CONFIG_MP_NUM_CPUS <= 8, "Too many CPUs for mask word");
Andy Rossab46b1b2019-01-30 15:00:42 -08001596# endif
1597
1598
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001599static int cpu_mask_mod(k_tid_t thread, uint32_t enable_mask, uint32_t disable_mask)
Andy Rossab46b1b2019-01-30 15:00:42 -08001600{
1601 int ret = 0;
1602
Flavio Ceolin551038e2022-05-02 14:31:04 -07001603#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
1604	__ASSERT((thread->base.thread_state & _THREAD_PRESTART) != 0,
1605		 "Only PRESTARTED threads can change CPU pin");
1606#endif
1607
Patrik Flyktcf2d5792019-02-12 15:50:46 -07001608 LOCKED(&sched_spinlock) {
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001609 if (z_is_thread_prevented_from_running(thread)) {
1610 thread->base.cpu_mask |= enable_mask;
1611 thread->base.cpu_mask &= ~disable_mask;
Andy Rossab46b1b2019-01-30 15:00:42 -08001612 } else {
1613 ret = -EINVAL;
1614 }
1615 }
Andy Rossb11e7962021-09-24 10:57:39 -07001616
1617#if defined(CONFIG_ASSERT) && defined(CONFIG_SCHED_CPU_MASK_PIN_ONLY)
1618 int m = thread->base.cpu_mask;
1619
1620 __ASSERT((m == 0) || ((m & (m - 1)) == 0),
1621 "Only one CPU allowed in mask when PIN_ONLY");
1622#endif
1623
Andy Rossab46b1b2019-01-30 15:00:42 -08001624 return ret;
1625}
1626
1627int k_thread_cpu_mask_clear(k_tid_t thread)
1628{
1629 return cpu_mask_mod(thread, 0, 0xffffffff);
1630}
1631
1632int k_thread_cpu_mask_enable_all(k_tid_t thread)
1633{
1634 return cpu_mask_mod(thread, 0xffffffff, 0);
1635}
1636
1637int k_thread_cpu_mask_enable(k_tid_t thread, int cpu)
1638{
1639 return cpu_mask_mod(thread, BIT(cpu), 0);
1640}
1641
1642int k_thread_cpu_mask_disable(k_tid_t thread, int cpu)
1643{
1644 return cpu_mask_mod(thread, 0, BIT(cpu));
1645}
1646
Anas Nashifc9d02482022-04-15 08:27:15 -04001647int k_thread_cpu_pin(k_tid_t thread, int cpu)
1648{
1649 int ret;
1650
1651 ret = k_thread_cpu_mask_clear(thread);
1652 if (ret == 0) {
1653 return k_thread_cpu_mask_enable(thread, cpu);
1654 }
1655 return ret;
1656}
1657
Andy Rossab46b1b2019-01-30 15:00:42 -08001658#endif /* CONFIG_SCHED_CPU_MASK */
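/*
 * Illustrative sketch (kept out of the build): cpu_mask_mod() only accepts
 * threads that are not currently runnable, so pinning is normally done on
 * a thread created with a K_FOREVER start delay and started afterwards.
 * "worker" and "start_pinned_worker" are invented names used only for
 * illustration.
 */
#if 0 /* illustrative sketch only, not built */
extern struct k_thread worker;	/* created with a K_FOREVER start delay */

static void start_pinned_worker(void)
{
	/* Restrict the not-yet-started worker to CPU 1, then launch it */
	(void)k_thread_cpu_pin(&worker, 1);
	k_thread_start(&worker);
}
#endif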
Andrew Boie322816e2020-02-20 16:33:06 -08001659
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001660static inline void unpend_all(_wait_q_t *wait_q)
1661{
1662 struct k_thread *thread;
1663
1664 while ((thread = z_waitq_head(wait_q)) != NULL) {
1665 unpend_thread_no_timeout(thread);
1666 (void)z_abort_thread_timeout(thread);
1667 arch_thread_return_value_set(thread, 0);
1668 ready_thread(thread);
1669 }
1670}
1671
Chen Peng10f63d112021-09-06 13:59:40 +08001672#ifdef CONFIG_CMSIS_RTOS_V1
1673extern void z_thread_cmsis_status_mask_clear(struct k_thread *thread);
1674#endif
1675
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001676static void end_thread(struct k_thread *thread)
1677{
1678 /* We hold the lock, and the thread is known not to be running
1679 * anywhere.
1680 */
Anas Nashifbbbc38b2021-03-29 10:03:49 -04001681 if ((thread->base.thread_state & _THREAD_DEAD) == 0U) {
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001682 thread->base.thread_state |= _THREAD_DEAD;
1683 thread->base.thread_state &= ~_THREAD_ABORTING;
1684 if (z_is_thread_queued(thread)) {
Andy Rossc230fb32021-09-23 16:41:30 -07001685 dequeue_thread(thread);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001686 }
1687 if (thread->base.pended_on != NULL) {
1688 unpend_thread_no_timeout(thread);
1689 }
1690 (void)z_abort_thread_timeout(thread);
1691 unpend_all(&thread->join_queue);
1692 update_cache(1);
1693
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001694 SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread);
1695
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001696 z_thread_monitor_exit(thread);
1697
Chen Peng10f63d112021-09-06 13:59:40 +08001698#ifdef CONFIG_CMSIS_RTOS_V1
1699 z_thread_cmsis_status_mask_clear(thread);
1700#endif
1701
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001702#ifdef CONFIG_USERSPACE
1703 z_mem_domain_exit_thread(thread);
1704 z_thread_perms_all_clear(thread);
1705 z_object_uninit(thread->stack_obj);
1706 z_object_uninit(thread);
1707#endif
1708 }
1709}
1710
1711void z_thread_abort(struct k_thread *thread)
1712{
1713 k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
1714
Andy Rossfb613592022-05-19 12:55:28 -07001715 if ((thread->base.user_options & K_ESSENTIAL) != 0) {
1716 k_spin_unlock(&sched_spinlock, key);
1717 __ASSERT(false, "aborting essential thread %p", thread);
1718 k_panic();
1719 return;
1720 }
1721
Anas Nashif3f4f3f62021-03-29 17:13:47 -04001722 if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001723 k_spin_unlock(&sched_spinlock, key);
1724 return;
1725 }
1726
1727#ifdef CONFIG_SMP
1728 if (is_aborting(thread) && thread == _current && arch_is_in_isr()) {
1729 /* Another CPU is spinning for us, don't deadlock */
1730 end_thread(thread);
1731 }
1732
1733 bool active = thread_active_elsewhere(thread);
1734
1735 if (active) {
1736 /* It's running somewhere else, flag and poke */
1737 thread->base.thread_state |= _THREAD_ABORTING;
Lauren Murphyd88ce652021-03-09 16:41:43 -06001738
Andy Rossb4e9ef02022-04-06 10:10:17 -07001739		/* We're going to spin, so we need a true synchronous IPI
1740		 * here, not a deferred one.
1741 */
Lauren Murphyd88ce652021-03-09 16:41:43 -06001742#ifdef CONFIG_SCHED_IPI_SUPPORTED
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001743 arch_sched_ipi();
Lauren Murphyd88ce652021-03-09 16:41:43 -06001744#endif
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001745 }
1746
1747 if (is_aborting(thread) && thread != _current) {
1748 if (arch_is_in_isr()) {
1749			/* ISRs can only spin waiting for another CPU */
1750 k_spin_unlock(&sched_spinlock, key);
1751 while (is_aborting(thread)) {
1752 }
1753 } else if (active) {
1754 /* Threads can join */
1755 add_to_waitq_locked(_current, &thread->join_queue);
1756 z_swap(&sched_spinlock, key);
1757 }
1758 return; /* lock has been released */
1759 }
1760#endif
1761 end_thread(thread);
1762 if (thread == _current && !arch_is_in_isr()) {
1763 z_swap(&sched_spinlock, key);
1764 __ASSERT(false, "aborted _current back from dead");
1765 }
1766 k_spin_unlock(&sched_spinlock, key);
1767}
1768
1769#if !defined(CONFIG_ARCH_HAS_THREAD_ABORT)
1770void z_impl_k_thread_abort(struct k_thread *thread)
1771{
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001772 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread);
1773
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001774 z_thread_abort(thread);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001775
1776 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread);
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001777}
1778#endif
1779
1780int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
1781{
1782 k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
1783 int ret = 0;
1784
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001785 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, join, thread, timeout);
1786
Anas Nashif3f4f3f62021-03-29 17:13:47 -04001787 if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001788 ret = 0;
1789 } else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
1790 ret = -EBUSY;
Anas Nashif3f4f3f62021-03-29 17:13:47 -04001791 } else if ((thread == _current) ||
1792 (thread->base.pended_on == &_current->join_queue)) {
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001793 ret = -EDEADLK;
1794 } else {
1795 __ASSERT(!arch_is_in_isr(), "cannot join in ISR");
1796 add_to_waitq_locked(_current, &thread->join_queue);
1797 add_thread_timeout(_current, timeout);
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001798
1799 SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout);
1800 ret = z_swap(&sched_spinlock, key);
1801 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);
1802
1803 return ret;
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001804 }
1805
Torbjörn Leksellf1714432021-03-26 10:59:08 +01001806 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);
1807
Andy Ross6fb6d3c2021-02-19 15:32:19 -08001808 k_spin_unlock(&sched_spinlock, key);
1809 return ret;
1810}
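/*
 * Illustrative sketch (kept out of the build): k_thread_join() returns 0
 * once the target is dead, -EBUSY for a K_NO_WAIT poll of a live thread,
 * and -EAGAIN if the timeout expires first.  "wait_for_worker" and
 * "worker_tid" are invented names used only for illustration.
 */
#if 0 /* illustrative sketch only, not built */
static int wait_for_worker(k_tid_t worker_tid)
{
	/* Block for at most one second waiting for the thread to exit */
	return k_thread_join(worker_tid, K_SECONDS(1));
}
#endif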
1811
Andrew Boie322816e2020-02-20 16:33:06 -08001812#ifdef CONFIG_USERSPACE
1813/* Special case: don't oops if the thread is uninitialized. This is because
1814 * the initialization bit does double-duty for thread objects; if false, it
1815 * means either the thread object is truly uninitialized or the thread ran
1816 * and exited for some reason.
1817 *
1818 * Return true in this case indicating we should just do nothing and return
1819 * success to the caller.
1820 */
1821static bool thread_obj_validate(struct k_thread *thread)
1822{
Andrew Boie2dc2ecf2020-03-11 07:13:07 -07001823 struct z_object *ko = z_object_find(thread);
Andrew Boie322816e2020-02-20 16:33:06 -08001824 int ret = z_object_validate(ko, K_OBJ_THREAD, _OBJ_INIT_TRUE);
1825
1826 switch (ret) {
1827 case 0:
1828 return false;
1829 case -EINVAL:
1830 return true;
1831 default:
1832#ifdef CONFIG_LOG
1833 z_dump_object_error(ret, thread, ko, K_OBJ_THREAD);
1834#endif
1835 Z_OOPS(Z_SYSCALL_VERIFY_MSG(ret, "access denied"));
1836 }
Enjia Mai53ca7092021-01-15 17:09:58 +08001837 CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
Andrew Boie322816e2020-02-20 16:33:06 -08001838}
1839
Andy Ross78327382020-03-05 15:18:14 -08001840static inline int z_vrfy_k_thread_join(struct k_thread *thread,
1841 k_timeout_t timeout)
Andrew Boie322816e2020-02-20 16:33:06 -08001842{
1843 if (thread_obj_validate(thread)) {
1844 return 0;
1845 }
1846
1847 return z_impl_k_thread_join(thread, timeout);
1848}
1849#include <syscalls/k_thread_join_mrsh.c>
Andrew Boiea4c91902020-03-24 16:09:24 -07001850
1851static inline void z_vrfy_k_thread_abort(k_tid_t thread)
1852{
1853 if (thread_obj_validate(thread)) {
1854 return;
1855 }
1856
1857 Z_OOPS(Z_SYSCALL_VERIFY_MSG(!(thread->base.user_options & K_ESSENTIAL),
1858 "aborting essential thread %p", thread));
1859
1860 z_impl_k_thread_abort((struct k_thread *)thread);
1861}
1862#include <syscalls/k_thread_abort_mrsh.c>
Andrew Boie322816e2020-02-20 16:33:06 -08001863#endif /* CONFIG_USERSPACE */
Peter Bigot0259c862021-01-12 13:45:32 -06001864
1865/*
1866 * future scheduler.h API implementations
1867 */
1868bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data)
1869{
1870 struct k_thread *thread;
1871 bool ret = false;
1872
1873 LOCKED(&sched_spinlock) {
1874 thread = _priq_wait_best(&wait_q->waitq);
1875
1876 if (thread != NULL) {
1877 z_thread_return_value_set_with_data(thread,
1878 swap_retval,
1879 swap_data);
1880 unpend_thread_no_timeout(thread);
1881 (void)z_abort_thread_timeout(thread);
1882 ready_thread(thread);
1883 ret = true;
1884 }
1885 }
1886
1887 return ret;
1888}
1889
1890int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
1891 _wait_q_t *wait_q, k_timeout_t timeout, void **data)
1892{
1893 int ret = z_pend_curr(lock, key, wait_q, timeout);
1894
1895 if (data != NULL) {
1896 *data = _current->base.swap_data;
1897 }
1898 return ret;
1899}
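/*
 * Illustrative sketch (kept out of the build): a kernel-side wait/wake
 * primitive can be layered on the two helpers above by pending the caller
 * with z_sched_wait() and releasing one waiter with z_sched_wake().
 * "struct simple_event", "event_wait" and "event_signal" are invented
 * names used only for illustration; the waitq is assumed to have been
 * initialized elsewhere.
 */
#if 0 /* illustrative sketch only, not built */
struct simple_event {
	struct k_spinlock lock;
	_wait_q_t waitq;
};

static int event_wait(struct simple_event *evt, k_timeout_t timeout)
{
	k_spinlock_key_t key = k_spin_lock(&evt->lock);

	/* Releases the lock, pends the caller, and returns the waker's
	 * swap_retval (or -EAGAIN if the timeout expires first).
	 */
	return z_sched_wait(&evt->lock, key, &evt->waitq, timeout, NULL);
}

static void event_signal(struct simple_event *evt)
{
	/* Wake at most one waiter with a swap_retval of 0 */
	(void)z_sched_wake(&evt->waitq, 0, NULL);
}
#endif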