/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <kernel.h>
#include <ksched.h>
#include <spinlock.h>
#include <kernel/sched_priq.h>
#include <wait_q.h>
#include <kswap.h>
#include <kernel_arch_func.h>
#include <syscall_handler.h>
#include <drivers/timer/system_timer.h>
#include <stdbool.h>
#include <kernel_internal.h>
#include <logging/log.h>
#include <sys/atomic.h>
#include <sys/math_extras.h>
#include <timing/timing.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

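/* The _priq_run_*() and _priq_wait_*() names below are build-time
 * aliases: the Kconfig choice of scheduler backend (dumb list, scalable
 * red/black tree, or multi-queue) and of wait queue backend selects
 * which concrete z_priq_*() implementation backs the run queue and the
 * wait queues respectively.
 */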
#if defined(CONFIG_SCHED_DUMB)
#define _priq_run_add z_priq_dumb_add
#define _priq_run_remove z_priq_dumb_remove
# if defined(CONFIG_SCHED_CPU_MASK)
#  define _priq_run_best _priq_dumb_mask_best
# else
#  define _priq_run_best z_priq_dumb_best
# endif
#elif defined(CONFIG_SCHED_SCALABLE)
#define _priq_run_add z_priq_rb_add
#define _priq_run_remove z_priq_rb_remove
#define _priq_run_best z_priq_rb_best
#elif defined(CONFIG_SCHED_MULTIQ)
#define _priq_run_add z_priq_mq_add
#define _priq_run_remove z_priq_mq_remove
#define _priq_run_best z_priq_mq_best
static ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq,
					struct k_thread *thread);
static ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq,
					   struct k_thread *thread);
#endif

#if defined(CONFIG_WAITQ_SCALABLE)
#define z_priq_wait_add z_priq_rb_add
#define _priq_wait_remove z_priq_rb_remove
#define _priq_wait_best z_priq_rb_best
#elif defined(CONFIG_WAITQ_DUMB)
#define z_priq_wait_add z_priq_dumb_add
#define _priq_wait_remove z_priq_dumb_remove
#define _priq_wait_best z_priq_dumb_best
#endif

struct k_spinlock sched_spinlock;

static void update_cache(int preempt_ok);
static void end_thread(struct k_thread *thread);


static inline int is_preempt(struct k_thread *thread)
{
	/* explanation in kernel_struct.h */
	return thread->base.preempt <= _PREEMPT_THRESHOLD;
}

static inline int is_metairq(struct k_thread *thread)
{
#if CONFIG_NUM_METAIRQ_PRIORITIES > 0
	return (thread->base.prio - K_HIGHEST_THREAD_PRIO)
		< CONFIG_NUM_METAIRQ_PRIORITIES;
#else
	return 0;
#endif
}

#if CONFIG_ASSERT
static inline bool is_thread_dummy(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_DUMMY) != 0U;
}
#endif

/*
 * Return value same as e.g. memcmp
 * > 0 -> thread 1 priority  > thread 2 priority
 * = 0 -> thread 1 priority == thread 2 priority
 * < 0 -> thread 1 priority  < thread 2 priority
 * Do not rely on the actual value returned aside from the above.
 * (Again, like memcmp.)
 */
int32_t z_sched_prio_cmp(struct k_thread *thread_1,
			 struct k_thread *thread_2)
{
	/* `prio` is <32b, so the below cannot overflow. */
	int32_t b1 = thread_1->base.prio;
	int32_t b2 = thread_2->base.prio;

	if (b1 != b2) {
		return b2 - b1;
	}

#ifdef CONFIG_SCHED_DEADLINE
	/* If we assume all deadlines live within the same "half" of
	 * the 32 bit modulus space (this is a documented API rule),
	 * then the latest deadline in the queue minus the earliest is
	 * guaranteed to be (2's complement) non-negative.  We can
	 * leverage that to compare the values without having to check
	 * the current time.
	 */
	uint32_t d1 = thread_1->base.prio_deadline;
	uint32_t d2 = thread_2->base.prio_deadline;

	if (d1 != d2) {
		/* Sooner deadline means higher effective priority.
		 * Doing the calculation with unsigned types and casting
		 * to signed isn't perfect, but at least reduces this
		 * from UB on overflow to impdef.
		 */
		return (int32_t) (d2 - d1);
	}
#endif
	return 0;
}
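
/* Illustrative deadline comparison (example values, not part of the
 * build): with d1 = 0xFFFFFFF0 and d2 = 0x00000010, (int32_t)(d2 - d1)
 * is 32 > 0, so thread_1, whose deadline comes sooner modulo 2^32,
 * wins -- provided both deadlines stay within the same half of the
 * 32 bit space as required above.
 */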

static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
					 int preempt_ok)
{
	/* Preemption is OK if it's being explicitly allowed by
	 * software state (e.g. the thread called k_yield())
	 */
	if (preempt_ok != 0) {
		return true;
	}

	__ASSERT(_current != NULL, "");

	/* Or if we're pended/suspended/dummy (duh) */
	if (z_is_thread_prevented_from_running(_current)) {
		return true;
	}

	/* Edge case on ARM where a thread can be pended out of an
	 * interrupt handler before the "synchronous" swap starts
	 * context switching.  Platforms with atomic swap can never
	 * hit this.
	 */
	if (IS_ENABLED(CONFIG_SWAP_NONATOMIC)
	    && z_is_thread_timeout_active(thread)) {
		return true;
	}

	/* Otherwise we have to be running a preemptible thread or
	 * switching to a metairq
	 */
	if (is_preempt(_current) || is_metairq(thread)) {
		return true;
	}

	return false;
}

#ifdef CONFIG_SCHED_CPU_MASK
static ALWAYS_INLINE struct k_thread *_priq_dumb_mask_best(sys_dlist_t *pq)
{
	/* With masks enabled we need to be prepared to walk the list
	 * looking for one we can run
	 */
	struct k_thread *thread;

	SYS_DLIST_FOR_EACH_CONTAINER(pq, thread, base.qnode_dlist) {
		if ((thread->base.cpu_mask & BIT(_current_cpu->id)) != 0) {
			return thread;
		}
	}
	return NULL;
}
#endif

static ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq,
					  struct k_thread *thread)
{
	struct k_thread *t;

	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	SYS_DLIST_FOR_EACH_CONTAINER(pq, t, base.qnode_dlist) {
		if (z_sched_prio_cmp(thread, t) > 0) {
			sys_dlist_insert(&t->base.qnode_dlist,
					 &thread->base.qnode_dlist);
			return;
		}
	}

	sys_dlist_append(pq, &thread->base.qnode_dlist);
}

static ALWAYS_INLINE void *thread_runq(struct k_thread *thread)
{
#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	int cpu, m = thread->base.cpu_mask;

	/* Edge case: it's legal per the API to "make runnable" a
	 * thread with all CPUs masked off (i.e. one that isn't
	 * actually runnable!).  Sort of a wart in the API and maybe
	 * we should address this in docs/assertions instead to avoid
	 * the extra test.
	 */
	cpu = m == 0 ? 0 : u32_count_trailing_zeros(m);

	return &_kernel.cpus[cpu].ready_q.runq;
#else
	return &_kernel.ready_q.runq;
#endif
}

static ALWAYS_INLINE void *curr_cpu_runq(void)
{
#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	return &arch_curr_cpu()->ready_q.runq;
#else
	return &_kernel.ready_q.runq;
#endif
}

static ALWAYS_INLINE void runq_add(struct k_thread *thread)
{
	_priq_run_add(thread_runq(thread), thread);
}

static ALWAYS_INLINE void runq_remove(struct k_thread *thread)
{
	_priq_run_remove(thread_runq(thread), thread);
}

static ALWAYS_INLINE struct k_thread *runq_best(void)
{
	return _priq_run_best(curr_cpu_runq());
}
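
/* Note: with CONFIG_SCHED_CPU_MASK_PIN_ONLY each CPU owns a private run
 * queue, so thread_runq() picks the queue of the single CPU a thread's
 * mask pins it to, while curr_cpu_runq() is always the local CPU's
 * queue.  Otherwise all of the helpers above resolve to the one global
 * run queue.
 */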

/* _current is never in the run queue until context switch on
 * SMP configurations, see z_requeue_current()
 */
static inline bool should_queue_thread(struct k_thread *th)
{
	return !IS_ENABLED(CONFIG_SMP) || th != _current;
}

static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_QUEUED;
	if (should_queue_thread(thread)) {
		runq_add(thread);
	}
#ifdef CONFIG_SMP
	if (thread == _current) {
		/* add current to end of queue means "yield" */
		_current_cpu->swap_ok = true;
	}
#endif
}

static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_QUEUED;
	if (should_queue_thread(thread)) {
		runq_remove(thread);
	}
}

#ifdef CONFIG_SMP
/* Called out of z_swap() when CONFIG_SMP.  The current thread can
 * never live in the run queue until we are inexorably on the context
 * switch path on SMP, otherwise there is a deadlock condition where a
 * set of CPUs pick a cycle of threads to run and wait for them all to
 * context switch forever.
 */
void z_requeue_current(struct k_thread *curr)
{
	if (z_is_thread_queued(curr)) {
		runq_add(curr);
	}
}

static inline bool is_aborting(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_ABORTING) != 0U;
}
#endif

static ALWAYS_INLINE struct k_thread *next_up(void)
{
	struct k_thread *thread = runq_best();

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && (CONFIG_NUM_COOP_PRIORITIES > 0)
	/* MetaIRQs must always attempt to return back to a
	 * cooperative thread they preempted and not whatever happens
	 * to be highest priority now. The cooperative thread was
	 * promised it wouldn't be preempted (by non-metairq threads)!
	 */
	struct k_thread *mirqp = _current_cpu->metairq_preempted;

	if (mirqp != NULL && (thread == NULL || !is_metairq(thread))) {
		if (!z_is_thread_prevented_from_running(mirqp)) {
			thread = mirqp;
		} else {
			_current_cpu->metairq_preempted = NULL;
		}
	}
#endif

#ifndef CONFIG_SMP
	/* In uniprocessor mode, we can leave the current thread in
	 * the queue (actually we have to, otherwise the assembly
	 * context switch code for all architectures would be
	 * responsible for putting it back in z_swap and ISR return!),
	 * which makes this choice simple.
	 */
	return (thread != NULL) ? thread : _current_cpu->idle_thread;
#else
	/* Under SMP, the "cache" mechanism for selecting the next
	 * thread doesn't work, so we have more work to do to test
	 * _current against the best choice from the queue.  Here, the
	 * thread selected above represents "the best thread that is
	 * not current".
	 *
	 * Subtle note on "queued": in SMP mode, _current does not
	 * live in the queue, so this isn't exactly the same thing as
	 * "ready", it means "is _current already added back to the
	 * queue such that we don't want to re-add it".
	 */
	if (is_aborting(_current)) {
		end_thread(_current);
	}

	int queued = z_is_thread_queued(_current);
	int active = !z_is_thread_prevented_from_running(_current);

	if (thread == NULL) {
		thread = _current_cpu->idle_thread;
	}

	if (active) {
		int32_t cmp = z_sched_prio_cmp(_current, thread);

		/* Ties only switch if state says we yielded */
		if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) {
			thread = _current;
		}

		if (!should_preempt(thread, _current_cpu->swap_ok)) {
			thread = _current;
		}
	}

	/* Put _current back into the queue */
	if (thread != _current && active &&
	    !z_is_idle_thread_object(_current) && !queued) {
		queue_thread(_current);
	}

	/* Take the new _current out of the queue */
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}

	_current_cpu->swap_ok = false;
	return thread;
#endif
}

static void move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}
	queue_thread(thread);
	update_cache(thread == _current);
}

#ifdef CONFIG_TIMESLICING

static int slice_time;
static int slice_max_prio;

#ifdef CONFIG_SWAP_NONATOMIC
/* If z_swap() isn't atomic, then it's possible for a timer interrupt
 * to try to timeslice away _current after it has already pended
 * itself but before the corresponding context switch.  Treat that as
 * a noop condition in z_time_slice().
 */
static struct k_thread *pending_current;
#endif

void z_reset_time_slice(void)
{
	/* Add the elapsed time since the last announced tick to the
	 * slice count, as we'll see those "expired" ticks arrive in a
	 * FUTURE z_time_slice() call.
	 */
	if (slice_time != 0) {
		_current_cpu->slice_ticks = slice_time + sys_clock_elapsed();
		z_set_timeout_expiry(slice_time, false);
	}
}

void k_sched_time_slice_set(int32_t slice, int prio)
{
	LOCKED(&sched_spinlock) {
		_current_cpu->slice_ticks = 0;
		slice_time = k_ms_to_ticks_ceil32(slice);
		if (IS_ENABLED(CONFIG_TICKLESS_KERNEL) && slice > 0) {
			/* It's not possible to reliably set a 1-tick
			 * timeout if ticks aren't regular.
			 */
			slice_time = MAX(2, slice_time);
		}
		slice_max_prio = prio;
		z_reset_time_slice();
	}
}
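
/* Usage sketch (illustrative, not compiled here): a call such as
 *
 *     k_sched_time_slice_set(10, 0);
 *
 * round-robins preemptible threads whose priority is not higher than
 * level 0 on a 10 ms slice; a slice of 0 disables timeslicing again.
 * The millisecond value is converted to ticks above and, on tickless
 * kernels, clamped to at least 2 ticks.
 */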

static inline int sliceable(struct k_thread *thread)
{
	return is_preempt(thread)
		&& !z_is_thread_prevented_from_running(thread)
		&& !z_is_prio_higher(thread->base.prio, slice_max_prio)
		&& !z_is_idle_thread_object(thread);
}

/* Called out of each timer interrupt */
void z_time_slice(int ticks)
{
	/* Hold sched_spinlock, so that activity on another CPU
	 * (like a call to k_thread_abort() at just the wrong time)
	 * won't affect the correctness of the decisions made here.
	 * Also prevents any nested interrupts from changing
	 * thread state to avoid similar issues, since this would
	 * normally run with IRQs enabled.
	 */
	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

#ifdef CONFIG_SWAP_NONATOMIC
	if (pending_current == _current) {
		z_reset_time_slice();
		k_spin_unlock(&sched_spinlock, key);
		return;
	}
	pending_current = NULL;
#endif

	if (slice_time && sliceable(_current)) {
		if (ticks >= _current_cpu->slice_ticks) {
			move_thread_to_end_of_prio_q(_current);
			z_reset_time_slice();
		} else {
			_current_cpu->slice_ticks -= ticks;
		}
	} else {
		_current_cpu->slice_ticks = 0;
	}
	k_spin_unlock(&sched_spinlock, key);
}
#endif

/* Track cooperative threads preempted by metairqs so we can return to
 * them specifically.  Called at the moment a new thread has been
 * selected to run.
 */
static void update_metairq_preempt(struct k_thread *thread)
{
#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && (CONFIG_NUM_COOP_PRIORITIES > 0)
	if (is_metairq(thread) && !is_metairq(_current) &&
	    !is_preempt(_current)) {
		/* Record new preemption */
		_current_cpu->metairq_preempted = _current;
	} else if (!is_metairq(thread) && !z_is_idle_thread_object(thread)) {
		/* Returning from existing preemption */
		_current_cpu->metairq_preempted = NULL;
	}
#endif
}

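/* On uniprocessor builds, _kernel.ready_q.cache holds the scheduler's
 * precomputed "next thread to run" so the swap/ISR-exit paths can read
 * it directly instead of walking the run queue.  Under SMP that cache
 * can't be kept coherent cheaply, so update_cache() only records
 * whether a cooperative swap is currently permitted.
 */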
static void update_cache(int preempt_ok)
{
#ifndef CONFIG_SMP
	struct k_thread *thread = next_up();

	if (should_preempt(thread, preempt_ok)) {
#ifdef CONFIG_TIMESLICING
		if (thread != _current) {
			z_reset_time_slice();
		}
#endif
		update_metairq_preempt(thread);
		_kernel.ready_q.cache = thread;
	} else {
		_kernel.ready_q.cache = _current;
	}

#else
	/* The way this works is that the CPU record keeps its
	 * "cooperative swapping is OK" flag until the next reschedule
	 * call or context switch.  It doesn't need to be tracked per
	 * thread because if the thread gets preempted for whatever
	 * reason the scheduler will make the same decision anyway.
	 */
	_current_cpu->swap_ok = preempt_ok;
#endif
}

static bool thread_active_elsewhere(struct k_thread *thread)
{
	/* True if the thread is currently running on another CPU.
	 * There are more scalable designs to answer this question in
	 * constant time, but this is fine for now.
	 */
#ifdef CONFIG_SMP
	int currcpu = _current_cpu->id;

	for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
		if ((i != currcpu) &&
		    (_kernel.cpus[i].current == thread)) {
			return true;
		}
	}
#endif
	return false;
}

static void ready_thread(struct k_thread *thread)
{
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(thread));
#endif

	/* If the thread is queued already, do not try to add it to the
	 * run queue again
	 */
	if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) {
		SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_ready, thread);

		queue_thread(thread);
		update_cache(0);
#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
		arch_sched_ipi();
#endif
	}
}

void z_ready_thread(struct k_thread *thread)
{
	LOCKED(&sched_spinlock) {
		if (!thread_active_elsewhere(thread)) {
			ready_thread(thread);
		}
	}
}

void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	LOCKED(&sched_spinlock) {
		move_thread_to_end_of_prio_q(thread);
	}
}

void z_sched_start(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

	if (z_has_thread_started(thread)) {
		k_spin_unlock(&sched_spinlock, key);
		return;
	}

	z_mark_thread_as_started(thread);
	ready_thread(thread);
	z_reschedule(&sched_spinlock, key);
}

void z_impl_k_thread_suspend(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, suspend, thread);

	(void)z_abort_thread_timeout(thread);

	LOCKED(&sched_spinlock) {
		if (z_is_thread_queued(thread)) {
			dequeue_thread(thread);
		}
		z_mark_thread_as_suspended(thread);
		update_cache(thread == _current);
	}

	if (thread == _current) {
		z_reschedule_unlocked();
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, suspend, thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_suspend(struct k_thread *thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_suspend(thread);
}
#include <syscalls/k_thread_suspend_mrsh.c>
#endif

void z_impl_k_thread_resume(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, resume, thread);

	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

	/* Do not try to resume a thread that was not suspended */
	if (!z_is_thread_suspended(thread)) {
		k_spin_unlock(&sched_spinlock, key);
		return;
	}

	z_mark_thread_as_not_suspended(thread);
	ready_thread(thread);

	z_reschedule(&sched_spinlock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, resume, thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_resume(struct k_thread *thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_resume(thread);
}
#include <syscalls/k_thread_resume_mrsh.c>
#endif

static _wait_q_t *pended_on_thread(struct k_thread *thread)
{
	__ASSERT_NO_MSG(thread->base.pended_on);

	return thread->base.pended_on;
}

static void unready_thread(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}
	update_cache(thread == _current);
}

/* sched_spinlock must be held */
static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q)
{
	unready_thread(thread);
	z_mark_thread_as_pending(thread);

	SYS_PORT_TRACING_FUNC(k_thread, sched_pend, thread);

	if (wait_q != NULL) {
		thread->base.pended_on = wait_q;
		z_priq_wait_add(&wait_q->waitq, thread);
	}
}

static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
{
	if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		z_add_thread_timeout(thread, timeout);
	}
}

static void pend(struct k_thread *thread, _wait_q_t *wait_q,
		 k_timeout_t timeout)
{
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(wait_q == NULL || arch_mem_coherent(wait_q));
#endif

	LOCKED(&sched_spinlock) {
		add_to_waitq_locked(thread, wait_q);
	}

	add_thread_timeout(thread, timeout);
}

void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
		   k_timeout_t timeout)
{
	__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
	pend(thread, wait_q, timeout);
}

static inline void unpend_thread_no_timeout(struct k_thread *thread)
{
	_priq_wait_remove(&pended_on_thread(thread)->waitq, thread);
	z_mark_thread_as_not_pending(thread);
	thread->base.pended_on = NULL;
}

ALWAYS_INLINE void z_unpend_thread_no_timeout(struct k_thread *thread)
{
	LOCKED(&sched_spinlock) {
		unpend_thread_no_timeout(thread);
	}
}

#ifdef CONFIG_SYS_CLOCK_EXISTS
/* Timeout handler for *_thread_timeout() APIs */
void z_thread_timeout(struct _timeout *timeout)
{
	struct k_thread *thread = CONTAINER_OF(timeout,
					       struct k_thread, base.timeout);

	LOCKED(&sched_spinlock) {
		bool killed = ((thread->base.thread_state & _THREAD_DEAD) ||
			       (thread->base.thread_state & _THREAD_ABORTING));

		if (!killed) {
			if (thread->base.pended_on != NULL) {
				unpend_thread_no_timeout(thread);
			}
			z_mark_thread_as_started(thread);
			z_mark_thread_as_not_suspended(thread);
			ready_thread(thread);
		}
	}
}
#endif

int z_pend_curr_irqlock(uint32_t key, _wait_q_t *wait_q, k_timeout_t timeout)
{
	pend(_current, wait_q, timeout);

#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
	pending_current = _current;

	int ret = z_swap_irqlock(key);
	LOCKED(&sched_spinlock) {
		if (pending_current == _current) {
			pending_current = NULL;
		}
	}
	return ret;
#else
	return z_swap_irqlock(key);
#endif
}

int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
		_wait_q_t *wait_q, k_timeout_t timeout)
{
#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
	pending_current = _current;
#endif
	pend(_current, wait_q, timeout);
	return z_swap(lock, key);
}

struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
{
	struct k_thread *thread = NULL;

	LOCKED(&sched_spinlock) {
		thread = _priq_wait_best(&wait_q->waitq);

		if (thread != NULL) {
			unpend_thread_no_timeout(thread);
		}
	}

	return thread;
}

struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
{
	struct k_thread *thread = NULL;

	LOCKED(&sched_spinlock) {
		thread = _priq_wait_best(&wait_q->waitq);

		if (thread != NULL) {
			unpend_thread_no_timeout(thread);
			(void)z_abort_thread_timeout(thread);
		}
	}

	return thread;
}

void z_unpend_thread(struct k_thread *thread)
{
	z_unpend_thread_no_timeout(thread);
	(void)z_abort_thread_timeout(thread);
}

/* Priority set utility that does no rescheduling; it just changes the
 * run queue state, returning true if a reschedule is needed later.
 */
bool z_set_prio(struct k_thread *thread, int prio)
{
	bool need_sched = false;

	LOCKED(&sched_spinlock) {
		need_sched = z_is_thread_ready(thread);

		if (need_sched) {
			/* Don't requeue on SMP if it's the running thread */
			if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) {
				dequeue_thread(thread);
				thread->base.prio = prio;
				queue_thread(thread);
			} else {
				thread->base.prio = prio;
			}
			update_cache(1);
		} else {
			thread->base.prio = prio;
		}
	}

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_priority_set, thread, prio);

	return need_sched;
}

void z_thread_priority_set(struct k_thread *thread, int prio)
{
	bool need_sched = z_set_prio(thread, prio);

#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
	arch_sched_ipi();
#endif

	if (need_sched && _current->base.sched_locked == 0U) {
		z_reschedule_unlocked();
	}
}

static inline bool resched(uint32_t key)
{
#ifdef CONFIG_SMP
	_current_cpu->swap_ok = 0;
#endif

	return arch_irq_unlocked(key) && !arch_is_in_isr();
}

/*
 * Check if the next ready thread is the same as the current thread
 * and save the trip if true.
 */
static inline bool need_swap(void)
{
	/* the SMP case will be handled in C based z_swap() */
#ifdef CONFIG_SMP
	return true;
#else
	struct k_thread *new_thread;

	/* Check if the next ready thread is the same as the current thread */
	new_thread = _kernel.ready_q.cache;
	return new_thread != _current;
#endif
}

void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
{
	if (resched(key.key) && need_swap()) {
		z_swap(lock, key);
	} else {
		k_spin_unlock(lock, key);
	}
}

void z_reschedule_irqlock(uint32_t key)
{
	if (resched(key)) {
		z_swap_irqlock(key);
	} else {
		irq_unlock(key);
	}
}

void k_sched_lock(void)
{
	LOCKED(&sched_spinlock) {
		SYS_PORT_TRACING_FUNC(k_thread, sched_lock);

		z_sched_lock();
	}
}

void k_sched_unlock(void)
{
	LOCKED(&sched_spinlock) {
		__ASSERT(_current->base.sched_locked != 0U, "");
		__ASSERT(!arch_is_in_isr(), "");

		++_current->base.sched_locked;
		update_cache(0);
	}

	LOG_DBG("scheduler unlocked (%p:%d)",
		_current, _current->base.sched_locked);

	SYS_PORT_TRACING_FUNC(k_thread, sched_unlock);

	z_reschedule_unlocked();
}
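
/* Usage sketch (illustrative): a cooperative critical section from
 * thread context looks like
 *
 *     k_sched_lock();
 *     ... touch data shared with other threads ...
 *     k_sched_unlock();
 *
 * which prevents preemption by other threads (but not by ISRs) until
 * the matching unlock, at which point a pending reschedule may run
 * immediately.
 */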

struct k_thread *z_swap_next_thread(void)
{
#ifdef CONFIG_SMP
	return next_up();
#else
	return _kernel.ready_q.cache;
#endif
}

#ifdef CONFIG_USE_SWITCH
/* Just a wrapper around _current = xxx with tracing */
static inline void set_current(struct k_thread *new_thread)
{
	z_thread_mark_switched_out();
	_current_cpu->current = new_thread;
}

void *z_get_next_switch_handle(void *interrupted)
{
	z_check_stack_sentinel();

#ifdef CONFIG_SMP
	void *ret = NULL;

	LOCKED(&sched_spinlock) {
		struct k_thread *old_thread = _current, *new_thread;

		if (IS_ENABLED(CONFIG_SMP)) {
			old_thread->switch_handle = NULL;
		}
		new_thread = next_up();

		z_sched_usage_switch(new_thread);

		if (old_thread != new_thread) {
			update_metairq_preempt(new_thread);
			wait_for_switch(new_thread);
			arch_cohere_stacks(old_thread, interrupted, new_thread);

#ifdef CONFIG_TIMESLICING
			z_reset_time_slice();
#endif
			_current_cpu->swap_ok = 0;
			set_current(new_thread);

#ifdef CONFIG_SPIN_VALIDATE
			/* Changed _current!  Update the spinlock
			 * bookkeeping so the validation doesn't get
			 * confused when the "wrong" thread tries to
			 * release the lock.
			 */
			z_spin_lock_set_owner(&sched_spinlock);
#endif

			/* A queued (runnable) old/current thread
			 * needs to be added back to the run queue
			 * here, and atomically with its switch handle
			 * being set below.  This is safe now, as we
			 * will not return into it.
			 */
			if (z_is_thread_queued(old_thread)) {
				runq_add(old_thread);
			}
		}
		old_thread->switch_handle = interrupted;
		ret = new_thread->switch_handle;
		if (IS_ENABLED(CONFIG_SMP)) {
			/* Active threads MUST have a null here */
			new_thread->switch_handle = NULL;
		}
	}
	return ret;
#else
	z_sched_usage_switch(_kernel.ready_q.cache);
	_current->switch_handle = interrupted;
	set_current(_kernel.ready_q.cache);
	return _current->switch_handle;
#endif
}
#endif

void z_priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread)
{
	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	sys_dlist_remove(&thread->base.qnode_dlist);
}

struct k_thread *z_priq_dumb_best(sys_dlist_t *pq)
{
	struct k_thread *thread = NULL;
	sys_dnode_t *n = sys_dlist_peek_head(pq);

	if (n != NULL) {
		thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
	}
	return thread;
}

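/* Red/black tree backend: z_sched_prio_cmp() decides ordering, and the
 * monotonically assigned order_key (see z_priq_rb_add() below) breaks
 * priority ties so equal-priority threads keep FIFO order in the tree.
 */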
bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b)
{
	struct k_thread *thread_a, *thread_b;
	int32_t cmp;

	thread_a = CONTAINER_OF(a, struct k_thread, base.qnode_rb);
	thread_b = CONTAINER_OF(b, struct k_thread, base.qnode_rb);

	cmp = z_sched_prio_cmp(thread_a, thread_b);

	if (cmp > 0) {
		return true;
	} else if (cmp < 0) {
		return false;
	} else {
		return thread_a->base.order_key < thread_b->base.order_key
			? 1 : 0;
	}
}

void z_priq_rb_add(struct _priq_rb *pq, struct k_thread *thread)
{
	struct k_thread *t;

	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	thread->base.order_key = pq->next_order_key++;

	/* Renumber at wraparound.  This is tiny code, and in practice
	 * will almost never be hit on real systems.  BUT on very
	 * long-running systems where a priq never completely empties
	 * AND that contains very large numbers of threads, it can be
	 * a latency glitch to loop over all the threads like this.
	 */
	if (!pq->next_order_key) {
		RB_FOR_EACH_CONTAINER(&pq->tree, t, base.qnode_rb) {
			t->base.order_key = pq->next_order_key++;
		}
	}

	rb_insert(&pq->tree, &thread->base.qnode_rb);
}

void z_priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread)
{
	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	rb_remove(&pq->tree, &thread->base.qnode_rb);

	if (!pq->tree.root) {
		pq->next_order_key = 0;
	}
}

struct k_thread *z_priq_rb_best(struct _priq_rb *pq)
{
	struct k_thread *thread = NULL;
	struct rbnode *n = rb_get_min(&pq->tree);

	if (n != NULL) {
		thread = CONTAINER_OF(n, struct k_thread, base.qnode_rb);
	}
	return thread;
}

#ifdef CONFIG_SCHED_MULTIQ
# if (K_LOWEST_THREAD_PRIO - K_HIGHEST_THREAD_PRIO) > 31
# error Too many priorities for multiqueue scheduler (max 32)
# endif

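/* Multi-queue backend: one dlist per priority level plus a bitmask of
 * non-empty levels, so the best thread is found with a single
 * count-trailing-zeros over the mask (see z_priq_mq_best()).
 */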
Peter Mitsisf8b76f32021-11-29 09:52:11 -05001085static ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq,
1086 struct k_thread *thread)
Andy Ross9f06a352018-06-28 10:38:14 -07001087{
1088 int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;
1089
1090 sys_dlist_append(&pq->queues[priority_bit], &thread->base.qnode_dlist);
Flavio Ceolina9962032019-02-26 10:14:04 -08001091 pq->bitmask |= BIT(priority_bit);
Andy Ross9f06a352018-06-28 10:38:14 -07001092}
1093
Peter Mitsisf8b76f32021-11-29 09:52:11 -05001094static ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq,
1095 struct k_thread *thread)
Andy Ross9f06a352018-06-28 10:38:14 -07001096{
1097 int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;
1098
1099 sys_dlist_remove(&thread->base.qnode_dlist);
1100 if (sys_dlist_is_empty(&pq->queues[priority_bit])) {
Flavio Ceolina9962032019-02-26 10:14:04 -08001101 pq->bitmask &= ~BIT(priority_bit);
Andy Ross9f06a352018-06-28 10:38:14 -07001102 }
1103}
Jeremy Bettisfb1c36f2021-12-20 16:24:30 -07001104#endif
Andy Ross9f06a352018-06-28 10:38:14 -07001105
Patrik Flykt4344e272019-03-08 14:19:05 -07001106struct k_thread *z_priq_mq_best(struct _priq_mq *pq)
Andy Ross9f06a352018-06-28 10:38:14 -07001107{
1108 if (!pq->bitmask) {
1109 return NULL;
1110 }
1111
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001112 struct k_thread *thread = NULL;
Andy Ross9f06a352018-06-28 10:38:14 -07001113 sys_dlist_t *l = &pq->queues[__builtin_ctz(pq->bitmask)];
Flavio Ceolin26be3352018-11-15 15:03:32 -08001114 sys_dnode_t *n = sys_dlist_peek_head(l);
Andy Ross9f06a352018-06-28 10:38:14 -07001115
Peter A. Bigot692e1032019-01-03 23:36:28 -06001116 if (n != NULL) {
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001117 thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
Peter A. Bigot692e1032019-01-03 23:36:28 -06001118 }
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001119 return thread;
Andy Ross9f06a352018-06-28 10:38:14 -07001120}
1121
Patrik Flykt4344e272019-03-08 14:19:05 -07001122int z_unpend_all(_wait_q_t *wait_q)
Andy Ross4ca0e072018-05-10 09:45:42 -07001123{
Andy Rossccf3bf72018-05-10 11:10:34 -07001124 int need_sched = 0;
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001125 struct k_thread *thread;
Andy Ross4ca0e072018-05-10 09:45:42 -07001126
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001127 while ((thread = z_waitq_head(wait_q)) != NULL) {
1128 z_unpend_thread(thread);
1129 z_ready_thread(thread);
Andy Ross4ca0e072018-05-10 09:45:42 -07001130 need_sched = 1;
1131 }
Andy Rossccf3bf72018-05-10 11:10:34 -07001132
1133 return need_sched;
Andy Ross4ca0e072018-05-10 09:45:42 -07001134}
1135
Andy Rossb155d062021-09-24 13:49:14 -07001136void init_ready_q(struct _ready_q *rq)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001137{
Andy Rossb155d062021-09-24 13:49:14 -07001138#if defined(CONFIG_SCHED_SCALABLE)
1139 rq->runq = (struct _priq_rb) {
Andy Ross1acd8c22018-05-03 14:51:49 -07001140 .tree = {
Patrik Flykt4344e272019-03-08 14:19:05 -07001141 .lessthan_fn = z_priq_rb_lessthan,
Andy Ross1acd8c22018-05-03 14:51:49 -07001142 }
1143 };
Andy Rossb155d062021-09-24 13:49:14 -07001144#elif defined(CONFIG_SCHED_MULTIQ)
Andy Ross9f06a352018-06-28 10:38:14 -07001145 for (int i = 0; i < ARRAY_SIZE(_kernel.ready_q.runq.queues); i++) {
Andy Rossb155d062021-09-24 13:49:14 -07001146 sys_dlist_init(&rq->runq.queues[i]);
Andy Ross9f06a352018-06-28 10:38:14 -07001147 }
Andy Rossb155d062021-09-24 13:49:14 -07001148#else
1149 sys_dlist_init(&rq->runq);
Andy Ross9f06a352018-06-28 10:38:14 -07001150#endif
Andy Rossb155d062021-09-24 13:49:14 -07001151}
1152
1153void z_sched_init(void)
1154{
Andy Rossb11e7962021-09-24 10:57:39 -07001155#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
1156 for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
1157 init_ready_q(&_kernel.cpus[i].ready_q);
1158 }
1159#else
Andy Rossb155d062021-09-24 13:49:14 -07001160 init_ready_q(&_kernel.ready_q);
Andy Rossb11e7962021-09-24 10:57:39 -07001161#endif
Piotr Zięcik4a39b9e2018-07-26 14:56:39 +02001162
1163#ifdef CONFIG_TIMESLICING
1164 k_sched_time_slice_set(CONFIG_TIMESLICE_SIZE,
1165 CONFIG_TIMESLICE_PRIORITY);
1166#endif
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001167}

int z_impl_k_thread_priority_get(k_tid_t thread)
{
	return thread->base.prio;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_priority_get(k_tid_t thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	return z_impl_k_thread_priority_get(thread);
}
#include <syscalls/k_thread_priority_get_mrsh.c>
#endif

void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
{
	/*
	 * Use NULL, since we cannot know what the entry point is (we do not
	 * keep track of it) and idle cannot change its priority.
	 */
	Z_ASSERT_VALID_PRIO(prio, NULL);
	__ASSERT(!arch_is_in_isr(), "");

	struct k_thread *th = (struct k_thread *)thread;

	z_thread_priority_set(th, prio);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(_is_valid_prio(prio, NULL),
				    "invalid thread priority %d", prio));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio,
				    "thread priority may only be downgraded (%d < %d)",
				    prio, thread->base.prio));

	z_impl_k_thread_priority_set(thread, prio);
}
#include <syscalls/k_thread_priority_set_mrsh.c>
#endif
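
/*
 * Example (illustrative sketch): from user mode the verifier above only lets
 * a thread lower its priority (numerically raise it).  "worker_tid" is a
 * hypothetical thread id supplied by the caller.
 */
static void example_drop_priority(k_tid_t worker_tid)
{
	int prio = k_thread_priority_get(worker_tid);

	/* Moving to prio + 1 is a downgrade and is therefore permitted */
	k_thread_priority_set(worker_tid, prio + 1);
}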

#ifdef CONFIG_SCHED_DEADLINE
void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
{
	struct k_thread *thread = tid;

	LOCKED(&sched_spinlock) {
		thread->base.prio_deadline = k_cycle_get_32() + deadline;
		if (z_is_thread_queued(thread)) {
			dequeue_thread(thread);
			queue_thread(thread);
		}
	}
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline)
{
	struct k_thread *thread = tid;

	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(deadline > 0,
				    "invalid thread deadline %d",
				    (int)deadline));

	z_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
}
#include <syscalls/k_thread_deadline_set_mrsh.c>
#endif
#endif
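
/*
 * Example (illustrative sketch, assuming CONFIG_SCHED_DEADLINE=y): as the
 * k_cycle_get_32() addition above shows, the deadline argument is relative
 * to "now" and measured in hardware cycles, so a millisecond budget must be
 * converted first (k_ms_to_cyc_ceil32() is assumed here).  Deadlines only
 * order threads that share the same static priority.
 */
static void example_set_deadline(void)
{
	/* Run ahead of same-priority peers whose deadlines lie more than
	 * roughly 5 ms in the future.
	 */
	k_thread_deadline_set(k_current_get(), k_ms_to_cyc_ceil32(5));
}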

void z_impl_k_yield(void)
{
	__ASSERT(!arch_is_in_isr(), "");

	SYS_PORT_TRACING_FUNC(k_thread, yield);

	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

	if (!IS_ENABLED(CONFIG_SMP) ||
	    z_is_thread_queued(_current)) {
		dequeue_thread(_current);
	}
	queue_thread(_current);
	update_cache(1);
	z_swap(&sched_spinlock, key);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_yield(void)
{
	z_impl_k_yield();
}
#include <syscalls/k_yield_mrsh.c>
#endif
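
/*
 * Example (illustrative sketch): k_yield() requeues the caller behind its
 * equal-priority peers, so two workers at the same priority can rotate the
 * CPU cooperatively.  The counter and the worker are example scaffolding,
 * not code used by the scheduler.
 */
static volatile int example_turns;

static void example_yielding_worker(void *a, void *b, void *c)
{
	ARG_UNUSED(a);
	ARG_UNUSED(b);
	ARG_UNUSED(c);

	while (example_turns < 10) {
		example_turns++;
		k_yield();	/* hand the CPU to the equal-priority peer */
	}
}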

static int32_t z_tick_sleep(k_ticks_t ticks)
{
#ifdef CONFIG_MULTITHREADING
	uint32_t expected_wakeup_ticks;

	__ASSERT(!arch_is_in_isr(), "");

#ifndef CONFIG_TIMEOUT_64BIT
	/* LOG subsys does not handle 64-bit values
	 * https://github.com/zephyrproject-rtos/zephyr/issues/26246
	 */
	LOG_DBG("thread %p for %u ticks", _current, ticks);
#endif

	/* wait of 0 ms is treated as a 'yield' */
	if (ticks == 0) {
		k_yield();
		return 0;
	}

	k_timeout_t timeout = Z_TIMEOUT_TICKS(ticks);
	if (Z_TICK_ABS(ticks) <= 0) {
		expected_wakeup_ticks = ticks + sys_clock_tick_get_32();
	} else {
		expected_wakeup_ticks = Z_TICK_ABS(ticks);
	}

	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
	pending_current = _current;
#endif
	unready_thread(_current);
	z_add_thread_timeout(_current, timeout);
	z_mark_thread_as_suspended(_current);

	(void)z_swap(&sched_spinlock, key);

	__ASSERT(!z_is_thread_state_set(_current, _THREAD_SUSPENDED), "");

	ticks = (k_ticks_t)expected_wakeup_ticks - sys_clock_tick_get_32();
	if (ticks > 0) {
		return ticks;
	}
#endif

	return 0;
}

int32_t z_impl_k_sleep(k_timeout_t timeout)
{
	k_ticks_t ticks;

	__ASSERT(!arch_is_in_isr(), "");

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, sleep, timeout);

	/* in case of K_FOREVER, we suspend */
	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		k_thread_suspend(_current);

		SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, (int32_t) K_TICKS_FOREVER);

		return (int32_t) K_TICKS_FOREVER;
	}

	ticks = timeout.ticks;

	ticks = z_tick_sleep(ticks);

	int32_t ret = k_ticks_to_ms_floor64(ticks);

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, ret);

	return ret;
}

#ifdef CONFIG_USERSPACE
static inline int32_t z_vrfy_k_sleep(k_timeout_t timeout)
{
	return z_impl_k_sleep(timeout);
}
#include <syscalls/k_sleep_mrsh.c>
#endif
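
/*
 * Example (illustrative sketch): k_sleep() takes a k_timeout_t.  It returns
 * zero when the full time elapsed, the remaining time in milliseconds when
 * the sleep was cut short by k_wakeup(), and K_TICKS_FOREVER for the
 * K_FOREVER form handled above.
 */
static void example_sleep(void)
{
	int32_t left = k_sleep(K_MSEC(100));

	if (left > 0) {
		LOG_DBG("woken about %d ms early", left);
	}
}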

int32_t z_impl_k_usleep(int us)
{
	int32_t ticks;

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, usleep, us);

	ticks = k_us_to_ticks_ceil64(us);
	ticks = z_tick_sleep(ticks);

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, usleep, us, k_ticks_to_us_floor64(ticks));

	return k_ticks_to_us_floor64(ticks);
}

#ifdef CONFIG_USERSPACE
static inline int32_t z_vrfy_k_usleep(int us)
{
	return z_impl_k_usleep(us);
}
#include <syscalls/k_usleep_mrsh.c>
#endif
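
/*
 * Example (illustrative sketch): because the conversion above uses
 * k_us_to_ticks_ceil64(), very short delays round up to at least one tick;
 * a positive return value is the time left, in microseconds, after an early
 * wakeup.
 */
static void example_usleep(void)
{
	int32_t left = k_usleep(250);

	if (left > 0) {
		LOG_DBG("usleep cut short, %d us left", left);
	}
}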

void z_impl_k_wakeup(k_tid_t thread)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread);

	if (z_is_thread_pending(thread)) {
		return;
	}

	if (z_abort_thread_timeout(thread) < 0) {
		/* Might have just been sleeping forever */
		if (thread->base.thread_state != _THREAD_SUSPENDED) {
			return;
		}
	}

	z_mark_thread_as_not_suspended(thread);
	z_ready_thread(thread);

#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
	arch_sched_ipi();
#endif

	if (!arch_is_in_isr()) {
		z_reschedule_unlocked();
	}
}

#ifdef CONFIG_TRACE_SCHED_IPI
extern void z_trace_sched_ipi(void);
#endif

#ifdef CONFIG_SMP
void z_sched_ipi(void)
{
	/* NOTE: When adding code to this, make sure this is called
	 * at appropriate location when !CONFIG_SCHED_IPI_SUPPORTED.
	 */
#ifdef CONFIG_TRACE_SCHED_IPI
	z_trace_sched_ipi();
#endif
}
#endif

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_wakeup(k_tid_t thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_wakeup(thread);
}
#include <syscalls/k_wakeup_mrsh.c>
#endif
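
/*
 * Example (illustrative sketch): a thread parked with k_sleep(K_FOREVER) is
 * suspended until some other context calls k_wakeup() on it, matching the
 * suspended-thread handling above.  "parked_tid" is a hypothetical thread id.
 */
static void example_park(void)
{
	(void)k_sleep(K_FOREVER);	/* resumes only via k_wakeup() */
	LOG_DBG("woken by k_wakeup()");
}

static void example_wake(k_tid_t parked_tid)
{
	/* Also legal from an ISR; the reschedule then happens on exit */
	k_wakeup(parked_tid);
}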

k_tid_t z_impl_z_current_get(void)
{
#ifdef CONFIG_SMP
	/* In SMP, _current is a field read from _current_cpu, which
	 * can race with preemption before it is read.  We must lock
	 * local interrupts when reading it.
	 */
	unsigned int k = arch_irq_lock();
#endif

	k_tid_t ret = _current_cpu->current;

#ifdef CONFIG_SMP
	arch_irq_unlock(k);
#endif
	return ret;
}

#ifdef CONFIG_USERSPACE
static inline k_tid_t z_vrfy_z_current_get(void)
{
	return z_impl_z_current_get();
}
#include <syscalls/z_current_get_mrsh.c>
#endif
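
/*
 * Example (illustrative sketch): application code reaches this through the
 * public k_current_get() wrapper; the interrupt locking above is an SMP
 * detail callers never see.
 */
static void example_identify(void)
{
	k_tid_t me = k_current_get();

	LOG_DBG("running in thread %p", me);
}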

int z_impl_k_is_preempt_thread(void)
{
	return !arch_is_in_isr() && is_preempt(_current);
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_is_preempt_thread(void)
{
	return z_impl_k_is_preempt_thread();
}
#include <syscalls/k_is_preempt_thread_mrsh.c>
#endif
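
/*
 * Example (illustrative sketch): because the check above also rules out
 * ISRs, k_is_preempt_thread() is a convenient guard before voluntarily
 * blocking.  The 100 us delay is an arbitrary example value.
 */
static void example_short_delay(void)
{
	if (k_is_preempt_thread()) {
		k_usleep(100);		/* preemptible thread: may block */
	} else {
		k_busy_wait(100);	/* ISR or cooperative context: spin */
	}
}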

#ifdef CONFIG_SCHED_CPU_MASK
# ifdef CONFIG_SMP
/* Right now we use a single byte for this mask */
BUILD_ASSERT(CONFIG_MP_NUM_CPUS <= 8, "Too many CPUs for mask word");
# endif

static int cpu_mask_mod(k_tid_t thread, uint32_t enable_mask, uint32_t disable_mask)
{
	int ret = 0;

	LOCKED(&sched_spinlock) {
		if (z_is_thread_prevented_from_running(thread)) {
			thread->base.cpu_mask |= enable_mask;
			thread->base.cpu_mask &= ~disable_mask;
		} else {
			ret = -EINVAL;
		}
	}

#if defined(CONFIG_ASSERT) && defined(CONFIG_SCHED_CPU_MASK_PIN_ONLY)
	int m = thread->base.cpu_mask;

	__ASSERT((m == 0) || ((m & (m - 1)) == 0),
		 "Only one CPU allowed in mask when PIN_ONLY");
#endif

	return ret;
}

int k_thread_cpu_mask_clear(k_tid_t thread)
{
	return cpu_mask_mod(thread, 0, 0xffffffff);
}

int k_thread_cpu_mask_enable_all(k_tid_t thread)
{
	return cpu_mask_mod(thread, 0xffffffff, 0);
}

int k_thread_cpu_mask_enable(k_tid_t thread, int cpu)
{
	return cpu_mask_mod(thread, BIT(cpu), 0);
}

int k_thread_cpu_mask_disable(k_tid_t thread, int cpu)
{
	return cpu_mask_mod(thread, 0, BIT(cpu));
}

#endif /* CONFIG_SCHED_CPU_MASK */
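
/*
 * Example (illustrative sketch, assuming CONFIG_SCHED_CPU_MASK=y): the mask
 * can only be changed while the thread is not runnable, as the -EINVAL path
 * in cpu_mask_mod() enforces, so create the thread with a K_FOREVER delay,
 * pin it, then start it.  The stack, priority and entry point below are
 * example scaffolding.
 */
K_THREAD_STACK_DEFINE(example_pinned_stack, 1024);
static struct k_thread example_pinned_thread;

static k_tid_t example_pin_to_cpu0(k_thread_entry_t entry)
{
	k_tid_t tid = k_thread_create(&example_pinned_thread,
				      example_pinned_stack,
				      K_THREAD_STACK_SIZEOF(example_pinned_stack),
				      entry, NULL, NULL, NULL,
				      K_PRIO_PREEMPT(5), 0, K_FOREVER);

	(void)k_thread_cpu_mask_clear(tid);
	(void)k_thread_cpu_mask_enable(tid, 0);
	k_thread_start(tid);

	return tid;
}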

static inline void unpend_all(_wait_q_t *wait_q)
{
	struct k_thread *thread;

	while ((thread = z_waitq_head(wait_q)) != NULL) {
		unpend_thread_no_timeout(thread);
		(void)z_abort_thread_timeout(thread);
		arch_thread_return_value_set(thread, 0);
		ready_thread(thread);
	}
}

#ifdef CONFIG_CMSIS_RTOS_V1
extern void z_thread_cmsis_status_mask_clear(struct k_thread *thread);
#endif

static void end_thread(struct k_thread *thread)
{
	/* We hold the lock, and the thread is known not to be running
	 * anywhere.
	 */
	if ((thread->base.thread_state & _THREAD_DEAD) == 0U) {
		thread->base.thread_state |= _THREAD_DEAD;
		thread->base.thread_state &= ~_THREAD_ABORTING;
		if (z_is_thread_queued(thread)) {
			dequeue_thread(thread);
		}
		if (thread->base.pended_on != NULL) {
			unpend_thread_no_timeout(thread);
		}
		(void)z_abort_thread_timeout(thread);
		unpend_all(&thread->join_queue);
		update_cache(1);

		SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread);

		z_thread_monitor_exit(thread);

#ifdef CONFIG_CMSIS_RTOS_V1
		z_thread_cmsis_status_mask_clear(thread);
#endif

#ifdef CONFIG_USERSPACE
		z_mem_domain_exit_thread(thread);
		z_thread_perms_all_clear(thread);
		z_object_uninit(thread->stack_obj);
		z_object_uninit(thread);
#endif
	}
}

void z_thread_abort(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

	if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
		k_spin_unlock(&sched_spinlock, key);
		return;
	}

#ifdef CONFIG_SMP
	if (is_aborting(thread) && thread == _current && arch_is_in_isr()) {
		/* Another CPU is spinning for us, don't deadlock */
		end_thread(thread);
	}

	bool active = thread_active_elsewhere(thread);

	if (active) {
		/* It's running somewhere else, flag and poke */
		thread->base.thread_state |= _THREAD_ABORTING;

#ifdef CONFIG_SCHED_IPI_SUPPORTED
		arch_sched_ipi();
#endif
	}

	if (is_aborting(thread) && thread != _current) {
		if (arch_is_in_isr()) {
			/* ISRs can only spin waiting for another CPU */
			k_spin_unlock(&sched_spinlock, key);
			while (is_aborting(thread)) {
			}
		} else if (active) {
			/* Threads can join */
			add_to_waitq_locked(_current, &thread->join_queue);
			z_swap(&sched_spinlock, key);
		}
		return; /* lock has been released */
	}
#endif
	end_thread(thread);
	if (thread == _current && !arch_is_in_isr()) {
		z_swap(&sched_spinlock, key);
		__ASSERT(false, "aborted _current back from dead");
	}
	k_spin_unlock(&sched_spinlock, key);
}

#if !defined(CONFIG_ARCH_HAS_THREAD_ABORT)
void z_impl_k_thread_abort(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread);

	z_thread_abort(thread);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread);
}
#endif

int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
{
	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
	int ret = 0;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, join, thread, timeout);

	if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
		ret = 0;
	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		ret = -EBUSY;
	} else if ((thread == _current) ||
		   (thread->base.pended_on == &_current->join_queue)) {
		ret = -EDEADLK;
	} else {
		__ASSERT(!arch_is_in_isr(), "cannot join in ISR");
		add_to_waitq_locked(_current, &thread->join_queue);
		add_thread_timeout(_current, timeout);

		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout);
		ret = z_swap(&sched_spinlock, key);
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);

		return ret;
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);

	k_spin_unlock(&sched_spinlock, key);
	return ret;
}

#ifdef CONFIG_USERSPACE
/* Special case: don't oops if the thread is uninitialized.  This is because
 * the initialization bit does double-duty for thread objects; if false, it
 * means the thread object is truly uninitialized, or the thread ran and
 * exited for some reason.
 *
 * Return true in this case indicating we should just do nothing and return
 * success to the caller.
 */
static bool thread_obj_validate(struct k_thread *thread)
{
	struct z_object *ko = z_object_find(thread);
	int ret = z_object_validate(ko, K_OBJ_THREAD, _OBJ_INIT_TRUE);

	switch (ret) {
	case 0:
		return false;
	case -EINVAL:
		return true;
	default:
#ifdef CONFIG_LOG
		z_dump_object_error(ret, thread, ko, K_OBJ_THREAD);
#endif
		Z_OOPS(Z_SYSCALL_VERIFY_MSG(ret, "access denied"));
	}
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}

static inline int z_vrfy_k_thread_join(struct k_thread *thread,
				       k_timeout_t timeout)
{
	if (thread_obj_validate(thread)) {
		return 0;
	}

	return z_impl_k_thread_join(thread, timeout);
}
#include <syscalls/k_thread_join_mrsh.c>

static inline void z_vrfy_k_thread_abort(k_tid_t thread)
{
	if (thread_obj_validate(thread)) {
		return;
	}

	Z_OOPS(Z_SYSCALL_VERIFY_MSG(!(thread->base.user_options & K_ESSENTIAL),
				    "aborting essential thread %p", thread));

	z_impl_k_thread_abort((struct k_thread *)thread);
}
#include <syscalls/k_thread_abort_mrsh.c>
#endif /* CONFIG_USERSPACE */
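
/*
 * Example (illustrative sketch): a supervisor waiting for a worker to
 * terminate, mirroring the return values implemented above: 0 once the
 * thread is dead, -EBUSY for K_NO_WAIT while it still runs, -EDEADLK for a
 * self or mutual join, and the swap result (e.g. a timeout) otherwise.
 * "worker_tid" is a hypothetical thread id.
 */
static void example_reap(k_tid_t worker_tid)
{
	int rc = k_thread_join(worker_tid, K_SECONDS(2));

	if (rc == -EDEADLK) {
		LOG_DBG("refusing to join self or a mutual joiner");
	} else if (rc != 0) {
		/* Still alive after the timeout: force termination, which
		 * also wakes any other threads joined on it.
		 */
		k_thread_abort(worker_tid);
	}
}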

/*
 * future scheduler.h API implementations
 */
bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data)
{
	struct k_thread *thread;
	bool ret = false;

	LOCKED(&sched_spinlock) {
		thread = _priq_wait_best(&wait_q->waitq);

		if (thread != NULL) {
			z_thread_return_value_set_with_data(thread,
							    swap_retval,
							    swap_data);
			unpend_thread_no_timeout(thread);
			(void)z_abort_thread_timeout(thread);
			ready_thread(thread);
			ret = true;
		}
	}

	return ret;
}

int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
		 _wait_q_t *wait_q, k_timeout_t timeout, void **data)
{
	int ret = z_pend_curr(lock, key, wait_q, timeout);

	if (data != NULL) {
		*data = _current->base.swap_data;
	}
	return ret;
}
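
/*
 * Example (illustrative sketch): how a minimal internal "signal" object
 * could be layered on the two helpers above.  This is not an existing
 * kernel object; the struct, its functions and the use of z_waitq_init()
 * are assumptions, and the z_* calls are private kernel API available only
 * to kernel-side code.
 */
struct example_signal {
	struct k_spinlock lock;
	_wait_q_t waitq;
};

static void example_signal_init(struct example_signal *sig)
{
	z_waitq_init(&sig->waitq);
}

static int example_signal_wait(struct example_signal *sig, void **msg,
			       k_timeout_t timeout)
{
	k_spinlock_key_t key = k_spin_lock(&sig->lock);

	/* Releases the lock, pends the caller, and reports the waker's
	 * swap_data through *msg.
	 */
	return z_sched_wait(&sig->lock, key, &sig->waitq, timeout, msg);
}

static bool example_signal_post(struct example_signal *sig, void *msg)
{
	/* Wakes at most one waiter, handing it a 0 return value and msg */
	return z_sched_wake(&sig->waitq, 0, msg);
}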

#ifdef CONFIG_SCHED_THREAD_USAGE

/* Need one of these for this to work */
#if !defined(CONFIG_USE_SWITCH) && !defined(CONFIG_INSTRUMENT_THREAD_SWITCHING)
#error "No data backend configured for CONFIG_SCHED_THREAD_USAGE"
#endif

static struct k_spinlock usage_lock;

static uint32_t usage_now(void)
{
	uint32_t now;

#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
	now = (uint32_t)timing_counter_get();
#else
	now = k_cycle_get_32();
#endif

	/* Edge case: we use a zero as a null ("stop() already called") */
	return (now == 0) ? 1 : now;
}

void z_sched_usage_start(struct k_thread *thread)
{
	/* One write through a volatile pointer doesn't require
	 * synchronization as long as _usage() treats it as volatile
	 * (we can't race with _stop() by design).
	 */
	_current_cpu->usage0 = usage_now();
}

void z_sched_usage_stop(void)
{
	k_spinlock_key_t k = k_spin_lock(&usage_lock);
	uint32_t u0 = _current_cpu->usage0;

	if (u0 != 0) {
		uint32_t dt = usage_now() - u0;

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
		if (z_is_idle_thread_object(_current)) {
			_kernel.idle_thread_usage += dt;
		} else {
			_kernel.all_thread_usage += dt;
		}
#endif
		_current->base.usage += dt;
	}

	_current_cpu->usage0 = 0;
	k_spin_unlock(&usage_lock, k);
}

uint64_t z_sched_thread_usage(struct k_thread *thread)
{
	k_spinlock_key_t k = k_spin_lock(&usage_lock);
	uint32_t u0 = _current_cpu->usage0, now = usage_now();
	uint64_t ret = thread->base.usage;

	if (u0 != 0) {
		uint32_t dt = now - u0;

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
		if (z_is_idle_thread_object(thread)) {
			_kernel.idle_thread_usage += dt;
		} else {
			_kernel.all_thread_usage += dt;
		}
#endif

		ret += dt;
		thread->base.usage = ret;
		_current_cpu->usage0 = now;
	}

	k_spin_unlock(&usage_lock, k);
	return ret;
}

#endif /* CONFIG_SCHED_THREAD_USAGE */
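
/*
 * Example (illustrative sketch): turning the cycle counts maintained above
 * into a rough CPU share.  The sampling window, the percentage arithmetic
 * and the assumption that usage is recorded in k_cycle_get_32() units
 * (i.e. CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS is disabled) are
 * all example choices, and z_sched_thread_usage() is private kernel API.
 */
static uint32_t example_cpu_share_percent(struct k_thread *thread,
					  k_timeout_t window)
{
	uint64_t before = z_sched_thread_usage(thread);
	uint32_t cyc0 = k_cycle_get_32();

	k_sleep(window);

	uint64_t used = z_sched_thread_usage(thread) - before;
	uint32_t elapsed = k_cycle_get_32() - cyc0;

	return (elapsed != 0U) ? (uint32_t)((used * 100U) / elapsed) : 0U;
}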