/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <kernel.h>
#include <ksched.h>
#include <spinlock.h>
#include <sched_priq.h>
#include <wait_q.h>
#include <kswap.h>
#include <kernel_arch_func.h>
#include <syscall_handler.h>
#include <drivers/timer/system_timer.h>
#include <stdbool.h>
#include <kernel_internal.h>
#include <logging/log.h>
#include <sys/atomic.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

/* Maximum time between the time a self-aborting thread flags itself
 * DEAD and the last read or write to its stack memory (i.e. the time
 * of its next swap()). In theory this might be tuned per platform,
 * but in practice this conservative value should be safe.
 */
#define THREAD_ABORT_DELAY_US 500

#if defined(CONFIG_SCHED_DUMB)
#define _priq_run_add z_priq_dumb_add
#define _priq_run_remove z_priq_dumb_remove
# if defined(CONFIG_SCHED_CPU_MASK)
#  define _priq_run_best _priq_dumb_mask_best
# else
#  define _priq_run_best z_priq_dumb_best
# endif
#elif defined(CONFIG_SCHED_SCALABLE)
#define _priq_run_add z_priq_rb_add
#define _priq_run_remove z_priq_rb_remove
#define _priq_run_best z_priq_rb_best
#elif defined(CONFIG_SCHED_MULTIQ)
#define _priq_run_add z_priq_mq_add
#define _priq_run_remove z_priq_mq_remove
#define _priq_run_best z_priq_mq_best
#endif

#if defined(CONFIG_WAITQ_SCALABLE)
#define z_priq_wait_add z_priq_rb_add
#define _priq_wait_remove z_priq_rb_remove
#define _priq_wait_best z_priq_rb_best
#elif defined(CONFIG_WAITQ_DUMB)
#define z_priq_wait_add z_priq_dumb_add
#define _priq_wait_remove z_priq_dumb_remove
#define _priq_wait_best z_priq_dumb_best
#endif

/* the only struct z_kernel instance */
struct z_kernel _kernel;

static struct k_spinlock sched_spinlock;

static void update_cache(int);

#define LOCKED(lck) for (k_spinlock_key_t __i = {}, \
			__key = k_spin_lock(lck); \
			!__i.key; \
			k_spin_unlock(lck, __key), __i.key = 1)
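
/* Usage sketch (explanatory only, not kernel logic): LOCKED() expands
 * to a one-iteration for() loop, so a critical section over a spinlock
 * can be written as a braced block and the unlock runs automatically
 * in the loop's iteration expression:
 *
 *	LOCKED(&sched_spinlock) {
 *		// manipulate run queue state here
 *	}
 *
 * The body must fall out of the block normally; a break or return
 * inside it would skip the k_spin_unlock() call.
 */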

static inline int is_preempt(struct k_thread *thread)
{
#ifdef CONFIG_PREEMPT_ENABLED
	/* explanation in kernel_struct.h */
	return thread->base.preempt <= _PREEMPT_THRESHOLD;
#else
	return 0;
#endif
}

static inline int is_metairq(struct k_thread *thread)
{
#if CONFIG_NUM_METAIRQ_PRIORITIES > 0
	return (thread->base.prio - K_HIGHEST_THREAD_PRIO)
		< CONFIG_NUM_METAIRQ_PRIORITIES;
#else
	return 0;
#endif
}

#if CONFIG_ASSERT
static inline bool is_thread_dummy(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_DUMMY) != 0U;
}
#endif

bool z_is_t1_higher_prio_than_t2(struct k_thread *thread_1,
				 struct k_thread *thread_2)
{
	if (thread_1->base.prio < thread_2->base.prio) {
		return true;
	}

#ifdef CONFIG_SCHED_DEADLINE
	/* Note that we don't care about wraparound conditions. The
	 * expectation is that the application will have arranged to
	 * block the threads, change their priorities or reset their
	 * deadlines when the job is complete. Letting the deadlines
	 * go negative is fine and in fact prevents aliasing bugs.
	 */
	if (thread_1->base.prio == thread_2->base.prio) {
		int now = (int) k_cycle_get_32();
		int dt1 = thread_1->base.prio_deadline - now;
		int dt2 = thread_2->base.prio_deadline - now;

		return dt1 < dt2;
	}
#endif

	return false;
}
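
/* Worked example (explanatory only): with CONFIG_SCHED_DEADLINE and two
 * equal-priority threads whose prio_deadline values are 1000 and 3000
 * cycles while k_cycle_get_32() reads 500, the signed deltas are
 * dt1 = 500 and dt2 = 2500, so the thread with the earlier deadline is
 * treated as higher priority. Because only the differences are
 * compared, the ordering survives 32-bit cycle counter wraparound as
 * long as deadlines are set less than half the counter range apart.
 */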

static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
					 int preempt_ok)
{
	/* Preemption is OK if it's being explicitly allowed by
	 * software state (e.g. the thread called k_yield())
	 */
	if (preempt_ok != 0) {
		return true;
	}

	__ASSERT(_current != NULL, "");

	/* Or if we're pended/suspended/dummy (duh) */
	if (z_is_thread_prevented_from_running(_current)) {
		return true;
	}

	/* Edge case on ARM where a thread can be pended out of an
	 * interrupt handler before the "synchronous" swap starts
	 * context switching. Platforms with atomic swap can never
	 * hit this.
	 */
	if (IS_ENABLED(CONFIG_SWAP_NONATOMIC)
	    && z_is_thread_timeout_active(thread)) {
		return true;
	}

	/* Otherwise we have to be running a preemptible thread or
	 * switching to a metairq
	 */
	if (is_preempt(_current) || is_metairq(thread)) {
		return true;
	}

	/* The idle threads can look "cooperative" if there are no
	 * preemptible priorities (this is sort of an API glitch).
	 * They must always be preemptible.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_ENABLED) &&
	    z_is_idle_thread_object(_current)) {
		return true;
	}

	return false;
}

#ifdef CONFIG_SCHED_CPU_MASK
static ALWAYS_INLINE struct k_thread *_priq_dumb_mask_best(sys_dlist_t *pq)
{
	/* With masks enabled we need to be prepared to walk the list
	 * looking for one we can run
	 */
	struct k_thread *thread;

	SYS_DLIST_FOR_EACH_CONTAINER(pq, thread, base.qnode_dlist) {
		if ((thread->base.cpu_mask & BIT(_current_cpu->id)) != 0) {
			return thread;
		}
	}
	return NULL;
}
#endif

static ALWAYS_INLINE struct k_thread *next_up(void)
{
	struct k_thread *thread;

	/* If a thread self-aborted we need the idle thread to clean it up
	 * before any other thread can run on this CPU
	 */
	if (_current_cpu->pending_abort != NULL) {
		return _current_cpu->idle_thread;
	}

	thread = _priq_run_best(&_kernel.ready_q.runq);

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && (CONFIG_NUM_COOP_PRIORITIES > 0)
	/* MetaIRQs must always attempt to return back to a
	 * cooperative thread they preempted and not whatever happens
	 * to be highest priority now. The cooperative thread was
	 * promised it wouldn't be preempted (by non-metairq threads)!
	 */
	struct k_thread *mirqp = _current_cpu->metairq_preempted;

	if (mirqp != NULL && (thread == NULL || !is_metairq(thread))) {
		if (!z_is_thread_prevented_from_running(mirqp)) {
			thread = mirqp;
		} else {
			_current_cpu->metairq_preempted = NULL;
		}
	}
#endif

	/* If the current thread is marked aborting, mark it
	 * dead so it will not be scheduled again.
	 */
	if (_current->base.thread_state & _THREAD_ABORTING) {
		_current->base.thread_state |= _THREAD_DEAD;
#ifdef CONFIG_SMP
		_current_cpu->swap_ok = true;
#endif
	}

#ifndef CONFIG_SMP
	/* In uniprocessor mode, we can leave the current thread in
	 * the queue (actually we have to, otherwise the assembly
	 * context switch code for all architectures would be
	 * responsible for putting it back in z_swap and ISR return!),
	 * which makes this choice simple.
	 */
	return thread ? thread : _current_cpu->idle_thread;
#else
	/* Under SMP, the "cache" mechanism for selecting the next
	 * thread doesn't work, so we have more work to do to test
	 * _current against the best choice from the queue. Here, the
	 * thread selected above represents "the best thread that is
	 * not current".
	 *
	 * Subtle note on "queued": in SMP mode, _current does not
	 * live in the queue, so this isn't exactly the same thing as
	 * "ready", it means "is _current already added back to the
	 * queue such that we don't want to re-add it".
	 */
	int queued = z_is_thread_queued(_current);
	int active = !z_is_thread_prevented_from_running(_current);

	if (thread == NULL) {
		thread = _current_cpu->idle_thread;
	}

	if (active) {
		if (!queued &&
		    !z_is_t1_higher_prio_than_t2(thread, _current)) {
			thread = _current;
		}

		if (!should_preempt(thread, _current_cpu->swap_ok)) {
			thread = _current;
		}
	}

	/* Put _current back into the queue */
	if (thread != _current && active &&
	    !z_is_idle_thread_object(_current) && !queued) {
		_priq_run_add(&_kernel.ready_q.runq, _current);
		z_mark_thread_as_queued(_current);
	}

	/* Take the new _current out of the queue */
	if (z_is_thread_queued(thread)) {
		_priq_run_remove(&_kernel.ready_q.runq, thread);
	}
	z_mark_thread_as_not_queued(thread);

	return thread;
#endif
}

static void move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		_priq_run_remove(&_kernel.ready_q.runq, thread);
	}
	_priq_run_add(&_kernel.ready_q.runq, thread);
	z_mark_thread_as_queued(thread);
	update_cache(thread == _current);
}

#ifdef CONFIG_TIMESLICING

static int slice_time;
static int slice_max_prio;

#ifdef CONFIG_SWAP_NONATOMIC
/* If z_swap() isn't atomic, then it's possible for a timer interrupt
 * to try to timeslice away _current after it has already pended
 * itself but before the corresponding context switch. Treat that as
 * a noop condition in z_time_slice().
 */
static struct k_thread *pending_current;
#endif

void z_reset_time_slice(void)
{
	/* Add the elapsed time since the last announced tick to the
	 * slice count, as we'll see those "expired" ticks arrive in a
	 * FUTURE z_time_slice() call.
	 */
	if (slice_time != 0) {
		_current_cpu->slice_ticks = slice_time + z_clock_elapsed();
		z_set_timeout_expiry(slice_time, false);
	}
}

void k_sched_time_slice_set(int32_t slice, int prio)
{
	LOCKED(&sched_spinlock) {
		_current_cpu->slice_ticks = 0;
		slice_time = k_ms_to_ticks_ceil32(slice);
		slice_max_prio = prio;
		z_reset_time_slice();
	}
}
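
/* Example application usage (explanatory only, assumes
 * CONFIG_TIMESLICING=y): round-robin preemptible threads at priority 0
 * and all numerically higher (less urgent) priorities every 10 ms, or
 * pass a slice of 0 to disable slicing again:
 *
 *	k_sched_time_slice_set(10, 0);
 *	k_sched_time_slice_set(0, 0);
 *
 * The millisecond argument is converted to ticks with
 * k_ms_to_ticks_ceil32() above, so small nonzero values round up to at
 * least one tick.
 */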

static inline int sliceable(struct k_thread *thread)
{
	return is_preempt(thread)
		&& !z_is_thread_prevented_from_running(thread)
		&& !z_is_prio_higher(thread->base.prio, slice_max_prio)
		&& !z_is_idle_thread_object(thread);
}

/* Called out of each timer interrupt */
void z_time_slice(int ticks)
{
	/* Hold sched_spinlock, so that activity on another CPU
	 * (like a call to k_thread_abort() at just the wrong time)
	 * won't affect the correctness of the decisions made here.
	 * Also prevents any nested interrupts from changing
	 * thread state to avoid similar issues, since this would
	 * normally run with IRQs enabled.
	 */
	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

#ifdef CONFIG_SWAP_NONATOMIC
	if (pending_current == _current) {
		z_reset_time_slice();
		k_spin_unlock(&sched_spinlock, key);
		return;
	}
	pending_current = NULL;
#endif

	if (slice_time && sliceable(_current)) {
		if (ticks >= _current_cpu->slice_ticks) {
			move_thread_to_end_of_prio_q(_current);
			z_reset_time_slice();
		} else {
			_current_cpu->slice_ticks -= ticks;
		}
	} else {
		_current_cpu->slice_ticks = 0;
	}
	k_spin_unlock(&sched_spinlock, key);
}
#endif

/* Track cooperative threads preempted by metairqs so we can return to
 * them specifically. Called at the moment a new thread has been
 * selected to run.
 */
static void update_metairq_preempt(struct k_thread *thread)
{
#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && (CONFIG_NUM_COOP_PRIORITIES > 0)
	if (is_metairq(thread) && !is_metairq(_current) &&
	    !is_preempt(_current)) {
		/* Record new preemption */
		_current_cpu->metairq_preempted = _current;
	} else if (!is_metairq(thread) && !z_is_idle_thread_object(thread)) {
		/* Returning from existing preemption */
		_current_cpu->metairq_preempted = NULL;
	}
#endif
}

static void update_cache(int preempt_ok)
{
#ifndef CONFIG_SMP
	struct k_thread *thread = next_up();

	if (should_preempt(thread, preempt_ok)) {
#ifdef CONFIG_TIMESLICING
		if (thread != _current) {
			z_reset_time_slice();
		}
#endif
		update_metairq_preempt(thread);
		_kernel.ready_q.cache = thread;
	} else {
		_kernel.ready_q.cache = _current;
	}

#else
	/* The way this works is that the CPU record keeps its
	 * "cooperative swapping is OK" flag until the next reschedule
	 * call or context switch. It doesn't need to be tracked per
	 * thread because if the thread gets preempted for whatever
	 * reason the scheduler will make the same decision anyway.
	 */
	_current_cpu->swap_ok = preempt_ok;
#endif
}

static void ready_thread(struct k_thread *thread)
{
#ifdef KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(thread));
#endif

	/* If the thread is queued already, do not try to add it to the
	 * run queue again
	 */
	if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) {
		sys_trace_thread_ready(thread);
		_priq_run_add(&_kernel.ready_q.runq, thread);
		z_mark_thread_as_queued(thread);
		update_cache(0);
#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
		arch_sched_ipi();
#endif
	}
}

void z_ready_thread(struct k_thread *thread)
{
	LOCKED(&sched_spinlock) {
		ready_thread(thread);
	}
}

void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	LOCKED(&sched_spinlock) {
		move_thread_to_end_of_prio_q(thread);
	}
}

void z_sched_start(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

	if (z_has_thread_started(thread)) {
		k_spin_unlock(&sched_spinlock, key);
		return;
	}

	z_mark_thread_as_started(thread);
	ready_thread(thread);
	z_reschedule(&sched_spinlock, key);
}

void z_impl_k_thread_suspend(struct k_thread *thread)
{
	(void)z_abort_thread_timeout(thread);

	LOCKED(&sched_spinlock) {
		if (z_is_thread_queued(thread)) {
			_priq_run_remove(&_kernel.ready_q.runq, thread);
			z_mark_thread_as_not_queued(thread);
		}
		z_mark_thread_as_suspended(thread);
		update_cache(thread == _current);
	}

	if (thread == _current) {
		z_reschedule_unlocked();
	}
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_suspend(struct k_thread *thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_suspend(thread);
}
#include <syscalls/k_thread_suspend_mrsh.c>
#endif

void z_impl_k_thread_resume(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

	/* Do not try to resume a thread that was not suspended */
	if (!z_is_thread_suspended(thread)) {
		k_spin_unlock(&sched_spinlock, key);
		return;
	}

	z_mark_thread_as_not_suspended(thread);
	ready_thread(thread);

	z_reschedule(&sched_spinlock, key);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_resume(struct k_thread *thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_resume(thread);
}
#include <syscalls/k_thread_resume_mrsh.c>
#endif
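
/* Example application usage (explanatory only): a suspended thread is
 * taken off the run queue and will not be scheduled again until
 * k_thread_resume() is called on it:
 *
 *	k_thread_suspend(&worker_thread);	// worker_thread is hypothetical
 *	// worker_thread will not run here
 *	k_thread_resume(&worker_thread);
 */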

static _wait_q_t *pended_on(struct k_thread *thread)
{
	__ASSERT_NO_MSG(thread->base.pended_on);

	return thread->base.pended_on;
}

void z_thread_single_abort(struct k_thread *thread)
{
	void (*fn_abort)(struct k_thread *aborted) = NULL;

	__ASSERT(!(thread->base.user_options & K_ESSENTIAL),
		 "essential thread aborted");
	__ASSERT(thread != _current || arch_is_in_isr(),
		 "self-abort detected");

	/* Prevent any of the further logic in this function from running more
	 * than once
	 */
	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
	if ((thread->base.thread_state &
	     (_THREAD_ABORTING | _THREAD_DEAD)) != 0) {
		LOG_DBG("Thread %p already dead or on the way out", thread);
		k_spin_unlock(&sched_spinlock, key);
		return;
	}
	thread->base.thread_state |= _THREAD_ABORTING;
	k_spin_unlock(&sched_spinlock, key);

	(void)z_abort_thread_timeout(thread);

	if (IS_ENABLED(CONFIG_SMP)) {
		z_sched_abort(thread);
	}

	LOCKED(&sched_spinlock) {
		LOG_DBG("Cleanup aborting thread %p", thread);
		struct k_thread *waiter;

		if (z_is_thread_ready(thread)) {
			if (z_is_thread_queued(thread)) {
				_priq_run_remove(&_kernel.ready_q.runq,
						 thread);
				z_mark_thread_as_not_queued(thread);
			}
			update_cache(thread == _current);
		} else {
			if (z_is_thread_pending(thread)) {
				_priq_wait_remove(&pended_on(thread)->waitq,
						  thread);
				z_mark_thread_as_not_pending(thread);
				thread->base.pended_on = NULL;
			}
		}

		/* Wake everybody up who was trying to join with this thread.
		 * A reschedule is invoked later by k_thread_abort().
		 */
		while ((waiter = z_waitq_head(&thread->base.join_waiters)) !=
		       NULL) {
			(void)z_abort_thread_timeout(waiter);
			_priq_wait_remove(&pended_on(waiter)->waitq, waiter);
			z_mark_thread_as_not_pending(waiter);
			waiter->base.pended_on = NULL;
			arch_thread_return_value_set(waiter, 0);
			ready_thread(waiter);
		}

		if (z_is_idle_thread_object(_current)) {
			update_cache(1);
		}

		thread->base.thread_state |= _THREAD_DEAD;

		/* Read this here from the thread struct now instead of
		 * after we unlock
		 */
		fn_abort = thread->fn_abort;

		/* Keep inside the spinlock as these may use the contents
		 * of the thread object. As soon as we release this spinlock,
		 * the thread object could be destroyed at any time.
		 */
		sys_trace_thread_abort(thread);
		z_thread_monitor_exit(thread);

#ifdef CONFIG_USERSPACE
		/* Remove this thread from its memory domain, which takes
		 * it off the domain's thread list and possibly also arch-
		 * specific tasks.
		 */
		z_mem_domain_exit_thread(thread);

		/* Revoke permissions on thread's ID so that it may be
		 * recycled
		 */
		z_thread_perms_all_clear(thread);

		/* Clear initialized state so that this thread object may be
		 * re-used and triggers errors if API calls are made on it from
		 * user threads
		 */
		z_object_uninit(thread->stack_obj);
		z_object_uninit(thread);
#endif
		/* Kernel should never look at the thread object again past
		 * this point unless another thread API is called. If the
		 * object doesn't get corrupted, we'll catch other
		 * k_thread_abort()s on this object, although this is
		 * somewhat undefined behavior. It must be safe to call
		 * k_thread_create() or free the object at this point.
		 */
#if __ASSERT_ON
		atomic_clear(&thread->base.cookie);
#endif
	}

	if (fn_abort != NULL) {
		/* Thread object provided to be freed or recycled */
		fn_abort(thread);
	}
}

static void unready_thread(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		_priq_run_remove(&_kernel.ready_q.runq, thread);
		z_mark_thread_as_not_queued(thread);
	}
	update_cache(thread == _current);
}

void z_remove_thread_from_ready_q(struct k_thread *thread)
{
	LOCKED(&sched_spinlock) {
		unready_thread(thread);
	}
}

/* sched_spinlock must be held */
static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q)
{
	unready_thread(thread);
	z_mark_thread_as_pending(thread);
	sys_trace_thread_pend(thread);

	if (wait_q != NULL) {
		thread->base.pended_on = wait_q;
		z_priq_wait_add(&wait_q->waitq, thread);
	}
}

static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
{
	if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
#ifdef CONFIG_LEGACY_TIMEOUT_API
		timeout = _TICK_ALIGN + k_ms_to_ticks_ceil32(timeout);
#endif
		z_add_thread_timeout(thread, timeout);
	}
}

static void pend(struct k_thread *thread, _wait_q_t *wait_q,
		 k_timeout_t timeout)
{
#ifdef KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(wait_q));
#endif

	LOCKED(&sched_spinlock) {
		add_to_waitq_locked(thread, wait_q);
	}

	add_thread_timeout(thread, timeout);
}

void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
		   k_timeout_t timeout)
{
	__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
	pend(thread, wait_q, timeout);
}

ALWAYS_INLINE struct k_thread *z_find_first_thread_to_unpend(_wait_q_t *wait_q,
						struct k_thread *from)
{
	ARG_UNUSED(from);

	struct k_thread *ret = NULL;

	LOCKED(&sched_spinlock) {
		ret = _priq_wait_best(&wait_q->waitq);
	}

	return ret;
}

static inline void unpend_thread_no_timeout(struct k_thread *thread)
{
	_priq_wait_remove(&pended_on(thread)->waitq, thread);
	z_mark_thread_as_not_pending(thread);
	thread->base.pended_on = NULL;
}

ALWAYS_INLINE void z_unpend_thread_no_timeout(struct k_thread *thread)
{
	LOCKED(&sched_spinlock) {
		unpend_thread_no_timeout(thread);
	}
}

#ifdef CONFIG_SYS_CLOCK_EXISTS
/* Timeout handler for *_thread_timeout() APIs */
void z_thread_timeout(struct _timeout *timeout)
{
	LOCKED(&sched_spinlock) {
		struct k_thread *thread = CONTAINER_OF(timeout,
					struct k_thread, base.timeout);

		if (thread->base.pended_on != NULL) {
			unpend_thread_no_timeout(thread);
		}
		z_mark_thread_as_started(thread);
		z_mark_thread_as_not_suspended(thread);
		ready_thread(thread);
	}
}
#endif
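
/* Illustrative call path (explanatory only, no new functionality): a
 * blocking API such as k_sem_take(&sem, K_MSEC(100)) reaches
 * z_pend_curr(), which pends _current on the object's wait queue, arms
 * the 100 ms timeout via add_thread_timeout(), and swaps away. The
 * thread becomes ready again either when the owning object unpends it
 * (e.g. k_sem_give()) or when z_thread_timeout() above fires first.
 */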

int z_pend_curr_irqlock(uint32_t key, _wait_q_t *wait_q, k_timeout_t timeout)
{
	pend(_current, wait_q, timeout);

#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
	pending_current = _current;

	int ret = z_swap_irqlock(key);
	LOCKED(&sched_spinlock) {
		if (pending_current == _current) {
			pending_current = NULL;
		}
	}
	return ret;
#else
	return z_swap_irqlock(key);
#endif
}

int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
		_wait_q_t *wait_q, k_timeout_t timeout)
{
#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
	pending_current = _current;
#endif
	pend(_current, wait_q, timeout);
	return z_swap(lock, key);
}

struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
{
	struct k_thread *thread = z_unpend1_no_timeout(wait_q);

	if (thread != NULL) {
		(void)z_abort_thread_timeout(thread);
	}

	return thread;
}

void z_unpend_thread(struct k_thread *thread)
{
	z_unpend_thread_no_timeout(thread);
	(void)z_abort_thread_timeout(thread);
}

/* Priority set utility that does no rescheduling; it just changes the
 * run queue state, returning true if a reschedule is needed later.
 */
bool z_set_prio(struct k_thread *thread, int prio)
{
	bool need_sched = 0;

	LOCKED(&sched_spinlock) {
		need_sched = z_is_thread_ready(thread);

		if (need_sched) {
			/* Don't requeue on SMP if it's the running thread */
			if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) {
				_priq_run_remove(&_kernel.ready_q.runq, thread);
				thread->base.prio = prio;
				_priq_run_add(&_kernel.ready_q.runq, thread);
			} else {
				thread->base.prio = prio;
			}
			update_cache(1);
		} else {
			thread->base.prio = prio;
		}
	}
	sys_trace_thread_priority_set(thread);

	return need_sched;
}

void z_thread_priority_set(struct k_thread *thread, int prio)
{
	bool need_sched = z_set_prio(thread, prio);

#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
	arch_sched_ipi();
#endif

	if (need_sched && _current->base.sched_locked == 0) {
		z_reschedule_unlocked();
	}
}

static inline int resched(uint32_t key)
{
#ifdef CONFIG_SMP
	_current_cpu->swap_ok = 0;
#endif

	return arch_irq_unlocked(key) && !arch_is_in_isr();
}

/*
 * Check if the next ready thread is the same as the current thread
 * and save the trip if true.
 */
static inline bool need_swap(void)
{
	/* the SMP case will be handled in C based z_swap() */
#ifdef CONFIG_SMP
	return true;
#else
	struct k_thread *new_thread;

	/* Check if the next ready thread is the same as the current thread */
	new_thread = z_get_next_ready_thread();
	return new_thread != _current;
#endif
}

void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
{
	if (resched(key.key) && need_swap()) {
		z_swap(lock, key);
	} else {
		k_spin_unlock(lock, key);
	}
}

void z_reschedule_irqlock(uint32_t key)
{
	if (resched(key)) {
		z_swap_irqlock(key);
	} else {
		irq_unlock(key);
	}
}

void k_sched_lock(void)
{
	LOCKED(&sched_spinlock) {
		z_sched_lock();
	}
}

void k_sched_unlock(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
	LOCKED(&sched_spinlock) {
		__ASSERT(_current->base.sched_locked != 0, "");
		__ASSERT(!arch_is_in_isr(), "");

		++_current->base.sched_locked;
		update_cache(0);
	}

	LOG_DBG("scheduler unlocked (%p:%d)",
		_current, _current->base.sched_locked);

	z_reschedule_unlocked();
#endif
}
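
/* Usage sketch (explanatory only): the scheduler lock nests and is
 * per-thread, letting a preemptible thread behave cooperatively for a
 * short stretch. Interrupts still run; only thread preemption is held
 * off:
 *
 *	k_sched_lock();
 *	// other threads cannot preempt this thread here
 *	k_sched_unlock();	// may reschedule immediately
 */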
900
Andy Ross1acd8c22018-05-03 14:51:49 -0700901#ifdef CONFIG_SMP
Patrik Flykt4344e272019-03-08 14:19:05 -0700902struct k_thread *z_get_next_ready_thread(void)
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400903{
Andy Ross1acd8c22018-05-03 14:51:49 -0700904 struct k_thread *ret = 0;
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400905
Patrik Flyktcf2d5792019-02-12 15:50:46 -0700906 LOCKED(&sched_spinlock) {
Andy Ross1acd8c22018-05-03 14:51:49 -0700907 ret = next_up();
908 }
909
910 return ret;
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400911}
Benjamin Walsh62092182016-12-20 14:39:08 -0500912#endif
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400913
Andy Rossb18685b2019-02-19 17:24:30 -0800914/* Just a wrapper around _current = xxx with tracing */
915static inline void set_current(struct k_thread *new_thread)
916{
Daniel Leung11e6b432020-08-27 16:12:01 -0700917 z_thread_mark_switched_out();
Andy Rosseefd3da2020-02-06 13:39:52 -0800918 _current_cpu->current = new_thread;
Andy Rossb18685b2019-02-19 17:24:30 -0800919}
920
Andy Ross1acd8c22018-05-03 14:51:49 -0700921#ifdef CONFIG_USE_SWITCH
Patrik Flykt4344e272019-03-08 14:19:05 -0700922void *z_get_next_switch_handle(void *interrupted)
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400923{
Andrew Boieae0d1b22019-03-29 16:25:27 -0700924 z_check_stack_sentinel();
925
Andy Rosseace1df2018-05-30 11:23:02 -0700926#ifdef CONFIG_SMP
Patrik Flyktcf2d5792019-02-12 15:50:46 -0700927 LOCKED(&sched_spinlock) {
Andy Rossf6d32ab2020-05-13 15:34:04 +0000928 struct k_thread *old_thread = _current, *new_thread;
Andy Rosseace1df2018-05-30 11:23:02 -0700929
Andy Rossf6d32ab2020-05-13 15:34:04 +0000930 old_thread->switch_handle = NULL;
931 new_thread = next_up();
932
933 if (old_thread != new_thread) {
934 update_metairq_preempt(new_thread);
935 wait_for_switch(new_thread);
936 arch_cohere_stacks(old_thread, interrupted, new_thread);
Andy Ross11a050b2019-11-13 09:41:52 -0800937
Andy Rosscb3964f2019-08-16 21:29:26 -0700938#ifdef CONFIG_TIMESLICING
939 z_reset_time_slice();
940#endif
Andy Rosseace1df2018-05-30 11:23:02 -0700941 _current_cpu->swap_ok = 0;
Andy Rossf6d32ab2020-05-13 15:34:04 +0000942 set_current(new_thread);
943
Danny Oerndrupc9d78402019-12-13 11:24:56 +0100944#ifdef CONFIG_SPIN_VALIDATE
Andy Ross8c1bdda2019-02-20 10:07:31 -0800945 /* Changed _current! Update the spinlock
946 * bookeeping so the validation doesn't get
947 * confused when the "wrong" thread tries to
948 * release the lock.
949 */
950 z_spin_lock_set_owner(&sched_spinlock);
951#endif
Andy Rosseace1df2018-05-30 11:23:02 -0700952 }
Andy Rossf6d32ab2020-05-13 15:34:04 +0000953 old_thread->switch_handle = interrupted;
Benjamin Walshb8c21602016-12-23 19:34:41 -0500954 }
Andy Rosseace1df2018-05-30 11:23:02 -0700955#else
Andy Rossf6d32ab2020-05-13 15:34:04 +0000956 _current->switch_handle = interrupted;
957 set_current(z_get_next_ready_thread());
Andy Rosseace1df2018-05-30 11:23:02 -0700958#endif
Andy Ross1acd8c22018-05-03 14:51:49 -0700959 return _current->switch_handle;
960}
Benjamin Walshb12a8e02016-12-14 15:24:12 -0500961#endif
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400962
Patrik Flykt4344e272019-03-08 14:19:05 -0700963ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq, struct k_thread *thread)
Andy Ross22642cf2018-04-02 18:24:58 -0700964{
Andy Ross1acd8c22018-05-03 14:51:49 -0700965 struct k_thread *t;
Andy Ross22642cf2018-04-02 18:24:58 -0700966
Andrew Boie8f0bb6a2019-09-21 18:36:23 -0700967 __ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
Andy Ross22642cf2018-04-02 18:24:58 -0700968
Andy Ross1acd8c22018-05-03 14:51:49 -0700969 SYS_DLIST_FOR_EACH_CONTAINER(pq, t, base.qnode_dlist) {
Patrik Flykt4344e272019-03-08 14:19:05 -0700970 if (z_is_t1_higher_prio_than_t2(thread, t)) {
Andy Rosseda4c022019-01-28 09:35:27 -0800971 sys_dlist_insert(&t->base.qnode_dlist,
972 &thread->base.qnode_dlist);
Andy Ross1acd8c22018-05-03 14:51:49 -0700973 return;
974 }
Andy Ross22642cf2018-04-02 18:24:58 -0700975 }
976
Andy Ross1acd8c22018-05-03 14:51:49 -0700977 sys_dlist_append(pq, &thread->base.qnode_dlist);
Andy Ross22642cf2018-04-02 18:24:58 -0700978}
979
Patrik Flykt4344e272019-03-08 14:19:05 -0700980void z_priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread)
Andy Ross1acd8c22018-05-03 14:51:49 -0700981{
Andy Rossdff6b712019-02-25 21:17:29 -0800982#if defined(CONFIG_SWAP_NONATOMIC) && defined(CONFIG_SCHED_DUMB)
983 if (pq == &_kernel.ready_q.runq && thread == _current &&
Patrik Flykt4344e272019-03-08 14:19:05 -0700984 z_is_thread_prevented_from_running(thread)) {
Andy Rossdff6b712019-02-25 21:17:29 -0800985 return;
986 }
987#endif
988
Andrew Boie8f0bb6a2019-09-21 18:36:23 -0700989 __ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
Andy Ross1acd8c22018-05-03 14:51:49 -0700990
991 sys_dlist_remove(&thread->base.qnode_dlist);
992}
993
Patrik Flykt4344e272019-03-08 14:19:05 -0700994struct k_thread *z_priq_dumb_best(sys_dlist_t *pq)
Andy Ross1acd8c22018-05-03 14:51:49 -0700995{
Anas Nashif9e3e7f62019-12-19 08:19:45 -0500996 struct k_thread *thread = NULL;
Flavio Ceolin26be3352018-11-15 15:03:32 -0800997 sys_dnode_t *n = sys_dlist_peek_head(pq);
998
Peter A. Bigot692e1032019-01-03 23:36:28 -0600999 if (n != NULL) {
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001000 thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
Peter A. Bigot692e1032019-01-03 23:36:28 -06001001 }
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001002 return thread;
Andy Ross1acd8c22018-05-03 14:51:49 -07001003}
1004
Patrik Flykt4344e272019-03-08 14:19:05 -07001005bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b)
Andy Ross1acd8c22018-05-03 14:51:49 -07001006{
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001007 struct k_thread *thread_a, *thread_b;
Andy Ross1acd8c22018-05-03 14:51:49 -07001008
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001009 thread_a = CONTAINER_OF(a, struct k_thread, base.qnode_rb);
1010 thread_b = CONTAINER_OF(b, struct k_thread, base.qnode_rb);
Andy Ross1acd8c22018-05-03 14:51:49 -07001011
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001012 if (z_is_t1_higher_prio_than_t2(thread_a, thread_b)) {
Flavio Ceolin02ed85b2018-09-20 15:43:57 -07001013 return true;
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001014 } else if (z_is_t1_higher_prio_than_t2(thread_b, thread_a)) {
Flavio Ceolin02ed85b2018-09-20 15:43:57 -07001015 return false;
Andy Ross1acd8c22018-05-03 14:51:49 -07001016 } else {
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001017 return thread_a->base.order_key < thread_b->base.order_key
1018 ? 1 : 0;
Andy Ross1acd8c22018-05-03 14:51:49 -07001019 }
1020}
1021
Patrik Flykt4344e272019-03-08 14:19:05 -07001022void z_priq_rb_add(struct _priq_rb *pq, struct k_thread *thread)
Andy Ross1acd8c22018-05-03 14:51:49 -07001023{
1024 struct k_thread *t;
1025
Andrew Boie8f0bb6a2019-09-21 18:36:23 -07001026 __ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
Andy Ross1acd8c22018-05-03 14:51:49 -07001027
1028 thread->base.order_key = pq->next_order_key++;
1029
1030 /* Renumber at wraparound. This is tiny code, and in practice
1031 * will almost never be hit on real systems. BUT on very
1032 * long-running systems where a priq never completely empties
1033 * AND that contains very large numbers of threads, it can be
1034 * a latency glitch to loop over all the threads like this.
1035 */
1036 if (!pq->next_order_key) {
1037 RB_FOR_EACH_CONTAINER(&pq->tree, t, base.qnode_rb) {
1038 t->base.order_key = pq->next_order_key++;
1039 }
1040 }
1041
1042 rb_insert(&pq->tree, &thread->base.qnode_rb);
1043}
1044
Patrik Flykt4344e272019-03-08 14:19:05 -07001045void z_priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread)
Andy Ross1acd8c22018-05-03 14:51:49 -07001046{
Andy Rossdff6b712019-02-25 21:17:29 -08001047#if defined(CONFIG_SWAP_NONATOMIC) && defined(CONFIG_SCHED_SCALABLE)
1048 if (pq == &_kernel.ready_q.runq && thread == _current &&
Patrik Flykt4344e272019-03-08 14:19:05 -07001049 z_is_thread_prevented_from_running(thread)) {
Andy Rossdff6b712019-02-25 21:17:29 -08001050 return;
1051 }
1052#endif
Andrew Boie8f0bb6a2019-09-21 18:36:23 -07001053 __ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
Andy Ross1acd8c22018-05-03 14:51:49 -07001054
1055 rb_remove(&pq->tree, &thread->base.qnode_rb);
1056
1057 if (!pq->tree.root) {
1058 pq->next_order_key = 0;
1059 }
1060}
1061
Patrik Flykt4344e272019-03-08 14:19:05 -07001062struct k_thread *z_priq_rb_best(struct _priq_rb *pq)
Andy Ross1acd8c22018-05-03 14:51:49 -07001063{
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001064 struct k_thread *thread = NULL;
Andy Ross1acd8c22018-05-03 14:51:49 -07001065 struct rbnode *n = rb_get_min(&pq->tree);
1066
Peter A. Bigot692e1032019-01-03 23:36:28 -06001067 if (n != NULL) {
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001068 thread = CONTAINER_OF(n, struct k_thread, base.qnode_rb);
Peter A. Bigot692e1032019-01-03 23:36:28 -06001069 }
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001070 return thread;
Andy Ross1acd8c22018-05-03 14:51:49 -07001071}
1072
Andy Ross9f06a352018-06-28 10:38:14 -07001073#ifdef CONFIG_SCHED_MULTIQ
1074# if (K_LOWEST_THREAD_PRIO - K_HIGHEST_THREAD_PRIO) > 31
1075# error Too many priorities for multiqueue scheduler (max 32)
1076# endif
1077#endif
1078
Patrik Flykt4344e272019-03-08 14:19:05 -07001079ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq, struct k_thread *thread)
Andy Ross9f06a352018-06-28 10:38:14 -07001080{
1081 int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;
1082
1083 sys_dlist_append(&pq->queues[priority_bit], &thread->base.qnode_dlist);
Flavio Ceolina9962032019-02-26 10:14:04 -08001084 pq->bitmask |= BIT(priority_bit);
Andy Ross9f06a352018-06-28 10:38:14 -07001085}
1086
Patrik Flykt4344e272019-03-08 14:19:05 -07001087ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq, struct k_thread *thread)
Andy Ross9f06a352018-06-28 10:38:14 -07001088{
Andy Rossdff6b712019-02-25 21:17:29 -08001089#if defined(CONFIG_SWAP_NONATOMIC) && defined(CONFIG_SCHED_MULTIQ)
1090 if (pq == &_kernel.ready_q.runq && thread == _current &&
Patrik Flykt4344e272019-03-08 14:19:05 -07001091 z_is_thread_prevented_from_running(thread)) {
Andy Rossdff6b712019-02-25 21:17:29 -08001092 return;
1093 }
1094#endif
Andy Ross9f06a352018-06-28 10:38:14 -07001095 int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;
1096
1097 sys_dlist_remove(&thread->base.qnode_dlist);
1098 if (sys_dlist_is_empty(&pq->queues[priority_bit])) {
Flavio Ceolina9962032019-02-26 10:14:04 -08001099 pq->bitmask &= ~BIT(priority_bit);
Andy Ross9f06a352018-06-28 10:38:14 -07001100 }
1101}
1102
Patrik Flykt4344e272019-03-08 14:19:05 -07001103struct k_thread *z_priq_mq_best(struct _priq_mq *pq)
Andy Ross9f06a352018-06-28 10:38:14 -07001104{
1105 if (!pq->bitmask) {
1106 return NULL;
1107 }
1108
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001109 struct k_thread *thread = NULL;
Andy Ross9f06a352018-06-28 10:38:14 -07001110 sys_dlist_t *l = &pq->queues[__builtin_ctz(pq->bitmask)];
Flavio Ceolin26be3352018-11-15 15:03:32 -08001111 sys_dnode_t *n = sys_dlist_peek_head(l);
Andy Ross9f06a352018-06-28 10:38:14 -07001112
Peter A. Bigot692e1032019-01-03 23:36:28 -06001113 if (n != NULL) {
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001114 thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
Peter A. Bigot692e1032019-01-03 23:36:28 -06001115 }
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001116 return thread;
Andy Ross9f06a352018-06-28 10:38:14 -07001117}
1118
Patrik Flykt4344e272019-03-08 14:19:05 -07001119int z_unpend_all(_wait_q_t *wait_q)
Andy Ross4ca0e072018-05-10 09:45:42 -07001120{
Andy Rossccf3bf72018-05-10 11:10:34 -07001121 int need_sched = 0;
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001122 struct k_thread *thread;
Andy Ross4ca0e072018-05-10 09:45:42 -07001123
Anas Nashif9e3e7f62019-12-19 08:19:45 -05001124 while ((thread = z_waitq_head(wait_q)) != NULL) {
1125 z_unpend_thread(thread);
1126 z_ready_thread(thread);
Andy Ross4ca0e072018-05-10 09:45:42 -07001127 need_sched = 1;
1128 }
Andy Rossccf3bf72018-05-10 11:10:34 -07001129
1130 return need_sched;
Andy Ross4ca0e072018-05-10 09:45:42 -07001131}
1132
Patrik Flykt4344e272019-03-08 14:19:05 -07001133void z_sched_init(void)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001134{
Andy Ross1acd8c22018-05-03 14:51:49 -07001135#ifdef CONFIG_SCHED_DUMB
1136 sys_dlist_init(&_kernel.ready_q.runq);
Andy Ross9f06a352018-06-28 10:38:14 -07001137#endif
1138
1139#ifdef CONFIG_SCHED_SCALABLE
Andy Ross1acd8c22018-05-03 14:51:49 -07001140 _kernel.ready_q.runq = (struct _priq_rb) {
1141 .tree = {
Patrik Flykt4344e272019-03-08 14:19:05 -07001142 .lessthan_fn = z_priq_rb_lessthan,
Andy Ross1acd8c22018-05-03 14:51:49 -07001143 }
1144 };
1145#endif
Andy Ross9f06a352018-06-28 10:38:14 -07001146
1147#ifdef CONFIG_SCHED_MULTIQ
1148 for (int i = 0; i < ARRAY_SIZE(_kernel.ready_q.runq.queues); i++) {
1149 sys_dlist_init(&_kernel.ready_q.runq.queues[i]);
1150 }
1151#endif
Piotr Zięcik4a39b9e2018-07-26 14:56:39 +02001152
1153#ifdef CONFIG_TIMESLICING
1154 k_sched_time_slice_set(CONFIG_TIMESLICE_SIZE,
1155 CONFIG_TIMESLICE_PRIORITY);
1156#endif
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001157}
1158
Patrik Flykt4344e272019-03-08 14:19:05 -07001159int z_impl_k_thread_priority_get(k_tid_t thread)
Allan Stephens399d0ad2016-10-07 13:41:34 -05001160{
Benjamin Walshf6ca7de2016-11-08 10:36:50 -05001161 return thread->base.prio;
Allan Stephens399d0ad2016-10-07 13:41:34 -05001162}
1163
Andrew Boie76c04a22017-09-27 14:45:10 -07001164#ifdef CONFIG_USERSPACE
Andy Ross65649742019-08-06 13:34:31 -07001165static inline int z_vrfy_k_thread_priority_get(k_tid_t thread)
1166{
1167 Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1168 return z_impl_k_thread_priority_get(thread);
1169}
1170#include <syscalls/k_thread_priority_get_mrsh.c>
Andrew Boie76c04a22017-09-27 14:45:10 -07001171#endif
1172
Patrik Flykt4344e272019-03-08 14:19:05 -07001173void z_impl_k_thread_priority_set(k_tid_t tid, int prio)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001174{
Benjamin Walsh3cc2ba92016-11-08 15:44:05 -05001175 /*
1176 * Use NULL, since we cannot know what the entry point is (we do not
1177 * keep track of it) and idle cannot change its priority.
1178 */
Patrik Flykt4344e272019-03-08 14:19:05 -07001179 Z_ASSERT_VALID_PRIO(prio, NULL);
Andrew Boie4f77c2a2019-11-07 12:43:29 -08001180 __ASSERT(!arch_is_in_isr(), "");
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001181
Benjamin Walsh37511232016-10-13 08:10:07 -04001182 struct k_thread *thread = (struct k_thread *)tid;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001183
Patrik Flykt4344e272019-03-08 14:19:05 -07001184 z_thread_priority_set(thread, prio);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001185}
1186
Andrew Boie468190a2017-09-29 14:00:48 -07001187#ifdef CONFIG_USERSPACE
Andy Ross65649742019-08-06 13:34:31 -07001188static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio)
Andrew Boie468190a2017-09-29 14:00:48 -07001189{
Andrew Boie8345e5e2018-05-04 15:57:57 -07001190 Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1191 Z_OOPS(Z_SYSCALL_VERIFY_MSG(_is_valid_prio(prio, NULL),
Andy Ross65649742019-08-06 13:34:31 -07001192 "invalid thread priority %d", prio));
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001193 Z_OOPS(Z_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio,
Andrew Boie8345e5e2018-05-04 15:57:57 -07001194 "thread priority may only be downgraded (%d < %d)",
1195 prio, thread->base.prio));
Andrew Boie5008fed2017-10-08 10:11:24 -07001196
Andy Ross65649742019-08-06 13:34:31 -07001197 z_impl_k_thread_priority_set(thread, prio);
Andrew Boie468190a2017-09-29 14:00:48 -07001198}
Andy Ross65649742019-08-06 13:34:31 -07001199#include <syscalls/k_thread_priority_set_mrsh.c>
Andrew Boie468190a2017-09-29 14:00:48 -07001200#endif

#ifdef CONFIG_SCHED_DEADLINE
void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
{
	struct k_thread *thread = tid;

	LOCKED(&sched_spinlock) {
		thread->base.prio_deadline = k_cycle_get_32() + deadline;
		if (z_is_thread_queued(thread)) {
			_priq_run_remove(&_kernel.ready_q.runq, thread);
			_priq_run_add(&_kernel.ready_q.runq, thread);
		}
	}
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline)
{
	struct k_thread *thread = tid;

	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(deadline > 0,
				    "invalid thread deadline %d",
				    (int)deadline));

	z_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
}
#include <syscalls/k_thread_deadline_set_mrsh.c>
#endif
#endif
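
/* Usage sketch (illustrative only, not part of the kernel build): with
 * CONFIG_SCHED_DEADLINE, k_thread_deadline_set() takes a *relative* deadline
 * in hardware cycles (as the k_cycle_get_32() arithmetic above shows) and
 * only orders threads that share the same static priority.  The 1 ms budget
 * and the k_ms_to_cyc_ceil32() converter are assumptions for the example.
 *
 *	void queue_job(struct k_thread *worker)
 *	{
 *		// This job should complete within about a millisecond.
 *		k_thread_deadline_set(worker, k_ms_to_cyc_ceil32(1));
 *		k_wakeup(worker);
 *	}
 */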

void z_impl_k_yield(void)
{
	__ASSERT(!arch_is_in_isr(), "");

	if (!z_is_idle_thread_object(_current)) {
		LOCKED(&sched_spinlock) {
			if (!IS_ENABLED(CONFIG_SMP) ||
			    z_is_thread_queued(_current)) {
				_priq_run_remove(&_kernel.ready_q.runq,
						 _current);
			}
			_priq_run_add(&_kernel.ready_q.runq, _current);
			z_mark_thread_as_queued(_current);
			update_cache(1);
		}
	}
	z_swap_unlocked();
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_yield(void)
{
	z_impl_k_yield();
}
#include <syscalls/k_yield_mrsh.c>
#endif
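
/* Usage sketch (illustrative only, not part of the kernel build): a
 * cooperative (negative-priority) thread yielding inside a polling loop so
 * that equal-priority peers get CPU time, since nothing will preempt it
 * otherwise.  The helper name is hypothetical.
 *
 *	void poller(void *p1, void *p2, void *p3)
 *	{
 *		for (;;) {
 *			poll_device_once();     // hypothetical helper
 *			k_yield();
 *		}
 *	}
 */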

static int32_t z_tick_sleep(int32_t ticks)
{
#ifdef CONFIG_MULTITHREADING
	uint32_t expected_wakeup_time;

	__ASSERT(!arch_is_in_isr(), "");

	LOG_DBG("thread %p for %d ticks", _current, ticks);

	/* a wait of 0 ticks is treated as a 'yield' */
	if (ticks == 0) {
		k_yield();
		return 0;
	}

	k_timeout_t timeout;

#ifndef CONFIG_LEGACY_TIMEOUT_API
	timeout = Z_TIMEOUT_TICKS(ticks);
#else
	ticks += _TICK_ALIGN;
	timeout = (k_ticks_t) ticks;
#endif

	expected_wakeup_time = ticks + z_tick_get_32();

	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
	pending_current = _current;
#endif
	unready_thread(_current);
	z_add_thread_timeout(_current, timeout);
	z_mark_thread_as_suspended(_current);

	(void)z_swap(&sched_spinlock, key);

	__ASSERT(!z_is_thread_state_set(_current, _THREAD_SUSPENDED), "");

	ticks = expected_wakeup_time - z_tick_get_32();
	if (ticks > 0) {
		return ticks;
	}
#endif

	return 0;
}

int32_t z_impl_k_sleep(k_timeout_t timeout)
{
	k_ticks_t ticks;

	__ASSERT(!arch_is_in_isr(), "");
	sys_trace_void(SYS_TRACE_ID_SLEEP);

	/* in case of K_FOREVER, we suspend */
	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		k_thread_suspend(_current);
		return (int32_t) K_TICKS_FOREVER;
	}

#ifdef CONFIG_LEGACY_TIMEOUT_API
	ticks = k_ms_to_ticks_ceil32(timeout);
#else
	ticks = timeout.ticks;
#endif

	ticks = z_tick_sleep(ticks);
	sys_trace_end_call(SYS_TRACE_ID_SLEEP);
	return k_ticks_to_ms_floor64(ticks);
}

#ifdef CONFIG_USERSPACE
static inline int32_t z_vrfy_k_sleep(k_timeout_t timeout)
{
	return z_impl_k_sleep(timeout);
}
#include <syscalls/k_sleep_mrsh.c>
#endif
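
/* Usage sketch (illustrative only, not part of the kernel build): k_sleep()
 * returns the number of milliseconds left when the sleep was cut short by
 * k_wakeup(), and zero when the full period elapsed.  The helper name is
 * hypothetical.
 *
 *	void housekeeping(void *p1, void *p2, void *p3)
 *	{
 *		for (;;) {
 *			int32_t left = k_sleep(K_MSEC(500));
 *
 *			if (left > 0) {
 *				// Woken early by k_wakeup(); react to
 *				// whatever the waker wanted us to see.
 *				handle_wake_event();    // hypothetical helper
 *			}
 *		}
 *	}
 */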

int32_t z_impl_k_usleep(int us)
{
	int32_t ticks;

	ticks = k_us_to_ticks_ceil64(us);
	ticks = z_tick_sleep(ticks);
	return k_ticks_to_us_floor64(ticks);
}

#ifdef CONFIG_USERSPACE
static inline int32_t z_vrfy_k_usleep(int us)
{
	return z_impl_k_usleep(us);
}
#include <syscalls/k_usleep_mrsh.c>
#endif

void z_impl_k_wakeup(k_tid_t thread)
{
	if (z_is_thread_pending(thread)) {
		return;
	}

	if (z_abort_thread_timeout(thread) < 0) {
		/* Might have just been sleeping forever */
		if (thread->base.thread_state != _THREAD_SUSPENDED) {
			return;
		}
	}

	z_mark_thread_as_not_suspended(thread);
	z_ready_thread(thread);

#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
	arch_sched_ipi();
#endif

	if (!arch_is_in_isr()) {
		z_reschedule_unlocked();
	}
}

#ifdef CONFIG_TRACE_SCHED_IPI
extern void z_trace_sched_ipi(void);
#endif

#ifdef CONFIG_SMP
void z_sched_ipi(void)
{
	/* NOTE: When adding code to this function, make sure it is also
	 * called at an appropriate location when !CONFIG_SCHED_IPI_SUPPORTED.
	 */
#ifdef CONFIG_TRACE_SCHED_IPI
	z_trace_sched_ipi();
#endif
}

void z_sched_abort(struct k_thread *thread)
{
	k_spinlock_key_t key;

	if (thread == _current) {
		z_remove_thread_from_ready_q(thread);
		return;
	}

	/* First broadcast an IPI to the other CPUs so they can stop
	 * it locally. Not all architectures support that, alas. If
	 * we don't have it, we need to wait for some other interrupt.
	 */
#ifdef CONFIG_SCHED_IPI_SUPPORTED
	arch_sched_ipi();
#endif

	/* Wait for it to be flagged dead either by the CPU it was
	 * running on or because we caught it idle in the queue
	 */
	while ((thread->base.thread_state & _THREAD_DEAD) == 0U) {
		key = k_spin_lock(&sched_spinlock);
		if (z_is_thread_prevented_from_running(thread)) {
			__ASSERT(!z_is_thread_queued(thread), "");
			thread->base.thread_state |= _THREAD_DEAD;
			k_spin_unlock(&sched_spinlock, key);
		} else if (z_is_thread_queued(thread)) {
			_priq_run_remove(&_kernel.ready_q.runq, thread);
			z_mark_thread_as_not_queued(thread);
			thread->base.thread_state |= _THREAD_DEAD;
			k_spin_unlock(&sched_spinlock, key);
		} else {
			k_spin_unlock(&sched_spinlock, key);
			k_busy_wait(100);
		}
	}
}
#endif

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_wakeup(k_tid_t thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_wakeup(thread);
}
#include <syscalls/k_wakeup_mrsh.c>
#endif
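
/* Usage sketch (illustrative only, not part of the kernel build): pairing
 * k_sleep(K_FOREVER) with k_wakeup() as a minimal signalling mechanism.  The
 * forever sleep is implemented as a suspend (see z_impl_k_sleep() above), and
 * k_wakeup() handles that case explicitly; it is also safe to call from an
 * ISR.  Names are hypothetical.
 *
 *	static k_tid_t consumer_tid;
 *
 *	void consumer(void *p1, void *p2, void *p3)
 *	{
 *		consumer_tid = k_current_get();
 *		for (;;) {
 *			k_sleep(K_FOREVER);
 *			drain_rx_fifo();        // hypothetical helper
 *		}
 *	}
 *
 *	void rx_isr(const void *arg)
 *	{
 *		k_wakeup(consumer_tid);
 *	}
 */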

k_tid_t z_impl_k_current_get(void)
{
#ifdef CONFIG_SMP
	/* In SMP, _current is a field read from _current_cpu, which
	 * can race with preemption before it is read. We must lock
	 * local interrupts when reading it.
	 */
	unsigned int k = arch_irq_lock();
#endif

	k_tid_t ret = _current_cpu->current;

#ifdef CONFIG_SMP
	arch_irq_unlock(k);
#endif
	return ret;
}

#ifdef CONFIG_USERSPACE
static inline k_tid_t z_vrfy_k_current_get(void)
{
	return z_impl_k_current_get();
}
#include <syscalls/k_current_get_mrsh.c>
#endif

int z_impl_k_is_preempt_thread(void)
{
	return !arch_is_in_isr() && is_preempt(_current);
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_is_preempt_thread(void)
{
	return z_impl_k_is_preempt_thread();
}
#include <syscalls/k_is_preempt_thread_mrsh.c>
#endif
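
/* Usage sketch (illustrative only, not part of the kernel build): library
 * code that may run in either a cooperative or a preemptible thread can use
 * k_is_preempt_thread() to decide whether it must yield by hand during a
 * long operation.
 *
 *	if (!k_is_preempt_thread()) {
 *		k_yield();
 *	}
 */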

#ifdef CONFIG_SCHED_CPU_MASK
# ifdef CONFIG_SMP
/* Right now we use a single byte for this mask */
BUILD_ASSERT(CONFIG_MP_NUM_CPUS <= 8, "Too many CPUs for mask word");
# endif

static int cpu_mask_mod(k_tid_t thread, uint32_t enable_mask, uint32_t disable_mask)
{
	int ret = 0;

	LOCKED(&sched_spinlock) {
		if (z_is_thread_prevented_from_running(thread)) {
			thread->base.cpu_mask |= enable_mask;
			thread->base.cpu_mask &= ~disable_mask;
		} else {
			ret = -EINVAL;
		}
	}
	return ret;
}

int k_thread_cpu_mask_clear(k_tid_t thread)
{
	return cpu_mask_mod(thread, 0, 0xffffffff);
}

int k_thread_cpu_mask_enable_all(k_tid_t thread)
{
	return cpu_mask_mod(thread, 0xffffffff, 0);
}

int k_thread_cpu_mask_enable(k_tid_t thread, int cpu)
{
	return cpu_mask_mod(thread, BIT(cpu), 0);
}

int k_thread_cpu_mask_disable(k_tid_t thread, int cpu)
{
	return cpu_mask_mod(thread, 0, BIT(cpu));
}

#endif /* CONFIG_SCHED_CPU_MASK */
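
/* Usage sketch (illustrative only, not part of the kernel build): pinning a
 * thread to CPU 0.  As cpu_mask_mod() above shows, the mask can only be
 * changed while the thread is not runnable, so the thread is created with a
 * K_FOREVER start delay, masked, and only then started.  Stack size and
 * priority are hypothetical.
 *
 *	K_THREAD_STACK_DEFINE(pinned_stack, 1024);
 *	static struct k_thread pinned_thread;
 *
 *	void start_pinned(k_thread_entry_t entry)
 *	{
 *		k_tid_t tid = k_thread_create(&pinned_thread, pinned_stack,
 *					      K_THREAD_STACK_SIZEOF(pinned_stack),
 *					      entry, NULL, NULL, NULL,
 *					      7, 0, K_FOREVER);
 *
 *		k_thread_cpu_mask_clear(tid);
 *		k_thread_cpu_mask_enable(tid, 0);
 *		k_thread_start(tid);
 *	}
 */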

int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
{
	k_spinlock_key_t key;
	int ret;

	__ASSERT(((arch_is_in_isr() == false) ||
		  K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");

	key = k_spin_lock(&sched_spinlock);

	if ((thread->base.pended_on == &_current->base.join_waiters) ||
	    (thread == _current)) {
		ret = -EDEADLK;
		goto out;
	}

	if ((thread->base.thread_state & _THREAD_DEAD) != 0) {
		ret = 0;
		goto out;
	}

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		ret = -EBUSY;
		goto out;
	}

#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
	pending_current = _current;
#endif
	add_to_waitq_locked(_current, &thread->base.join_waiters);
	add_thread_timeout(_current, timeout);

	return z_swap(&sched_spinlock, key);
out:
	k_spin_unlock(&sched_spinlock, key);
	return ret;
}
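
/* Usage sketch (illustrative only, not part of the kernel build): waiting a
 * bounded time for a worker to exit and aborting it if it does not.  A
 * non-zero return covers the timeout case (-EAGAIN per the k_thread_join()
 * documentation) as well as the deadlock/busy cases handled above.  Stack
 * size and priority are hypothetical.
 *
 *	K_THREAD_STACK_DEFINE(worker_stack, 1024);
 *	static struct k_thread worker_thread;
 *
 *	void run_job(k_thread_entry_t job)
 *	{
 *		k_thread_create(&worker_thread, worker_stack,
 *				K_THREAD_STACK_SIZEOF(worker_stack),
 *				job, NULL, NULL, NULL, 5, 0, K_NO_WAIT);
 *
 *		if (k_thread_join(&worker_thread, K_SECONDS(2)) != 0) {
 *			k_thread_abort(&worker_thread);
 *		}
 *	}
 */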

#ifdef CONFIG_USERSPACE
/* Special case: don't oops if the thread is uninitialized.  This is because
 * the initialization bit does double-duty for thread objects: if it is clear,
 * the thread object is either truly uninitialized, or the thread ran and
 * exited for some reason.
 *
 * Return true in this case, indicating that the caller should do nothing and
 * report success.
 */
static bool thread_obj_validate(struct k_thread *thread)
{
	struct z_object *ko = z_object_find(thread);
	int ret = z_object_validate(ko, K_OBJ_THREAD, _OBJ_INIT_TRUE);

	switch (ret) {
	case 0:
		return false;
	case -EINVAL:
		return true;
	default:
#ifdef CONFIG_LOG
		z_dump_object_error(ret, thread, ko, K_OBJ_THREAD);
#endif
		Z_OOPS(Z_SYSCALL_VERIFY_MSG(ret, "access denied"));
	}
	CODE_UNREACHABLE;
}

static inline int z_vrfy_k_thread_join(struct k_thread *thread,
				       k_timeout_t timeout)
{
	if (thread_obj_validate(thread)) {
		return 0;
	}

	return z_impl_k_thread_join(thread, timeout);
}
#include <syscalls/k_thread_join_mrsh.c>

static inline void z_vrfy_k_thread_abort(k_tid_t thread)
{
	if (thread_obj_validate(thread)) {
		return;
	}

	Z_OOPS(Z_SYSCALL_VERIFY_MSG(!(thread->base.user_options & K_ESSENTIAL),
				    "aborting essential thread %p", thread));

	z_impl_k_thread_abort((struct k_thread *)thread);
}
#include <syscalls/k_thread_abort_mrsh.c>
#endif /* CONFIG_USERSPACE */