/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <kernel.h>
#include <ksched.h>
#include <spinlock.h>
#include <sched_priq.h>
#include <wait_q.h>
#include <kswap.h>
#include <kernel_arch_func.h>
#include <syscall_handler.h>
#include <drivers/system_timer.h>

#if defined(CONFIG_SCHED_DUMB)
#define _priq_run_add		_priq_dumb_add
#define _priq_run_remove	_priq_dumb_remove
#define _priq_run_best		_priq_dumb_best
#elif defined(CONFIG_SCHED_SCALABLE)
#define _priq_run_add		_priq_rb_add
#define _priq_run_remove	_priq_rb_remove
#define _priq_run_best		_priq_rb_best
#elif defined(CONFIG_SCHED_MULTIQ)
#define _priq_run_add		_priq_mq_add
#define _priq_run_remove	_priq_mq_remove
#define _priq_run_best		_priq_mq_best
#endif

#if defined(CONFIG_WAITQ_SCALABLE)
#define _priq_wait_add		_priq_rb_add
#define _priq_wait_remove	_priq_rb_remove
#define _priq_wait_best		_priq_rb_best
#elif defined(CONFIG_WAITQ_DUMB)
#define _priq_wait_add		_priq_dumb_add
#define _priq_wait_remove	_priq_dumb_remove
#define _priq_wait_best		_priq_dumb_best
#endif

/* the only struct z_kernel instance */
struct z_kernel _kernel;

static struct k_spinlock sched_lock;

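/* Scoped locking helper: the for()-based construct below takes the
 * spinlock, runs its body exactly once, then releases the lock as the
 * loop's final expression executes.  A minimal usage sketch:
 *
 *	LOCKED(&sched_lock) {
 *		...manipulate the run queue...
 *	}
 */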
#define LOCKED(lck) for (k_spinlock_key_t __i = {},			\
			 __key = k_spin_lock(lck);			\
			 !__i.key;					\
			 k_spin_unlock(lck, __key), __i.key = 1)

static inline int _is_preempt(struct k_thread *thread)
{
#ifdef CONFIG_PREEMPT_ENABLED
	/* explanation in kernel_structs.h */
	return thread->base.preempt <= _PREEMPT_THRESHOLD;
#else
	return 0;
#endif
}

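/* Meta-IRQ threads occupy the CONFIG_NUM_METAIRQ_PRIORITIES highest
 * (most negative) priorities; should_preempt() below treats a meta-IRQ
 * target as always allowed to preempt, even over cooperative threads.
 */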
static inline int is_metairq(struct k_thread *thread)
{
#if CONFIG_NUM_METAIRQ_PRIORITIES > 0
	return (thread->base.prio - K_HIGHEST_THREAD_PRIO)
		< CONFIG_NUM_METAIRQ_PRIORITIES;
#else
	return 0;
#endif
}

#if CONFIG_ASSERT
static inline int _is_thread_dummy(struct k_thread *thread)
{
	return !!(thread->base.thread_state & _THREAD_DUMMY);
}
#endif

static inline int _is_idle(struct k_thread *thread)
{
#ifdef CONFIG_SMP
	return thread->base.is_idle;
#else
	extern struct k_thread * const _idle_thread;

	return thread == _idle_thread;
#endif
}

bool _is_t1_higher_prio_than_t2(struct k_thread *t1, struct k_thread *t2)
{
	if (t1->base.prio < t2->base.prio) {
		return true;
	}

#ifdef CONFIG_SCHED_DEADLINE
	/* Note that we don't care about wraparound conditions.  The
	 * expectation is that the application will have arranged to
	 * block the threads, change their priorities or reset their
	 * deadlines when the job is complete.  Letting the deadlines
	 * go negative is fine and in fact prevents aliasing bugs.
	 */
	if (t1->base.prio == t2->base.prio) {
		int now = (int) k_cycle_get_32();
		int dt1 = t1->base.prio_deadline - now;
		int dt2 = t2->base.prio_deadline - now;

		return dt1 < dt2;
	}
#endif

	return false;
}
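/* Worked example of the deadline math above, with hypothetical cycle
 * counts: if now == (int)0xFFFFFFF0 (i.e. -16) and t1's deadline is
 * 0x00000010, then dt1 == 32 (still in the future); a t2 deadline of
 * 0xFFFFFFE0 gives dt2 == -16 (already past), so t2 wins the tie.
 * The signed subtraction keeps the comparison correct across the
 * 32-bit wraparound.
 */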

static int should_preempt(struct k_thread *th, int preempt_ok)
{
	/* Preemption is OK if it's being explicitly allowed by
	 * software state (e.g. the thread called k_yield())
	 */
	if (preempt_ok) {
		return 1;
	}

	/* Or if we're pended/suspended/dummy (duh) */
	if (!_current || !_is_thread_ready(_current)) {
		return 1;
	}

	/* Otherwise we have to be running a preemptible thread or
	 * switching to a metairq
	 */
	if (_is_preempt(_current) || is_metairq(th)) {
		return 1;
	}

	/* The idle threads can look "cooperative" if there are no
	 * preemptible priorities (this is sort of an API glitch).
	 * They must always be preemptible.
	 */
	if (_is_idle(_current)) {
		return 1;
	}

	return 0;
}

static struct k_thread *next_up(void)
{
#ifndef CONFIG_SMP
	/* In uniprocessor mode, we can leave the current thread in
	 * the queue (actually we have to, otherwise the assembly
	 * context switch code for all architectures would be
	 * responsible for putting it back in _Swap and ISR return!),
	 * which makes this choice simple.
	 */
	struct k_thread *th = _priq_run_best(&_kernel.ready_q.runq);

	return th ? th : _current_cpu->idle_thread;
#else

	/* Under SMP, the "cache" mechanism for selecting the next
	 * thread doesn't work, so we have more work to do to test
	 * _current against the best choice from the queue.
	 *
	 * Subtle note on "queued": in SMP mode, _current does not
	 * live in the queue, so this isn't exactly the same thing as
	 * "ready", it means "is _current already added back to the
	 * queue such that we don't want to re-add it".
	 */
	int queued = _is_thread_queued(_current);
	int active = !_is_thread_prevented_from_running(_current);

	/* Choose the best thread that is not current */
	struct k_thread *th = _priq_run_best(&_kernel.ready_q.runq);
	if (th == NULL) {
		th = _current_cpu->idle_thread;
	}

	if (active) {
		if (!queued &&
		    !_is_t1_higher_prio_than_t2(th, _current)) {
			th = _current;
		}

		if (!should_preempt(th, _current_cpu->swap_ok)) {
			th = _current;
		}
	}

	/* Put _current back into the queue */
	if (th != _current && active && !_is_idle(_current) && !queued) {
		_priq_run_add(&_kernel.ready_q.runq, _current);
		_mark_thread_as_queued(_current);
	}

	/* Take the new _current out of the queue */
	if (_is_thread_queued(th)) {
		_priq_run_remove(&_kernel.ready_q.runq, th);
	}
	_mark_thread_as_not_queued(th);

	return th;
#endif
}

#ifdef CONFIG_TIMESLICING

static int slice_time;
static int slice_max_prio;

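/* Re-arms the per-CPU slice counter.  The counter is decremented by
 * z_time_slice() on each tick announcement; we also make sure a timer
 * interrupt is requested no later than slice_time ticks out, unless an
 * earlier timeout is already pending.
 */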
static void reset_time_slice(void)
{
	int to = _get_next_timeout_expiry();

	/* Add the elapsed time since the last announced tick to the
	 * slice count, as we'll see those "expired" ticks arrive in a
	 * FUTURE z_time_slice() call.
	 */
	_current_cpu->slice_ticks = slice_time + z_clock_elapsed();

	if (to == K_FOREVER || slice_time < to) {
		z_clock_set_timeout(slice_time, false);
	}
}

void k_sched_time_slice_set(s32_t duration_in_ms, int prio)
{
	LOCKED(&sched_lock) {
		_current_cpu->slice_ticks = 0;
		slice_time = _ms_to_ticks(duration_in_ms);
		slice_max_prio = prio;
		reset_time_slice();
	}
}
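/* For example (illustrative values only), k_sched_time_slice_set(20, 0)
 * gives every preemptible thread at priority 0 or lower a 20 ms slice,
 * while a duration of 0 disables time slicing altogether.
 */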

static inline int sliceable(struct k_thread *t)
{
	return _is_preempt(t)
		&& !_is_prio_higher(t->base.prio, slice_max_prio)
		&& !_is_idle(t)
		&& !_is_thread_timeout_active(t);
}

/* Called out of each timer interrupt */
void z_time_slice(int ticks)
{
	if (slice_time && sliceable(_current)) {
		if (ticks >= _current_cpu->slice_ticks) {
			_move_thread_to_end_of_prio_q(_current);
			reset_time_slice();
		} else {
			_current_cpu->slice_ticks -= ticks;
		}
	}
}
#else
static void reset_time_slice(void) { /* !CONFIG_TIMESLICING */ }
#endif

static void update_cache(int preempt_ok)
{
#ifndef CONFIG_SMP
	struct k_thread *th = next_up();

	if (should_preempt(th, preempt_ok)) {
		if (th != _current) {
			reset_time_slice();
		}
		_kernel.ready_q.cache = th;
	} else {
		_kernel.ready_q.cache = _current;
	}

#else
	/* The way this works is that the CPU record keeps its
	 * "cooperative swapping is OK" flag until the next reschedule
	 * call or context switch.  It doesn't need to be tracked per
	 * thread because if the thread gets preempted for whatever
	 * reason the scheduler will make the same decision anyway.
	 */
	_current_cpu->swap_ok = preempt_ok;
#endif
}

void _add_thread_to_ready_q(struct k_thread *thread)
{
	LOCKED(&sched_lock) {
		_priq_run_add(&_kernel.ready_q.runq, thread);
		_mark_thread_as_queued(thread);
		update_cache(0);
	}
}

void _move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	LOCKED(&sched_lock) {
		_priq_run_remove(&_kernel.ready_q.runq, thread);
		_priq_run_add(&_kernel.ready_q.runq, thread);
		_mark_thread_as_queued(thread);
		update_cache(thread == _current);
	}
}

void _remove_thread_from_ready_q(struct k_thread *thread)
{
	LOCKED(&sched_lock) {
		if (_is_thread_queued(thread)) {
			_priq_run_remove(&_kernel.ready_q.runq, thread);
			_mark_thread_as_not_queued(thread);
			update_cache(thread == _current);
		}
	}
}

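/* Take @thread off the run queue, mark it pending on @wait_q (which may
 * be NULL) and, for finite timeouts, arm its thread timeout.  The caller
 * is responsible for the subsequent _Swap() or reschedule.
 */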
static void pend(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
{
	_remove_thread_from_ready_q(thread);
	_mark_thread_as_pending(thread);

	if (wait_q != NULL) {
		thread->base.pended_on = wait_q;
		_priq_wait_add(&wait_q->waitq, thread);
	}

	if (timeout != K_FOREVER) {
		s32_t ticks = _TICK_ALIGN + _ms_to_ticks(timeout);

		_add_thread_timeout(thread, ticks);
	}

	sys_trace_thread_pend(thread);
}

void _pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
{
	__ASSERT_NO_MSG(thread == _current || _is_thread_dummy(thread));
	pend(thread, wait_q, timeout);
}

static _wait_q_t *pended_on(struct k_thread *thread)
{
	__ASSERT_NO_MSG(thread->base.pended_on);

	return thread->base.pended_on;
}

struct k_thread *_find_first_thread_to_unpend(_wait_q_t *wait_q,
					      struct k_thread *from)
{
	ARG_UNUSED(from);

	struct k_thread *ret = NULL;

	LOCKED(&sched_lock) {
		ret = _priq_wait_best(&wait_q->waitq);
	}

	return ret;
}

void _unpend_thread_no_timeout(struct k_thread *thread)
{
	LOCKED(&sched_lock) {
		_priq_wait_remove(&pended_on(thread)->waitq, thread);
		_mark_thread_as_not_pending(thread);
	}

	thread->base.pended_on = NULL;
}

#ifdef CONFIG_SYS_CLOCK_EXISTS
/* Timeout handler for *_thread_timeout() APIs */
void z_thread_timeout(struct _timeout *to)
{
	struct k_thread *th = CONTAINER_OF(to, struct k_thread, base.timeout);

	if (th->base.pended_on != NULL) {
		_unpend_thread_no_timeout(th);
	}
	_mark_thread_as_started(th);
	_ready_thread(th);
}
#endif

int _pend_current_thread(u32_t key, _wait_q_t *wait_q, s32_t timeout)
{
	pend(_current, wait_q, timeout);
	return _Swap(key);
}

struct k_thread *_unpend_first_thread(_wait_q_t *wait_q)
{
	struct k_thread *t = _unpend1_no_timeout(wait_q);

	if (t != NULL) {
		(void)_abort_thread_timeout(t);
	}

	return t;
}

void _unpend_thread(struct k_thread *thread)
{
	_unpend_thread_no_timeout(thread);
	(void)_abort_thread_timeout(thread);
}

/* FIXME: this API is glitchy when used in SMP.  If the thread is
 * currently scheduled on the other CPU, it will silently set its
 * priority but nothing will cause a reschedule until the next
 * interrupt.  An audit seems to show that all current usage is to set
 * priorities on either _current or a pended thread, though, so it's
 * fine for now.
 */
void _thread_priority_set(struct k_thread *thread, int prio)
{
	bool need_sched = false;

	LOCKED(&sched_lock) {
		need_sched = _is_thread_ready(thread);

		if (need_sched) {
			_priq_run_remove(&_kernel.ready_q.runq, thread);
			thread->base.prio = prio;
			_priq_run_add(&_kernel.ready_q.runq, thread);
			update_cache(1);
		} else {
			thread->base.prio = prio;
		}
	}
	sys_trace_thread_priority_set(thread);

	if (need_sched) {
		_reschedule(irq_lock());
	}
}

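/* Reschedule point: releases the irq_lock key, swapping to the
 * scheduler's current choice of thread where appropriate (never from an
 * ISR, and on SMP only when the CPU's swap_ok flag is set); otherwise it
 * simply unlocks and returns.
 */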
void _reschedule(u32_t key)
{
#ifdef CONFIG_SMP
	if (!_current_cpu->swap_ok) {
		goto noswap;
	}

	_current_cpu->swap_ok = 0;
#endif

	if (_is_in_isr()) {
		goto noswap;
	}

#ifdef CONFIG_SMP
	(void)_Swap(key);
	return;
#else
	if (_get_next_ready_thread() != _current) {
		(void)_Swap(key);
		return;
	}
#endif

 noswap:
	irq_unlock(key);
}

void k_sched_lock(void)
{
	LOCKED(&sched_lock) {
		_sched_lock();
	}
}

void k_sched_unlock(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
	__ASSERT(_current->base.sched_locked != 0, "");
	__ASSERT(!_is_in_isr(), "");

	LOCKED(&sched_lock) {
		++_current->base.sched_locked;
		update_cache(1);
	}

	K_DEBUG("scheduler unlocked (%p:%d)\n",
		_current, _current->base.sched_locked);

	_reschedule(irq_lock());
#endif
}

#ifdef CONFIG_SMP
struct k_thread *_get_next_ready_thread(void)
{
	struct k_thread *ret = NULL;

	LOCKED(&sched_lock) {
		ret = next_up();
	}

	return ret;
}
#endif

#ifdef CONFIG_USE_SWITCH
void *_get_next_switch_handle(void *interrupted)
{
	_current->switch_handle = interrupted;

#ifdef CONFIG_SMP
	LOCKED(&sched_lock) {
		struct k_thread *th = next_up();

		if (_current != th) {
			reset_time_slice();
			_current_cpu->swap_ok = 0;
#ifdef CONFIG_TRACING
			sys_trace_thread_switched_out();
#endif
			_current = th;
#ifdef CONFIG_TRACING
			sys_trace_thread_switched_in();
#endif
		}
	}

#else
#ifdef CONFIG_TRACING
	sys_trace_thread_switched_out();
#endif
	_current = _get_next_ready_thread();
#ifdef CONFIG_TRACING
	sys_trace_thread_switched_in();
#endif
#endif

	_check_stack_sentinel();

	return _current->switch_handle;
}
#endif

void _priq_dumb_add(sys_dlist_t *pq, struct k_thread *thread)
{
	struct k_thread *t;

	__ASSERT_NO_MSG(!_is_idle(thread));

	SYS_DLIST_FOR_EACH_CONTAINER(pq, t, base.qnode_dlist) {
		if (_is_t1_higher_prio_than_t2(thread, t)) {
			sys_dlist_insert_before(pq, &t->base.qnode_dlist,
						&thread->base.qnode_dlist);
			return;
		}
	}

	sys_dlist_append(pq, &thread->base.qnode_dlist);
}

void _priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread)
{
	__ASSERT_NO_MSG(!_is_idle(thread));

	sys_dlist_remove(&thread->base.qnode_dlist);
}

struct k_thread *_priq_dumb_best(sys_dlist_t *pq)
{
	return CONTAINER_OF(sys_dlist_peek_head(pq),
			    struct k_thread, base.qnode_dlist);
}

bool _priq_rb_lessthan(struct rbnode *a, struct rbnode *b)
{
	struct k_thread *ta, *tb;

	ta = CONTAINER_OF(a, struct k_thread, base.qnode_rb);
	tb = CONTAINER_OF(b, struct k_thread, base.qnode_rb);

	if (_is_t1_higher_prio_than_t2(ta, tb)) {
		return true;
	} else if (_is_t1_higher_prio_than_t2(tb, ta)) {
		return false;
	} else {
		return ta->base.order_key < tb->base.order_key ? 1 : 0;
	}
}

void _priq_rb_add(struct _priq_rb *pq, struct k_thread *thread)
{
	struct k_thread *t;

	__ASSERT_NO_MSG(!_is_idle(thread));

	thread->base.order_key = pq->next_order_key++;

	/* Renumber at wraparound.  This is tiny code, and in practice
	 * will almost never be hit on real systems.  BUT on very
	 * long-running systems where a priq never completely empties
	 * AND that contains very large numbers of threads, it can be
	 * a latency glitch to loop over all the threads like this.
	 */
	if (!pq->next_order_key) {
		RB_FOR_EACH_CONTAINER(&pq->tree, t, base.qnode_rb) {
			t->base.order_key = pq->next_order_key++;
		}
	}

	rb_insert(&pq->tree, &thread->base.qnode_rb);
}

void _priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread)
{
	__ASSERT_NO_MSG(!_is_idle(thread));

	rb_remove(&pq->tree, &thread->base.qnode_rb);

	if (!pq->tree.root) {
		pq->next_order_key = 0;
	}
}

struct k_thread *_priq_rb_best(struct _priq_rb *pq)
{
	struct rbnode *n = rb_get_min(&pq->tree);

	return CONTAINER_OF(n, struct k_thread, base.qnode_rb);
}

#ifdef CONFIG_SCHED_MULTIQ
# if (K_LOWEST_THREAD_PRIO - K_HIGHEST_THREAD_PRIO) > 31
# error Too many priorities for multiqueue scheduler (max 32)
# endif
#endif

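/* The multiq backend keeps one dlist per priority plus a bitmask of
 * non-empty queues: bit N set means the list for priority
 * (K_HIGHEST_THREAD_PRIO + N) holds at least one thread, so the best
 * thread lives in the queue selected by the lowest set bit.
 */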
void _priq_mq_add(struct _priq_mq *pq, struct k_thread *thread)
{
	int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;

	sys_dlist_append(&pq->queues[priority_bit], &thread->base.qnode_dlist);
	pq->bitmask |= (1 << priority_bit);
}

void _priq_mq_remove(struct _priq_mq *pq, struct k_thread *thread)
{
	int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;

	sys_dlist_remove(&thread->base.qnode_dlist);
	if (sys_dlist_is_empty(&pq->queues[priority_bit])) {
		pq->bitmask &= ~(1 << priority_bit);
	}
}

struct k_thread *_priq_mq_best(struct _priq_mq *pq)
{
	if (!pq->bitmask) {
		return NULL;
	}

	sys_dlist_t *l = &pq->queues[__builtin_ctz(pq->bitmask)];

	return CONTAINER_OF(sys_dlist_peek_head(l),
			    struct k_thread, base.qnode_dlist);
}

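/* Wake every thread pended on @waitq.  Returns nonzero if at least one
 * thread was readied, i.e. the caller should follow up with a
 * reschedule.
 */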
int _unpend_all(_wait_q_t *waitq)
{
	int need_sched = 0;
	struct k_thread *th;

	while ((th = _waitq_head(waitq)) != NULL) {
		_unpend_thread(th);
		_ready_thread(th);
		need_sched = 1;
	}

	return need_sched;
}

void _sched_init(void)
{
#ifdef CONFIG_SCHED_DUMB
	sys_dlist_init(&_kernel.ready_q.runq);
#endif

#ifdef CONFIG_SCHED_SCALABLE
	_kernel.ready_q.runq = (struct _priq_rb) {
		.tree = {
			.lessthan_fn = _priq_rb_lessthan,
		}
	};
#endif

#ifdef CONFIG_SCHED_MULTIQ
	for (int i = 0; i < ARRAY_SIZE(_kernel.ready_q.runq.queues); i++) {
		sys_dlist_init(&_kernel.ready_q.runq.queues[i]);
	}
#endif

#ifdef CONFIG_TIMESLICING
	k_sched_time_slice_set(CONFIG_TIMESLICE_SIZE,
			       CONFIG_TIMESLICE_PRIORITY);
#endif
}

int _impl_k_thread_priority_get(k_tid_t thread)
{
	return thread->base.prio;
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER1_SIMPLE(k_thread_priority_get, K_OBJ_THREAD,
			  struct k_thread *);
#endif

void _impl_k_thread_priority_set(k_tid_t tid, int prio)
{
	/*
	 * Use NULL, since we cannot know what the entry point is (we do not
	 * keep track of it) and idle cannot change its priority.
	 */
	_ASSERT_VALID_PRIO(prio, NULL);
	__ASSERT(!_is_in_isr(), "");

	struct k_thread *thread = (struct k_thread *)tid;

	_thread_priority_set(thread, prio);
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_thread_priority_set, thread_p, prio)
{
	struct k_thread *thread = (struct k_thread *)thread_p;

	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(_is_valid_prio(prio, NULL),
				    "invalid thread priority %d", (int)prio));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG((s8_t)prio >= thread->base.prio,
				    "thread priority may only be downgraded (%d < %d)",
				    prio, thread->base.prio));

	_impl_k_thread_priority_set((k_tid_t)thread, prio);
	return 0;
}
#endif

#ifdef CONFIG_SCHED_DEADLINE
void _impl_k_thread_deadline_set(k_tid_t tid, int deadline)
{
	struct k_thread *th = tid;

	LOCKED(&sched_lock) {
		th->base.prio_deadline = k_cycle_get_32() + deadline;
		if (_is_thread_queued(th)) {
			_priq_run_remove(&_kernel.ready_q.runq, th);
			_priq_run_add(&_kernel.ready_q.runq, th);
		}
	}
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_thread_deadline_set, thread_p, deadline)
{
	struct k_thread *thread = (struct k_thread *)thread_p;

	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(deadline > 0,
				    "invalid thread deadline %d",
				    (int)deadline));

	_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
	return 0;
}
#endif
#endif

void _impl_k_yield(void)
{
	__ASSERT(!_is_in_isr(), "");

	if (!_is_idle(_current)) {
		LOCKED(&sched_lock) {
			_priq_run_remove(&_kernel.ready_q.runq, _current);
			_priq_run_add(&_kernel.ready_q.runq, _current);
			update_cache(1);
		}
	}

#ifdef CONFIG_SMP
	(void)_Swap(irq_lock());
#else
	if (_get_next_ready_thread() != _current) {
		(void)_Swap(irq_lock());
	}
#endif
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER0_SIMPLE_VOID(k_yield);
#endif

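/* Returns 0 once the requested duration has fully elapsed, or the
 * number of milliseconds remaining if the thread was woken up early
 * (e.g. via k_wakeup()).
 */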
s32_t _impl_k_sleep(s32_t duration)
{
#ifdef CONFIG_MULTITHREADING
	u32_t expected_wakeup_time;
	s32_t ticks;
	unsigned int key;

	__ASSERT(!_is_in_isr(), "");
	__ASSERT(duration != K_FOREVER, "");

	K_DEBUG("thread %p for %d ms\n", _current, duration);

	/* wait of 0 ms is treated as a 'yield' */
	if (duration == 0) {
		k_yield();
		return 0;
	}

	ticks = _TICK_ALIGN + _ms_to_ticks(duration);
	expected_wakeup_time = ticks + z_tick_get_32();
	key = irq_lock();

	_remove_thread_from_ready_q(_current);
	_add_thread_timeout(_current, ticks);

	(void)_Swap(key);

	ticks = expected_wakeup_time - z_tick_get_32();
	if (ticks > 0) {
		return __ticks_to_ms(ticks);
	}
#endif

	return 0;
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_sleep, duration)
{
	/* FIXME there were some discussions recently on whether we should
	 * relax this; the thread would simply be unscheduled until k_wakeup()
	 * is issued.
	 */
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(duration != K_FOREVER,
				    "sleeping forever not allowed"));

	return _impl_k_sleep(duration);
}
#endif

void _impl_k_wakeup(k_tid_t thread)
{
	unsigned int key = irq_lock();

	/* verify first if thread is not waiting on an object */
	if (_is_thread_pending(thread)) {
		irq_unlock(key);
		return;
	}

	if (_abort_thread_timeout(thread) == _INACTIVE) {
		irq_unlock(key);
		return;
	}

	_ready_thread(thread);

	if (_is_in_isr()) {
		irq_unlock(key);
	} else {
		_reschedule(key);
	}
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER1_SIMPLE_VOID(k_wakeup, K_OBJ_THREAD, k_tid_t);
#endif

k_tid_t _impl_k_current_get(void)
{
	return _current;
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER0_SIMPLE(k_current_get);
#endif

int _impl_k_is_preempt_thread(void)
{
	return !_is_in_isr() && _is_preempt(_current);
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER0_SIMPLE(k_is_preempt_thread);
#endif