/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <kernel.h>
#include <ksched.h>
#include <spinlock.h>
#include <sched_priq.h>
#include <wait_q.h>
#include <kswap.h>
#include <kernel_arch_func.h>
#include <syscall_handler.h>
#include <drivers/timer/system_timer.h>
#include <stdbool.h>
#include <kernel_internal.h>
#include <logging/log.h>
#include <sys/atomic.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

/* Maximum time between the time a self-aborting thread flags itself
 * DEAD and the last read or write to its stack memory (i.e. the time
 * of its next swap()). In theory this might be tuned per platform,
 * but in practice this conservative value should be safe.
 */
#define THREAD_ABORT_DELAY_US 500

#if defined(CONFIG_SCHED_DUMB)
#define _priq_run_add		z_priq_dumb_add
#define _priq_run_remove	z_priq_dumb_remove
# if defined(CONFIG_SCHED_CPU_MASK)
# define _priq_run_best		_priq_dumb_mask_best
# else
# define _priq_run_best		z_priq_dumb_best
# endif
#elif defined(CONFIG_SCHED_SCALABLE)
#define _priq_run_add		z_priq_rb_add
#define _priq_run_remove	z_priq_rb_remove
#define _priq_run_best		z_priq_rb_best
#elif defined(CONFIG_SCHED_MULTIQ)
#define _priq_run_add		z_priq_mq_add
#define _priq_run_remove	z_priq_mq_remove
#define _priq_run_best		z_priq_mq_best
#endif

#if defined(CONFIG_WAITQ_SCALABLE)
#define z_priq_wait_add		z_priq_rb_add
#define _priq_wait_remove	z_priq_rb_remove
#define _priq_wait_best		z_priq_rb_best
#elif defined(CONFIG_WAITQ_DUMB)
#define z_priq_wait_add		z_priq_dumb_add
#define _priq_wait_remove	z_priq_dumb_remove
#define _priq_wait_best		z_priq_dumb_best
#endif

/* the only struct z_kernel instance */
struct z_kernel _kernel;

static struct k_spinlock sched_spinlock;

static void update_cache(int);

#define LOCKED(lck) for (k_spinlock_key_t __i = {}, \
                         __key = k_spin_lock(lck); \
                         !__i.key; \
                         k_spin_unlock(lck, __key), __i.key = 1)
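/* The LOCKED() helper above runs its body exactly once with the given
 * spinlock held and releases the lock when the body falls off the end.
 * A minimal sketch of how it is used throughout this file:
 *
 *      LOCKED(&sched_spinlock) {
 *              update_cache(0);
 *      }
 *
 * Because the unlock sits in the for-loop's iteration expression,
 * bodies must not return or break out of the block, or the lock would
 * be left held.
 */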

static inline int is_preempt(struct k_thread *thread)
{
#ifdef CONFIG_PREEMPT_ENABLED
        /* explanation in kernel_structs.h */
        return thread->base.preempt <= _PREEMPT_THRESHOLD;
#else
        return 0;
#endif
}

static inline int is_metairq(struct k_thread *thread)
{
#if CONFIG_NUM_METAIRQ_PRIORITIES > 0
        return (thread->base.prio - K_HIGHEST_THREAD_PRIO)
                < CONFIG_NUM_METAIRQ_PRIORITIES;
#else
        return 0;
#endif
}

#if CONFIG_ASSERT
static inline bool is_thread_dummy(struct k_thread *thread)
{
        return (thread->base.thread_state & _THREAD_DUMMY) != 0U;
}
#endif

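/* Returns true if thread_1 should run in preference to thread_2:
 * strictly higher priority (numerically lower prio value) wins, and
 * with CONFIG_SCHED_DEADLINE the earlier deadline breaks priority ties.
 */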
bool z_is_t1_higher_prio_than_t2(struct k_thread *thread_1,
                                 struct k_thread *thread_2)
{
        if (thread_1->base.prio < thread_2->base.prio) {
                return true;
        }

#ifdef CONFIG_SCHED_DEADLINE
        /* Note that we don't care about wraparound conditions.  The
         * expectation is that the application will have arranged to
         * block the threads, change their priorities or reset their
         * deadlines when the job is complete.  Letting the deadlines
         * go negative is fine and in fact prevents aliasing bugs.
         */
        if (thread_1->base.prio == thread_2->base.prio) {
                int now = (int) k_cycle_get_32();
                int dt1 = thread_1->base.prio_deadline - now;
                int dt2 = thread_2->base.prio_deadline - now;

                return dt1 < dt2;
        }
#endif

        return false;
}

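/* Decides whether the scheduler is allowed to switch away from
 * _current to the candidate thread.  preempt_ok is nonzero when the
 * switch was explicitly requested (e.g. the thread yielded); otherwise
 * _current must be preemptible or unrunnable, or the candidate must be
 * a meta-IRQ thread.
 */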
static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
                                         int preempt_ok)
{
        /* Preemption is OK if it's being explicitly allowed by
         * software state (e.g. the thread called k_yield())
         */
        if (preempt_ok != 0) {
                return true;
        }

        __ASSERT(_current != NULL, "");

        /* Or if we're pended/suspended/dummy (duh) */
        if (z_is_thread_prevented_from_running(_current)) {
                return true;
        }

        /* Edge case on ARM where a thread can be pended out of an
         * interrupt handler before the "synchronous" swap starts
         * context switching.  Platforms with atomic swap can never
         * hit this.
         */
        if (IS_ENABLED(CONFIG_SWAP_NONATOMIC)
            && z_is_thread_timeout_active(thread)) {
                return true;
        }

        /* Otherwise we have to be running a preemptible thread or
         * switching to a metairq
         */
        if (is_preempt(_current) || is_metairq(thread)) {
                return true;
        }

        /* The idle threads can look "cooperative" if there are no
         * preemptible priorities (this is sort of an API glitch).
         * They must always be preemptible.
         */
        if (!IS_ENABLED(CONFIG_PREEMPT_ENABLED) &&
            z_is_idle_thread_object(_current)) {
                return true;
        }

        return false;
}

#ifdef CONFIG_SCHED_CPU_MASK
static ALWAYS_INLINE struct k_thread *_priq_dumb_mask_best(sys_dlist_t *pq)
{
        /* With masks enabled we need to be prepared to walk the list
         * looking for one we can run
         */
        struct k_thread *thread;

        SYS_DLIST_FOR_EACH_CONTAINER(pq, thread, base.qnode_dlist) {
                if ((thread->base.cpu_mask & BIT(_current_cpu->id)) != 0) {
                        return thread;
                }
        }
        return NULL;
}
#endif

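/* Pick the thread that should run next on this CPU.  On uniprocessor
 * builds _current stays in the run queue, so the best queued thread is
 * returned directly; under SMP the queue result is compared against
 * _current explicitly (see the comments below).  Callers hold
 * sched_spinlock.
 */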
static ALWAYS_INLINE struct k_thread *next_up(void)
{
        struct k_thread *thread;

        /* If a thread self-aborted we need the idle thread to clean it up
         * before any other thread can run on this CPU
         */
        if (_current_cpu->pending_abort != NULL) {
                return _current_cpu->idle_thread;
        }

        thread = _priq_run_best(&_kernel.ready_q.runq);

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && (CONFIG_NUM_COOP_PRIORITIES > 0)
        /* MetaIRQs must always attempt to return back to a
         * cooperative thread they preempted and not whatever happens
         * to be highest priority now.  The cooperative thread was
         * promised it wouldn't be preempted (by non-metairq threads)!
         */
        struct k_thread *mirqp = _current_cpu->metairq_preempted;

        if (mirqp != NULL && (thread == NULL || !is_metairq(thread))) {
                if (!z_is_thread_prevented_from_running(mirqp)) {
                        thread = mirqp;
                } else {
                        _current_cpu->metairq_preempted = NULL;
                }
        }
#endif

        /* If the current thread is marked aborting, mark it
         * dead so it will not be scheduled again.
         */
        if (_current->base.thread_state & _THREAD_ABORTING) {
                _current->base.thread_state |= _THREAD_DEAD;
#ifdef CONFIG_SMP
                _current_cpu->swap_ok = true;
#endif
        }

#ifndef CONFIG_SMP
        /* In uniprocessor mode, we can leave the current thread in
         * the queue (actually we have to, otherwise the assembly
         * context switch code for all architectures would be
         * responsible for putting it back in z_swap and ISR return!),
         * which makes this choice simple.
         */
        return thread ? thread : _current_cpu->idle_thread;
#else
        /* Under SMP, the "cache" mechanism for selecting the next
         * thread doesn't work, so we have more work to do to test
         * _current against the best choice from the queue.  Here, the
         * thread selected above represents "the best thread that is
         * not current".
         *
         * Subtle note on "queued": in SMP mode, _current does not
         * live in the queue, so this isn't exactly the same thing as
         * "ready", it means "is _current already added back to the
         * queue such that we don't want to re-add it".
         */
        int queued = z_is_thread_queued(_current);
        int active = !z_is_thread_prevented_from_running(_current);

        if (thread == NULL) {
                thread = _current_cpu->idle_thread;
        }

        if (active) {
                if (!queued &&
                    !z_is_t1_higher_prio_than_t2(thread, _current)) {
                        thread = _current;
                }

                if (!should_preempt(thread, _current_cpu->swap_ok)) {
                        thread = _current;
                }
        }

        /* Put _current back into the queue */
        if (thread != _current && active &&
            !z_is_idle_thread_object(_current) && !queued) {
                _priq_run_add(&_kernel.ready_q.runq, _current);
                z_mark_thread_as_queued(_current);
        }

        /* Take the new _current out of the queue */
        if (z_is_thread_queued(thread)) {
                _priq_run_remove(&_kernel.ready_q.runq, thread);
        }
        z_mark_thread_as_not_queued(thread);

        return thread;
#endif
}

static void move_thread_to_end_of_prio_q(struct k_thread *thread)
{
        if (z_is_thread_queued(thread)) {
                _priq_run_remove(&_kernel.ready_q.runq, thread);
        }
        _priq_run_add(&_kernel.ready_q.runq, thread);
        z_mark_thread_as_queued(thread);
        update_cache(thread == _current);
}

#ifdef CONFIG_TIMESLICING

static int slice_time;
static int slice_max_prio;

#ifdef CONFIG_SWAP_NONATOMIC
/* If z_swap() isn't atomic, then it's possible for a timer interrupt
 * to try to timeslice away _current after it has already pended
 * itself but before the corresponding context switch.  Treat that as
 * a noop condition in z_time_slice().
 */
static struct k_thread *pending_current;
#endif

void z_reset_time_slice(void)
{
        /* Add the elapsed time since the last announced tick to the
         * slice count, as we'll see those "expired" ticks arrive in a
         * FUTURE z_time_slice() call.
         */
        if (slice_time != 0) {
                _current_cpu->slice_ticks = slice_time + z_clock_elapsed();
                z_set_timeout_expiry(slice_time, false);
        }
}

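/* Public API: configure round-robin time slicing.  The slice is given
 * in milliseconds and a value of 0 disables slicing.  An illustrative
 * call (values are examples only):
 *
 *      k_sched_time_slice_set(20, 0);
 *
 * which gives preemptible threads of priority 0 and lower a 20 ms
 * slice before they are rotated to the back of their priority level.
 */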
void k_sched_time_slice_set(int32_t slice, int prio)
{
        LOCKED(&sched_spinlock) {
                _current_cpu->slice_ticks = 0;
                slice_time = k_ms_to_ticks_ceil32(slice);
                slice_max_prio = prio;
                z_reset_time_slice();
        }
}

static inline int sliceable(struct k_thread *thread)
{
        return is_preempt(thread)
                && !z_is_thread_prevented_from_running(thread)
                && !z_is_prio_higher(thread->base.prio, slice_max_prio)
                && !z_is_idle_thread_object(thread);
}

/* Called out of each timer interrupt */
void z_time_slice(int ticks)
{
        /* Hold sched_spinlock, so that activity on another CPU
         * (like a call to k_thread_abort() at just the wrong time)
         * won't affect the correctness of the decisions made here.
         * Also prevents any nested interrupts from changing
         * thread state to avoid similar issues, since this would
         * normally run with IRQs enabled.
         */
        k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

#ifdef CONFIG_SWAP_NONATOMIC
        if (pending_current == _current) {
                z_reset_time_slice();
                k_spin_unlock(&sched_spinlock, key);
                return;
        }
        pending_current = NULL;
#endif

        if (slice_time && sliceable(_current)) {
                if (ticks >= _current_cpu->slice_ticks) {
                        move_thread_to_end_of_prio_q(_current);
                        z_reset_time_slice();
                } else {
                        _current_cpu->slice_ticks -= ticks;
                }
        } else {
                _current_cpu->slice_ticks = 0;
        }
        k_spin_unlock(&sched_spinlock, key);
}
#endif

/* Track cooperative threads preempted by metairqs so we can return to
 * them specifically.  Called at the moment a new thread has been
 * selected to run.
 */
static void update_metairq_preempt(struct k_thread *thread)
{
#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && (CONFIG_NUM_COOP_PRIORITIES > 0)
        if (is_metairq(thread) && !is_metairq(_current) &&
            !is_preempt(_current)) {
                /* Record new preemption */
                _current_cpu->metairq_preempted = _current;
        } else if (!is_metairq(thread) && !z_is_idle_thread_object(thread)) {
                /* Returning from existing preemption */
                _current_cpu->metairq_preempted = NULL;
        }
#endif
}

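/* Refresh the scheduler's cached decision after run queue state
 * changes.  On uniprocessor builds this recomputes _kernel.ready_q.cache
 * (the thread the next z_swap() will pick); under SMP it only records
 * whether a cooperative swap is currently permitted, since the actual
 * per-CPU selection happens in next_up().  Callers hold sched_spinlock.
 */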
static void update_cache(int preempt_ok)
{
#ifndef CONFIG_SMP
        struct k_thread *thread = next_up();

        if (should_preempt(thread, preempt_ok)) {
#ifdef CONFIG_TIMESLICING
                if (thread != _current) {
                        z_reset_time_slice();
                }
#endif
                update_metairq_preempt(thread);
                _kernel.ready_q.cache = thread;
        } else {
                _kernel.ready_q.cache = _current;
        }

#else
        /* The way this works is that the CPU record keeps its
         * "cooperative swapping is OK" flag until the next reschedule
         * call or context switch.  It doesn't need to be tracked per
         * thread because if the thread gets preempted for whatever
         * reason the scheduler will make the same decision anyway.
         */
        _current_cpu->swap_ok = preempt_ok;
#endif
}

static void ready_thread(struct k_thread *thread)
{
#ifdef KERNEL_COHERENCE
        __ASSERT_NO_MSG(arch_mem_coherent(thread));
#endif

        /* If the thread is already queued, do not try to add it to the
         * run queue again
         */
        if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) {
                sys_trace_thread_ready(thread);
                _priq_run_add(&_kernel.ready_q.runq, thread);
                z_mark_thread_as_queued(thread);
                update_cache(0);
#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
                arch_sched_ipi();
#endif
        }
}

void z_ready_thread(struct k_thread *thread)
{
        LOCKED(&sched_spinlock) {
                ready_thread(thread);
        }
}

void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
{
        LOCKED(&sched_spinlock) {
                move_thread_to_end_of_prio_q(thread);
        }
}

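/* Mark a thread as started and make it runnable.  Threads created with
 * a delayed start reach here (typically via k_thread_start()) once they
 * are allowed to run; if the thread has already started this is a no-op.
 */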
void z_sched_start(struct k_thread *thread)
{
        k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

        if (z_has_thread_started(thread)) {
                k_spin_unlock(&sched_spinlock, key);
                return;
        }

        z_mark_thread_as_started(thread);
        ready_thread(thread);
        z_reschedule(&sched_spinlock, key);
}

void z_impl_k_thread_suspend(struct k_thread *thread)
{
        (void)z_abort_thread_timeout(thread);

        LOCKED(&sched_spinlock) {
                if (z_is_thread_queued(thread)) {
                        _priq_run_remove(&_kernel.ready_q.runq, thread);
                        z_mark_thread_as_not_queued(thread);
                }
                z_mark_thread_as_suspended(thread);
                update_cache(thread == _current);
        }

        if (thread == _current) {
                z_reschedule_unlocked();
        }
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_suspend(struct k_thread *thread)
{
        Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
        z_impl_k_thread_suspend(thread);
}
#include <syscalls/k_thread_suspend_mrsh.c>
#endif

void z_impl_k_thread_resume(struct k_thread *thread)
{
        k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

        /* Do not try to resume a thread that was not suspended */
        if (!z_is_thread_suspended(thread)) {
                k_spin_unlock(&sched_spinlock, key);
                return;
        }

        z_mark_thread_as_not_suspended(thread);
        ready_thread(thread);

        z_reschedule(&sched_spinlock, key);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_resume(struct k_thread *thread)
{
        Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
        z_impl_k_thread_resume(thread);
}
#include <syscalls/k_thread_resume_mrsh.c>
#endif

static _wait_q_t *pended_on(struct k_thread *thread)
{
        __ASSERT_NO_MSG(thread->base.pended_on);

        return thread->base.pended_on;
}

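/* Tear down a single thread on behalf of k_thread_abort(): mark it
 * ABORTING and then DEAD, pull it off any run or wait queue, wake any
 * threads blocked in k_thread_join() on it, and release its
 * userspace/tracing resources.  The final reschedule is left to the
 * caller.
 */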
void z_thread_single_abort(struct k_thread *thread)
{
        void (*fn_abort)(struct k_thread *aborted) = NULL;

        __ASSERT(!(thread->base.user_options & K_ESSENTIAL),
                 "essential thread aborted");
        __ASSERT(thread != _current || arch_is_in_isr(),
                 "self-abort detected");

        /* Prevent any of the further logic in this function from running more
         * than once
         */
        k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
        if ((thread->base.thread_state &
            (_THREAD_ABORTING | _THREAD_DEAD)) != 0) {
                LOG_DBG("Thread %p already dead or on the way out", thread);
                k_spin_unlock(&sched_spinlock, key);
                return;
        }
        thread->base.thread_state |= _THREAD_ABORTING;
        k_spin_unlock(&sched_spinlock, key);

        (void)z_abort_thread_timeout(thread);

        if (IS_ENABLED(CONFIG_SMP)) {
                z_sched_abort(thread);
        }

        LOCKED(&sched_spinlock) {
                LOG_DBG("Cleanup aborting thread %p", thread);
                struct k_thread *waiter;

                if (z_is_thread_ready(thread)) {
                        if (z_is_thread_queued(thread)) {
                                _priq_run_remove(&_kernel.ready_q.runq,
                                                 thread);
                                z_mark_thread_as_not_queued(thread);
                        }
                        update_cache(thread == _current);
                } else {
                        if (z_is_thread_pending(thread)) {
                                _priq_wait_remove(&pended_on(thread)->waitq,
                                                  thread);
                                z_mark_thread_as_not_pending(thread);
                                thread->base.pended_on = NULL;
                        }
                }

                /* Wake everybody up who was trying to join with this thread.
                 * A reschedule is invoked later by k_thread_abort().
                 */
                while ((waiter = z_waitq_head(&thread->base.join_waiters)) !=
                       NULL) {
                        (void)z_abort_thread_timeout(waiter);
                        _priq_wait_remove(&pended_on(waiter)->waitq, waiter);
                        z_mark_thread_as_not_pending(waiter);
                        waiter->base.pended_on = NULL;
                        arch_thread_return_value_set(waiter, 0);
                        ready_thread(waiter);
                }

                if (z_is_idle_thread_object(_current)) {
                        update_cache(1);
                }

                thread->base.thread_state |= _THREAD_DEAD;

                /* Read this here from the thread struct now instead of
                 * after we unlock
                 */
                fn_abort = thread->fn_abort;

                /* Keep inside the spinlock as these may use the contents
                 * of the thread object.  As soon as we release this spinlock,
                 * the thread object could be destroyed at any time.
                 */
                sys_trace_thread_abort(thread);
                z_thread_monitor_exit(thread);

#ifdef CONFIG_USERSPACE
                /* Remove this thread from its memory domain, which takes
                 * it off the domain's thread list and possibly also arch-
                 * specific tasks.
                 */
                z_mem_domain_exit_thread(thread);

                /* Revoke permissions on thread's ID so that it may be
                 * recycled
                 */
                z_thread_perms_all_clear(thread);

                /* Clear initialized state so that this thread object may be
                 * re-used and triggers errors if API calls are made on it from
                 * user threads
                 */
                z_object_uninit(thread->stack_obj);
                z_object_uninit(thread);
#endif
                /* Kernel should never look at the thread object again past
                 * this point unless another thread API is called.  If the
                 * object doesn't get corrupted, we'll catch other
                 * k_thread_abort()s on this object, although this is
                 * somewhat undefined behavior.  It must be safe to call
                 * k_thread_create() or free the object at this point.
                 */
#if __ASSERT_ON
                atomic_clear(&thread->base.cookie);
#endif
        }

        if (fn_abort != NULL) {
                /* Thread object provided to be freed or recycled */
                fn_abort(thread);
        }
}

static void unready_thread(struct k_thread *thread)
{
        if (z_is_thread_queued(thread)) {
                _priq_run_remove(&_kernel.ready_q.runq, thread);
                z_mark_thread_as_not_queued(thread);
        }
        update_cache(thread == _current);
}

void z_remove_thread_from_ready_q(struct k_thread *thread)
{
        LOCKED(&sched_spinlock) {
                unready_thread(thread);
        }
}

/* sched_spinlock must be held */
static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q)
{
        unready_thread(thread);
        z_mark_thread_as_pending(thread);
        sys_trace_thread_pend(thread);

        if (wait_q != NULL) {
                thread->base.pended_on = wait_q;
                z_priq_wait_add(&wait_q->waitq, thread);
        }
}

static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
{
        if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
#ifdef CONFIG_LEGACY_TIMEOUT_API
                timeout = _TICK_ALIGN + k_ms_to_ticks_ceil32(timeout);
#endif
                z_add_thread_timeout(thread, timeout);
        }
}

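/* Block a thread: remove it from the run queue, optionally place it on
 * a wait queue, and arm its timeout if one was given.  This only
 * changes scheduler state; the actual context switch happens later in
 * the caller (e.g. via z_swap()).
 */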
static void pend(struct k_thread *thread, _wait_q_t *wait_q,
                 k_timeout_t timeout)
{
#ifdef KERNEL_COHERENCE
        __ASSERT_NO_MSG(arch_mem_coherent(wait_q));
#endif

        LOCKED(&sched_spinlock) {
                add_to_waitq_locked(thread, wait_q);
        }

        add_thread_timeout(thread, timeout);
}

void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
                   k_timeout_t timeout)
{
        __ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
        pend(thread, wait_q, timeout);
}

ALWAYS_INLINE struct k_thread *z_find_first_thread_to_unpend(_wait_q_t *wait_q,
                                                struct k_thread *from)
{
        ARG_UNUSED(from);

        struct k_thread *ret = NULL;

        LOCKED(&sched_spinlock) {
                ret = _priq_wait_best(&wait_q->waitq);
        }

        return ret;
}

static inline void unpend_thread_no_timeout(struct k_thread *thread)
{
        _priq_wait_remove(&pended_on(thread)->waitq, thread);
        z_mark_thread_as_not_pending(thread);
        thread->base.pended_on = NULL;
}

ALWAYS_INLINE void z_unpend_thread_no_timeout(struct k_thread *thread)
{
        LOCKED(&sched_spinlock) {
                unpend_thread_no_timeout(thread);
        }
}

#ifdef CONFIG_SYS_CLOCK_EXISTS
/* Timeout handler for *_thread_timeout() APIs */
void z_thread_timeout(struct _timeout *timeout)
{
        LOCKED(&sched_spinlock) {
                struct k_thread *thread = CONTAINER_OF(timeout,
                                        struct k_thread, base.timeout);

                if (thread->base.pended_on != NULL) {
                        unpend_thread_no_timeout(thread);
                }
                z_mark_thread_as_started(thread);
                z_mark_thread_as_not_suspended(thread);
                ready_thread(thread);
        }
}
#endif

int z_pend_curr_irqlock(uint32_t key, _wait_q_t *wait_q, k_timeout_t timeout)
{
        pend(_current, wait_q, timeout);

#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
        pending_current = _current;

        int ret = z_swap_irqlock(key);
        LOCKED(&sched_spinlock) {
                if (pending_current == _current) {
                        pending_current = NULL;
                }
        }
        return ret;
#else
        return z_swap_irqlock(key);
#endif
}

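/* Pend _current on a wait queue and swap away, releasing the caller's
 * spinlock in the process.  A minimal sketch of how blocking kernel
 * primitives call this (object names are illustrative):
 *
 *      k_spinlock_key_t key = k_spin_lock(&obj_lock);
 *      if (obj_available) {
 *              k_spin_unlock(&obj_lock, key);
 *              return 0;
 *      }
 *      return z_pend_curr(&obj_lock, key, &obj_wait_q, timeout);
 *
 * The value returned is whatever the waking side passed to
 * arch_thread_return_value_set(), typically -EAGAIN if the timeout
 * expired first.
 */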
int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
                _wait_q_t *wait_q, k_timeout_t timeout)
{
#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
        pending_current = _current;
#endif
        pend(_current, wait_q, timeout);
        return z_swap(lock, key);
}

struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
{
        struct k_thread *thread = z_unpend1_no_timeout(wait_q);

        if (thread != NULL) {
                (void)z_abort_thread_timeout(thread);
        }

        return thread;
}

void z_unpend_thread(struct k_thread *thread)
{
        z_unpend_thread_no_timeout(thread);
        (void)z_abort_thread_timeout(thread);
}

/* Priority set utility that does no rescheduling, it just changes the
 * run queue state, returning true if a reschedule is needed later.
 */
bool z_set_prio(struct k_thread *thread, int prio)
{
        bool need_sched = false;

        LOCKED(&sched_spinlock) {
                need_sched = z_is_thread_ready(thread);

                if (need_sched) {
                        /* Don't requeue on SMP if it's the running thread */
                        if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) {
                                _priq_run_remove(&_kernel.ready_q.runq, thread);
                                thread->base.prio = prio;
                                _priq_run_add(&_kernel.ready_q.runq, thread);
                        } else {
                                thread->base.prio = prio;
                        }
                        update_cache(1);
                } else {
                        thread->base.prio = prio;
                }
        }
        sys_trace_thread_priority_set(thread);

        return need_sched;
}

void z_thread_priority_set(struct k_thread *thread, int prio)
{
        bool need_sched = z_set_prio(thread, prio);

#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
        arch_sched_ipi();
#endif

        if (need_sched && _current->base.sched_locked == 0) {
                z_reschedule_unlocked();
        }
}

static inline int resched(uint32_t key)
{
#ifdef CONFIG_SMP
        _current_cpu->swap_ok = 0;
#endif

        return arch_irq_unlocked(key) && !arch_is_in_isr();
}

/*
 * Check if the next ready thread is the same as the current thread
 * and save the trip if true.
 */
static inline bool need_swap(void)
{
        /* the SMP case will be handled in C based z_swap() */
#ifdef CONFIG_SMP
        return true;
#else
        struct k_thread *new_thread;

        /* Check if the next ready thread is the same as the current thread */
        new_thread = z_get_next_ready_thread();
        return new_thread != _current;
#endif
}

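/* Release the caller's lock and, when both permitted and useful,
 * switch to the best ready thread.  A minimal sketch of the usual
 * pattern (lock name illustrative):
 *
 *      k_spinlock_key_t key = k_spin_lock(&obj_lock);
 *      ... make one or more threads ready ...
 *      z_reschedule(&obj_lock, key);
 *
 * The lock is released on both paths, either inside z_swap() or via
 * k_spin_unlock().
 */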
void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
{
        if (resched(key.key) && need_swap()) {
                z_swap(lock, key);
        } else {
                k_spin_unlock(lock, key);
        }
}

void z_reschedule_irqlock(uint32_t key)
{
        if (resched(key)) {
                z_swap_irqlock(key);
        } else {
                irq_unlock(key);
        }
}

void k_sched_lock(void)
{
        LOCKED(&sched_spinlock) {
                z_sched_lock();
        }
}

void k_sched_unlock(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
        LOCKED(&sched_spinlock) {
                __ASSERT(_current->base.sched_locked != 0, "");
                __ASSERT(!arch_is_in_isr(), "");

                ++_current->base.sched_locked;
                update_cache(0);
        }

        LOG_DBG("scheduler unlocked (%p:%d)",
                _current, _current->base.sched_locked);

        z_reschedule_unlocked();
#endif
}

#ifdef CONFIG_SMP
struct k_thread *z_get_next_ready_thread(void)
{
        struct k_thread *ret = NULL;

        LOCKED(&sched_spinlock) {
                ret = next_up();
        }

        return ret;
}
#endif

/* Just a wrapper around _current = xxx with tracing */
static inline void set_current(struct k_thread *new_thread)
{
        z_thread_mark_switched_out();
        _current_cpu->current = new_thread;
}

Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 921 | #ifdef CONFIG_USE_SWITCH |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 922 | void *z_get_next_switch_handle(void *interrupted) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 923 | { |
Andrew Boie | ae0d1b2 | 2019-03-29 16:25:27 -0700 | [diff] [blame] | 924 | z_check_stack_sentinel(); |
| 925 | |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 926 | #ifdef CONFIG_SMP |
Patrik Flykt | cf2d579 | 2019-02-12 15:50:46 -0700 | [diff] [blame] | 927 | LOCKED(&sched_spinlock) { |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 928 | struct k_thread *old_thread = _current, *new_thread; |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 929 | |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 930 | old_thread->switch_handle = NULL; |
| 931 | new_thread = next_up(); |
| 932 | |
| 933 | if (old_thread != new_thread) { |
| 934 | update_metairq_preempt(new_thread); |
| 935 | wait_for_switch(new_thread); |
| 936 | arch_cohere_stacks(old_thread, interrupted, new_thread); |
Andy Ross | 11a050b | 2019-11-13 09:41:52 -0800 | [diff] [blame] | 937 | |
Andy Ross | cb3964f | 2019-08-16 21:29:26 -0700 | [diff] [blame] | 938 | #ifdef CONFIG_TIMESLICING |
| 939 | z_reset_time_slice(); |
| 940 | #endif |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 941 | _current_cpu->swap_ok = 0; |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 942 | set_current(new_thread); |
| 943 | |
Danny Oerndrup | c9d7840 | 2019-12-13 11:24:56 +0100 | [diff] [blame] | 944 | #ifdef CONFIG_SPIN_VALIDATE |
Andy Ross | 8c1bdda | 2019-02-20 10:07:31 -0800 | [diff] [blame] | 945 | /* Changed _current! Update the spinlock |
 | 946 | 			 * bookkeeping so the validation doesn't get
| 947 | * confused when the "wrong" thread tries to |
| 948 | * release the lock. |
| 949 | */ |
| 950 | z_spin_lock_set_owner(&sched_spinlock); |
| 951 | #endif |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 952 | } |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 953 | old_thread->switch_handle = interrupted; |
Benjamin Walsh | b8c2160 | 2016-12-23 19:34:41 -0500 | [diff] [blame] | 954 | } |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 955 | #else |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 956 | _current->switch_handle = interrupted; |
| 957 | set_current(z_get_next_ready_thread()); |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 958 | #endif |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 959 | return _current->switch_handle; |
| 960 | } |
Benjamin Walsh | b12a8e0 | 2016-12-14 15:24:12 -0500 | [diff] [blame] | 961 | #endif |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 962 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 963 | ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq, struct k_thread *thread) |
Andy Ross | 22642cf | 2018-04-02 18:24:58 -0700 | [diff] [blame] | 964 | { |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 965 | struct k_thread *t; |
Andy Ross | 22642cf | 2018-04-02 18:24:58 -0700 | [diff] [blame] | 966 | |
Andrew Boie | 8f0bb6a | 2019-09-21 18:36:23 -0700 | [diff] [blame] | 967 | __ASSERT_NO_MSG(!z_is_idle_thread_object(thread)); |
Andy Ross | 22642cf | 2018-04-02 18:24:58 -0700 | [diff] [blame] | 968 | |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 969 | SYS_DLIST_FOR_EACH_CONTAINER(pq, t, base.qnode_dlist) { |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 970 | if (z_is_t1_higher_prio_than_t2(thread, t)) { |
Andy Ross | eda4c02 | 2019-01-28 09:35:27 -0800 | [diff] [blame] | 971 | sys_dlist_insert(&t->base.qnode_dlist, |
| 972 | &thread->base.qnode_dlist); |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 973 | return; |
| 974 | } |
Andy Ross | 22642cf | 2018-04-02 18:24:58 -0700 | [diff] [blame] | 975 | } |
| 976 | |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 977 | sys_dlist_append(pq, &thread->base.qnode_dlist); |
Andy Ross | 22642cf | 2018-04-02 18:24:58 -0700 | [diff] [blame] | 978 | } |
| 979 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 980 | void z_priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread) |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 981 | { |
Andy Ross | dff6b71 | 2019-02-25 21:17:29 -0800 | [diff] [blame] | 982 | #if defined(CONFIG_SWAP_NONATOMIC) && defined(CONFIG_SCHED_DUMB) |
| 983 | if (pq == &_kernel.ready_q.runq && thread == _current && |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 984 | z_is_thread_prevented_from_running(thread)) { |
Andy Ross | dff6b71 | 2019-02-25 21:17:29 -0800 | [diff] [blame] | 985 | return; |
| 986 | } |
| 987 | #endif |
| 988 | |
Andrew Boie | 8f0bb6a | 2019-09-21 18:36:23 -0700 | [diff] [blame] | 989 | __ASSERT_NO_MSG(!z_is_idle_thread_object(thread)); |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 990 | |
| 991 | sys_dlist_remove(&thread->base.qnode_dlist); |
| 992 | } |
| 993 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 994 | struct k_thread *z_priq_dumb_best(sys_dlist_t *pq) |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 995 | { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 996 | struct k_thread *thread = NULL; |
Flavio Ceolin | 26be335 | 2018-11-15 15:03:32 -0800 | [diff] [blame] | 997 | sys_dnode_t *n = sys_dlist_peek_head(pq); |
| 998 | |
Peter A. Bigot | 692e103 | 2019-01-03 23:36:28 -0600 | [diff] [blame] | 999 | if (n != NULL) { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1000 | thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist); |
Peter A. Bigot | 692e103 | 2019-01-03 23:36:28 -0600 | [diff] [blame] | 1001 | } |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1002 | return thread; |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1003 | } |
| 1004 | |
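/* Illustrative sketch (not compiled): the "dumb" backend above is just
 * a dlist kept sorted by priority, giving O(n) insertion and O(1)
 * peek/removal.  The same pattern on a hypothetical item type:
 */
#if 0
struct item {
	sys_dnode_t node;
	int prio;		/* lower value == higher priority */
};

static void item_q_add(sys_dlist_t *pq, struct item *it)
{
	struct item *pos;

	SYS_DLIST_FOR_EACH_CONTAINER(pq, pos, node) {
		if (it->prio < pos->prio) {
			/* insert before the first lower-priority item */
			sys_dlist_insert(&pos->node, &it->node);
			return;
		}
	}
	sys_dlist_append(pq, &it->node);
}

static struct item *item_q_best(sys_dlist_t *pq)
{
	sys_dnode_t *n = sys_dlist_peek_head(pq);

	return (n == NULL) ? NULL : CONTAINER_OF(n, struct item, node);
}
#endif
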
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1005 | bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b) |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1006 | { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1007 | struct k_thread *thread_a, *thread_b; |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1008 | |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1009 | thread_a = CONTAINER_OF(a, struct k_thread, base.qnode_rb); |
| 1010 | thread_b = CONTAINER_OF(b, struct k_thread, base.qnode_rb); |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1011 | |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1012 | if (z_is_t1_higher_prio_than_t2(thread_a, thread_b)) { |
Flavio Ceolin | 02ed85b | 2018-09-20 15:43:57 -0700 | [diff] [blame] | 1013 | return true; |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1014 | } else if (z_is_t1_higher_prio_than_t2(thread_b, thread_a)) { |
Flavio Ceolin | 02ed85b | 2018-09-20 15:43:57 -0700 | [diff] [blame] | 1015 | return false; |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1016 | } else { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1017 | 		return thread_a->base.order_key < thread_b->base.order_key;
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1019 | } |
| 1020 | } |
| 1021 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1022 | void z_priq_rb_add(struct _priq_rb *pq, struct k_thread *thread) |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1023 | { |
| 1024 | struct k_thread *t; |
| 1025 | |
Andrew Boie | 8f0bb6a | 2019-09-21 18:36:23 -0700 | [diff] [blame] | 1026 | __ASSERT_NO_MSG(!z_is_idle_thread_object(thread)); |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1027 | |
| 1028 | thread->base.order_key = pq->next_order_key++; |
| 1029 | |
| 1030 | /* Renumber at wraparound. This is tiny code, and in practice |
| 1031 | * will almost never be hit on real systems. BUT on very |
 | 1032 | 	 * long-running systems with a priq that never completely
 | 1033 | 	 * empties AND contains very large numbers of threads, it can be
| 1034 | * a latency glitch to loop over all the threads like this. |
| 1035 | */ |
| 1036 | if (!pq->next_order_key) { |
| 1037 | RB_FOR_EACH_CONTAINER(&pq->tree, t, base.qnode_rb) { |
| 1038 | t->base.order_key = pq->next_order_key++; |
| 1039 | } |
| 1040 | } |
| 1041 | |
| 1042 | rb_insert(&pq->tree, &thread->base.qnode_rb); |
| 1043 | } |
| 1044 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1045 | void z_priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread) |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1046 | { |
Andy Ross | dff6b71 | 2019-02-25 21:17:29 -0800 | [diff] [blame] | 1047 | #if defined(CONFIG_SWAP_NONATOMIC) && defined(CONFIG_SCHED_SCALABLE) |
| 1048 | if (pq == &_kernel.ready_q.runq && thread == _current && |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1049 | z_is_thread_prevented_from_running(thread)) { |
Andy Ross | dff6b71 | 2019-02-25 21:17:29 -0800 | [diff] [blame] | 1050 | return; |
| 1051 | } |
| 1052 | #endif |
Andrew Boie | 8f0bb6a | 2019-09-21 18:36:23 -0700 | [diff] [blame] | 1053 | __ASSERT_NO_MSG(!z_is_idle_thread_object(thread)); |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1054 | |
| 1055 | rb_remove(&pq->tree, &thread->base.qnode_rb); |
| 1056 | |
| 1057 | if (!pq->tree.root) { |
| 1058 | pq->next_order_key = 0; |
| 1059 | } |
| 1060 | } |
| 1061 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1062 | struct k_thread *z_priq_rb_best(struct _priq_rb *pq) |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1063 | { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1064 | struct k_thread *thread = NULL; |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1065 | struct rbnode *n = rb_get_min(&pq->tree); |
| 1066 | |
Peter A. Bigot | 692e103 | 2019-01-03 23:36:28 -0600 | [diff] [blame] | 1067 | if (n != NULL) { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1068 | thread = CONTAINER_OF(n, struct k_thread, base.qnode_rb); |
Peter A. Bigot | 692e103 | 2019-01-03 23:36:28 -0600 | [diff] [blame] | 1069 | } |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1070 | return thread; |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1071 | } |
| 1072 | |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1073 | #ifdef CONFIG_SCHED_MULTIQ |
| 1074 | # if (K_LOWEST_THREAD_PRIO - K_HIGHEST_THREAD_PRIO) > 31 |
| 1075 | # error Too many priorities for multiqueue scheduler (max 32) |
| 1076 | # endif |
| 1077 | #endif |
| 1078 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1079 | ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq, struct k_thread *thread) |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1080 | { |
| 1081 | int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO; |
| 1082 | |
| 1083 | sys_dlist_append(&pq->queues[priority_bit], &thread->base.qnode_dlist); |
Flavio Ceolin | a996203 | 2019-02-26 10:14:04 -0800 | [diff] [blame] | 1084 | pq->bitmask |= BIT(priority_bit); |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1085 | } |
| 1086 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1087 | ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq, struct k_thread *thread) |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1088 | { |
Andy Ross | dff6b71 | 2019-02-25 21:17:29 -0800 | [diff] [blame] | 1089 | #if defined(CONFIG_SWAP_NONATOMIC) && defined(CONFIG_SCHED_MULTIQ) |
| 1090 | if (pq == &_kernel.ready_q.runq && thread == _current && |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1091 | z_is_thread_prevented_from_running(thread)) { |
Andy Ross | dff6b71 | 2019-02-25 21:17:29 -0800 | [diff] [blame] | 1092 | return; |
| 1093 | } |
| 1094 | #endif |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1095 | int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO; |
| 1096 | |
| 1097 | sys_dlist_remove(&thread->base.qnode_dlist); |
| 1098 | if (sys_dlist_is_empty(&pq->queues[priority_bit])) { |
Flavio Ceolin | a996203 | 2019-02-26 10:14:04 -0800 | [diff] [blame] | 1099 | pq->bitmask &= ~BIT(priority_bit); |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1100 | } |
| 1101 | } |
| 1102 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1103 | struct k_thread *z_priq_mq_best(struct _priq_mq *pq) |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1104 | { |
| 1105 | if (!pq->bitmask) { |
| 1106 | return NULL; |
| 1107 | } |
| 1108 | |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1109 | struct k_thread *thread = NULL; |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1110 | sys_dlist_t *l = &pq->queues[__builtin_ctz(pq->bitmask)]; |
Flavio Ceolin | 26be335 | 2018-11-15 15:03:32 -0800 | [diff] [blame] | 1111 | sys_dnode_t *n = sys_dlist_peek_head(l); |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1112 | |
Peter A. Bigot | 692e103 | 2019-01-03 23:36:28 -0600 | [diff] [blame] | 1113 | if (n != NULL) { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1114 | thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist); |
Peter A. Bigot | 692e103 | 2019-01-03 23:36:28 -0600 | [diff] [blame] | 1115 | } |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1116 | return thread; |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1117 | } |
| 1118 | |
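/* Illustrative sketch (not compiled): the multiq backend keeps one
 * dlist per priority plus a bitmask of non-empty queues, so picking
 * the best queue is a single count-trailing-zeros.  For example, a
 * mask of 0x22 (queues 1 and 5 non-empty) yields index 1, i.e.
 * priority K_HIGHEST_THREAD_PRIO + 1.
 */
#if 0
static int best_queue_index(uint32_t bitmask)
{
	return (bitmask == 0U) ? -1 : __builtin_ctz(bitmask);
}
#endif
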
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1119 | int z_unpend_all(_wait_q_t *wait_q) |
Andy Ross | 4ca0e07 | 2018-05-10 09:45:42 -0700 | [diff] [blame] | 1120 | { |
Andy Ross | ccf3bf7 | 2018-05-10 11:10:34 -0700 | [diff] [blame] | 1121 | int need_sched = 0; |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1122 | struct k_thread *thread; |
Andy Ross | 4ca0e07 | 2018-05-10 09:45:42 -0700 | [diff] [blame] | 1123 | |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1124 | while ((thread = z_waitq_head(wait_q)) != NULL) { |
| 1125 | z_unpend_thread(thread); |
| 1126 | z_ready_thread(thread); |
Andy Ross | 4ca0e07 | 2018-05-10 09:45:42 -0700 | [diff] [blame] | 1127 | need_sched = 1; |
| 1128 | } |
Andy Ross | ccf3bf7 | 2018-05-10 11:10:34 -0700 | [diff] [blame] | 1129 | |
| 1130 | return need_sched; |
Andy Ross | 4ca0e07 | 2018-05-10 09:45:42 -0700 | [diff] [blame] | 1131 | } |
| 1132 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1133 | void z_sched_init(void) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1134 | { |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1135 | #ifdef CONFIG_SCHED_DUMB |
| 1136 | sys_dlist_init(&_kernel.ready_q.runq); |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1137 | #endif |
| 1138 | |
| 1139 | #ifdef CONFIG_SCHED_SCALABLE |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1140 | _kernel.ready_q.runq = (struct _priq_rb) { |
| 1141 | .tree = { |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1142 | .lessthan_fn = z_priq_rb_lessthan, |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1143 | } |
| 1144 | }; |
| 1145 | #endif |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1146 | |
| 1147 | #ifdef CONFIG_SCHED_MULTIQ |
| 1148 | for (int i = 0; i < ARRAY_SIZE(_kernel.ready_q.runq.queues); i++) { |
| 1149 | sys_dlist_init(&_kernel.ready_q.runq.queues[i]); |
| 1150 | } |
| 1151 | #endif |
Piotr Zięcik | 4a39b9e | 2018-07-26 14:56:39 +0200 | [diff] [blame] | 1152 | |
| 1153 | #ifdef CONFIG_TIMESLICING |
| 1154 | k_sched_time_slice_set(CONFIG_TIMESLICE_SIZE, |
| 1155 | CONFIG_TIMESLICE_PRIORITY); |
| 1156 | #endif |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1157 | } |
| 1158 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1159 | int z_impl_k_thread_priority_get(k_tid_t thread) |
Allan Stephens | 399d0ad | 2016-10-07 13:41:34 -0500 | [diff] [blame] | 1160 | { |
Benjamin Walsh | f6ca7de | 2016-11-08 10:36:50 -0500 | [diff] [blame] | 1161 | return thread->base.prio; |
Allan Stephens | 399d0ad | 2016-10-07 13:41:34 -0500 | [diff] [blame] | 1162 | } |
| 1163 | |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1164 | #ifdef CONFIG_USERSPACE |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1165 | static inline int z_vrfy_k_thread_priority_get(k_tid_t thread) |
| 1166 | { |
| 1167 | Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD)); |
| 1168 | return z_impl_k_thread_priority_get(thread); |
| 1169 | } |
| 1170 | #include <syscalls/k_thread_priority_get_mrsh.c> |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1171 | #endif |
| 1172 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1173 | void z_impl_k_thread_priority_set(k_tid_t tid, int prio) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1174 | { |
Benjamin Walsh | 3cc2ba9 | 2016-11-08 15:44:05 -0500 | [diff] [blame] | 1175 | /* |
| 1176 | * Use NULL, since we cannot know what the entry point is (we do not |
| 1177 | * keep track of it) and idle cannot change its priority. |
| 1178 | */ |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1179 | Z_ASSERT_VALID_PRIO(prio, NULL); |
Andrew Boie | 4f77c2a | 2019-11-07 12:43:29 -0800 | [diff] [blame] | 1180 | __ASSERT(!arch_is_in_isr(), ""); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1181 | |
Benjamin Walsh | 3751123 | 2016-10-13 08:10:07 -0400 | [diff] [blame] | 1182 | struct k_thread *thread = (struct k_thread *)tid; |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1183 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1184 | z_thread_priority_set(thread, prio); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1185 | } |
| 1186 | |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1187 | #ifdef CONFIG_USERSPACE |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1188 | static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio) |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1189 | { |
Andrew Boie | 8345e5e | 2018-05-04 15:57:57 -0700 | [diff] [blame] | 1190 | Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD)); |
| 1191 | Z_OOPS(Z_SYSCALL_VERIFY_MSG(_is_valid_prio(prio, NULL), |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1192 | "invalid thread priority %d", prio)); |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1193 | Z_OOPS(Z_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio, |
Andrew Boie | 8345e5e | 2018-05-04 15:57:57 -0700 | [diff] [blame] | 1194 | "thread priority may only be downgraded (%d < %d)", |
| 1195 | prio, thread->base.prio)); |
Andrew Boie | 5008fed | 2017-10-08 10:11:24 -0700 | [diff] [blame] | 1196 | |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1197 | z_impl_k_thread_priority_set(thread, prio); |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1198 | } |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1199 | #include <syscalls/k_thread_priority_set_mrsh.c> |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1200 | #endif |
| 1201 | |
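/* Usage sketch (illustrative only): reading and lowering another
 * thread's priority from application code.  "worker_tid" is a
 * hypothetical thread id; note that from user mode a priority may
 * only be lowered (numerically raised), as enforced by the verifier
 * above.
 */
#if 0
static void demote_worker(k_tid_t worker_tid)
{
	int prio = k_thread_priority_get(worker_tid);

	/* make the worker strictly less urgent than it is now */
	k_thread_priority_set(worker_tid, prio + 1);
}
#endif
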
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1202 | #ifdef CONFIG_SCHED_DEADLINE |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1203 | void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline) |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1204 | { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1205 | struct k_thread *thread = tid; |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1206 | |
Patrik Flykt | cf2d579 | 2019-02-12 15:50:46 -0700 | [diff] [blame] | 1207 | LOCKED(&sched_spinlock) { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1208 | thread->base.prio_deadline = k_cycle_get_32() + deadline; |
| 1209 | if (z_is_thread_queued(thread)) { |
| 1210 | _priq_run_remove(&_kernel.ready_q.runq, thread); |
| 1211 | _priq_run_add(&_kernel.ready_q.runq, thread); |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1212 | } |
| 1213 | } |
| 1214 | } |
| 1215 | |
| 1216 | #ifdef CONFIG_USERSPACE |
Andy Ross | 075c94f | 2019-08-13 11:34:34 -0700 | [diff] [blame] | 1217 | static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline) |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1218 | { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1219 | struct k_thread *thread = tid; |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1220 | |
| 1221 | Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD)); |
| 1222 | Z_OOPS(Z_SYSCALL_VERIFY_MSG(deadline > 0, |
| 1223 | "invalid thread deadline %d", |
| 1224 | (int)deadline)); |
| 1225 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1226 | z_impl_k_thread_deadline_set((k_tid_t)thread, deadline); |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1227 | } |
Andy Ross | 075c94f | 2019-08-13 11:34:34 -0700 | [diff] [blame] | 1228 | #include <syscalls/k_thread_deadline_set_mrsh.c> |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1229 | #endif |
| 1230 | #endif |
| 1231 | |
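/* Usage sketch (illustrative only): with CONFIG_SCHED_DEADLINE,
 * threads of equal static priority are ordered earliest-deadline-first.
 * The deadline is relative and expressed in hardware cycles; the
 * ~2 ms value below is an arbitrary example.
 */
#if 0
static void start_work_with_deadline(void)
{
	uint32_t two_ms_in_cycles = sys_clock_hw_cycles_per_sec() / 500U;

	k_thread_deadline_set(k_current_get(), (int)two_ms_in_cycles);
	/* ... work that should complete within roughly 2 ms ... */
}
#endif
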
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1232 | void z_impl_k_yield(void) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1233 | { |
Andrew Boie | 4f77c2a | 2019-11-07 12:43:29 -0800 | [diff] [blame] | 1234 | __ASSERT(!arch_is_in_isr(), ""); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1235 | |
Andrew Boie | 8f0bb6a | 2019-09-21 18:36:23 -0700 | [diff] [blame] | 1236 | if (!z_is_idle_thread_object(_current)) { |
Patrik Flykt | cf2d579 | 2019-02-12 15:50:46 -0700 | [diff] [blame] | 1237 | LOCKED(&sched_spinlock) { |
Andy Ross | ea1c99b | 2019-02-21 11:09:39 -0800 | [diff] [blame] | 1238 | if (!IS_ENABLED(CONFIG_SMP) || |
| 1239 | z_is_thread_queued(_current)) { |
| 1240 | _priq_run_remove(&_kernel.ready_q.runq, |
| 1241 | _current); |
Andy Ross | ea1c99b | 2019-02-21 11:09:39 -0800 | [diff] [blame] | 1242 | } |
Andy Ross | b0158cc | 2019-08-16 13:14:51 -0700 | [diff] [blame] | 1243 | _priq_run_add(&_kernel.ready_q.runq, _current); |
| 1244 | z_mark_thread_as_queued(_current); |
Andy Ross | 1856e22 | 2018-05-21 11:48:35 -0700 | [diff] [blame] | 1245 | update_cache(1); |
| 1246 | } |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1247 | } |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1248 | z_swap_unlocked(); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1249 | } |
| 1250 | |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1251 | #ifdef CONFIG_USERSPACE |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1252 | static inline void z_vrfy_k_yield(void) |
| 1253 | { |
| 1254 | z_impl_k_yield(); |
| 1255 | } |
| 1256 | #include <syscalls/k_yield_mrsh.c> |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1257 | #endif |
| 1258 | |
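/* Usage sketch (illustrative only): a cooperative-priority thread in a
 * long computation can call k_yield() periodically so threads of equal
 * priority get CPU time.  process_chunk() and NUM_CHUNKS are
 * hypothetical.
 */
#if 0
static void crunch(void)
{
	for (int i = 0; i < NUM_CHUNKS; i++) {
		process_chunk(i);
		k_yield();	/* let equal-priority threads run */
	}
}
#endif
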
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1259 | static int32_t z_tick_sleep(int32_t ticks) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1260 | { |
Benjamin Walsh | b12a8e0 | 2016-12-14 15:24:12 -0500 | [diff] [blame] | 1261 | #ifdef CONFIG_MULTITHREADING |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1262 | uint32_t expected_wakeup_time; |
Carles Cufi | 9849df8 | 2016-12-02 15:31:08 +0100 | [diff] [blame] | 1263 | |
Andrew Boie | 4f77c2a | 2019-11-07 12:43:29 -0800 | [diff] [blame] | 1264 | __ASSERT(!arch_is_in_isr(), ""); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1265 | |
Anas Nashif | 2c5d404 | 2019-12-02 10:24:08 -0500 | [diff] [blame] | 1266 | LOG_DBG("thread %p for %d ticks", _current, ticks); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1267 | |
Benjamin Walsh | 5596f78 | 2016-12-09 19:57:17 -0500 | [diff] [blame] | 1268 | 	/* wait of 0 ticks is treated as a 'yield' */
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1269 | if (ticks == 0) { |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1270 | k_yield(); |
Piotr Zięcik | 7700eb2 | 2018-10-25 17:45:08 +0200 | [diff] [blame] | 1271 | return 0; |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1272 | } |
| 1273 | |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1274 | k_timeout_t timeout; |
| 1275 | |
| 1276 | #ifndef CONFIG_LEGACY_TIMEOUT_API |
| 1277 | timeout = Z_TIMEOUT_TICKS(ticks); |
| 1278 | #else |
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1279 | ticks += _TICK_ALIGN; |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1280 | timeout = (k_ticks_t) ticks; |
| 1281 | #endif |
| 1282 | |
Piotr Zięcik | 7700eb2 | 2018-10-25 17:45:08 +0200 | [diff] [blame] | 1283 | expected_wakeup_time = ticks + z_tick_get_32(); |
Andy Ross | d27d4e6 | 2019-02-05 15:36:01 -0800 | [diff] [blame] | 1284 | |
Andrew Boie | a8775ab | 2020-09-05 12:53:42 -0700 | [diff] [blame] | 1285 | k_spinlock_key_t key = k_spin_lock(&sched_spinlock); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1286 | |
Andy Ross | dff6b71 | 2019-02-25 21:17:29 -0800 | [diff] [blame] | 1287 | #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC) |
| 1288 | pending_current = _current; |
| 1289 | #endif |
Andrew Boie | a8775ab | 2020-09-05 12:53:42 -0700 | [diff] [blame] | 1290 | unready_thread(_current); |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1291 | z_add_thread_timeout(_current, timeout); |
Andy Ross | 4521e0c | 2019-03-22 10:30:19 -0700 | [diff] [blame] | 1292 | z_mark_thread_as_suspended(_current); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1293 | |
Andrew Boie | a8775ab | 2020-09-05 12:53:42 -0700 | [diff] [blame] | 1294 | (void)z_swap(&sched_spinlock, key); |
Piotr Zięcik | 7700eb2 | 2018-10-25 17:45:08 +0200 | [diff] [blame] | 1295 | |
Andy Ross | 4521e0c | 2019-03-22 10:30:19 -0700 | [diff] [blame] | 1296 | __ASSERT(!z_is_thread_state_set(_current, _THREAD_SUSPENDED), ""); |
| 1297 | |
Piotr Zięcik | 7700eb2 | 2018-10-25 17:45:08 +0200 | [diff] [blame] | 1298 | ticks = expected_wakeup_time - z_tick_get_32(); |
| 1299 | if (ticks > 0) { |
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1300 | return ticks; |
Piotr Zięcik | 7700eb2 | 2018-10-25 17:45:08 +0200 | [diff] [blame] | 1301 | } |
Benjamin Walsh | b12a8e0 | 2016-12-14 15:24:12 -0500 | [diff] [blame] | 1302 | #endif |
Piotr Zięcik | 7700eb2 | 2018-10-25 17:45:08 +0200 | [diff] [blame] | 1303 | |
| 1304 | return 0; |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1305 | } |
| 1306 | |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1307 | int32_t z_impl_k_sleep(k_timeout_t timeout) |
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1308 | { |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1309 | k_ticks_t ticks; |
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1310 | |
Peter Bigot | 8162e58 | 2019-12-12 16:07:07 -0600 | [diff] [blame] | 1311 | __ASSERT(!arch_is_in_isr(), ""); |
Anas Nashif | 5c31d00 | 2020-08-02 23:34:47 -0400 | [diff] [blame] | 1312 | sys_trace_void(SYS_TRACE_ID_SLEEP); |
Peter Bigot | 8162e58 | 2019-12-12 16:07:07 -0600 | [diff] [blame] | 1313 | |
Anas Nashif | d2c7179 | 2020-10-17 07:52:17 -0400 | [diff] [blame] | 1314 | /* in case of K_FOREVER, we suspend */ |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1315 | if (K_TIMEOUT_EQ(timeout, K_FOREVER)) { |
Andrew Boie | d2b8922 | 2019-11-08 10:44:22 -0800 | [diff] [blame] | 1316 | k_thread_suspend(_current); |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1317 | return (int32_t) K_TICKS_FOREVER; |
Andrew Boie | d2b8922 | 2019-11-08 10:44:22 -0800 | [diff] [blame] | 1318 | } |
| 1319 | |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1320 | #ifdef CONFIG_LEGACY_TIMEOUT_API |
| 1321 | ticks = k_ms_to_ticks_ceil32(timeout); |
| 1322 | #else |
| 1323 | ticks = timeout.ticks; |
| 1324 | #endif |
| 1325 | |
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1326 | ticks = z_tick_sleep(ticks); |
Anas Nashif | 5c31d00 | 2020-08-02 23:34:47 -0400 | [diff] [blame] | 1327 | sys_trace_end_call(SYS_TRACE_ID_SLEEP); |
Andy Ross | 8892406 | 2019-10-03 11:43:10 -0700 | [diff] [blame] | 1328 | return k_ticks_to_ms_floor64(ticks); |
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1329 | } |
| 1330 | |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1331 | #ifdef CONFIG_USERSPACE |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1332 | static inline int32_t z_vrfy_k_sleep(k_timeout_t timeout) |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1333 | { |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1334 | return z_impl_k_sleep(timeout); |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1335 | } |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1336 | #include <syscalls/k_sleep_mrsh.c> |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1337 | #endif |
| 1338 | |
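/* Usage sketch (illustrative only): k_sleep() takes a k_timeout_t and
 * returns the remaining time in milliseconds if the sleep was cut
 * short by k_wakeup().  sample_sensor() is a hypothetical call.
 */
#if 0
static void poll_loop(void)
{
	while (true) {
		sample_sensor();

		int32_t left_ms = k_sleep(K_MSEC(500));

		if (left_ms > 0) {
			/* woken early by k_wakeup() */
		}
	}
}
#endif
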
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1339 | int32_t z_impl_k_usleep(int us) |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1340 | { |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1341 | int32_t ticks; |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1342 | |
Andy Ross | 8892406 | 2019-10-03 11:43:10 -0700 | [diff] [blame] | 1343 | ticks = k_us_to_ticks_ceil64(us); |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1344 | ticks = z_tick_sleep(ticks); |
Andy Ross | 8892406 | 2019-10-03 11:43:10 -0700 | [diff] [blame] | 1345 | return k_ticks_to_us_floor64(ticks); |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1346 | } |
| 1347 | |
| 1348 | #ifdef CONFIG_USERSPACE |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1349 | static inline int32_t z_vrfy_k_usleep(int us) |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1350 | { |
| 1351 | return z_impl_k_usleep(us); |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1352 | } |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1353 | #include <syscalls/k_usleep_mrsh.c> |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1354 | #endif |
| 1355 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1356 | void z_impl_k_wakeup(k_tid_t thread) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1357 | { |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1358 | if (z_is_thread_pending(thread)) { |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1359 | return; |
| 1360 | } |
| 1361 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1362 | if (z_abort_thread_timeout(thread) < 0) { |
Andrew Boie | d2b8922 | 2019-11-08 10:44:22 -0800 | [diff] [blame] | 1363 | /* Might have just been sleeping forever */ |
| 1364 | if (thread->base.thread_state != _THREAD_SUSPENDED) { |
| 1365 | return; |
| 1366 | } |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1367 | } |
| 1368 | |
Andy Ross | 4521e0c | 2019-03-22 10:30:19 -0700 | [diff] [blame] | 1369 | z_mark_thread_as_not_suspended(thread); |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1370 | z_ready_thread(thread); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1371 | |
Andy Ross | 5737b5c | 2020-02-04 13:52:09 -0800 | [diff] [blame] | 1372 | #if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED) |
| 1373 | arch_sched_ipi(); |
| 1374 | #endif |
| 1375 | |
Andrew Boie | 4f77c2a | 2019-11-07 12:43:29 -0800 | [diff] [blame] | 1376 | if (!arch_is_in_isr()) { |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1377 | z_reschedule_unlocked(); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1378 | } |
| 1379 | } |
| 1380 | |
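/* Usage sketch (illustrative only): pairing k_sleep(K_FOREVER) with
 * k_wakeup().  The worker parks itself until another thread wakes it;
 * k_wakeup() has no effect if the target is not currently sleeping.
 * have_work() and drain_work() are hypothetical.
 */
#if 0
static void worker_loop(void *a, void *b, void *c)
{
	while (true) {
		if (!have_work()) {
			(void)k_sleep(K_FOREVER);
		}
		drain_work();
	}
}

static void notify_worker(k_tid_t worker_tid)
{
	k_wakeup(worker_tid);
}
#endif
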
Enjia Mai | 7ac40aa | 2020-05-28 11:29:50 +0800 | [diff] [blame] | 1381 | #ifdef CONFIG_TRACE_SCHED_IPI |
| 1382 | extern void z_trace_sched_ipi(void); |
| 1383 | #endif |
| 1384 | |
Andy Ross | 42ed12a | 2019-02-19 16:03:39 -0800 | [diff] [blame] | 1385 | #ifdef CONFIG_SMP |
Andy Ross | 42ed12a | 2019-02-19 16:03:39 -0800 | [diff] [blame] | 1386 | void z_sched_ipi(void) |
| 1387 | { |
Daniel Leung | adac4cb | 2020-01-09 18:55:07 -0800 | [diff] [blame] | 1388 | /* NOTE: When adding code to this, make sure this is called |
 | 1389 | 	 * at the appropriate locations when !CONFIG_SCHED_IPI_SUPPORTED.
| 1390 | */ |
Enjia Mai | 7ac40aa | 2020-05-28 11:29:50 +0800 | [diff] [blame] | 1391 | #ifdef CONFIG_TRACE_SCHED_IPI |
| 1392 | z_trace_sched_ipi(); |
| 1393 | #endif |
Andy Ross | 42ed12a | 2019-02-19 16:03:39 -0800 | [diff] [blame] | 1394 | } |
| 1395 | |
| 1396 | void z_sched_abort(struct k_thread *thread) |
| 1397 | { |
Wayne Ren | b1fbe85 | 2019-10-14 22:14:28 +0800 | [diff] [blame] | 1398 | k_spinlock_key_t key; |
| 1399 | |
Andy Ross | 42ed12a | 2019-02-19 16:03:39 -0800 | [diff] [blame] | 1400 | if (thread == _current) { |
| 1401 | z_remove_thread_from_ready_q(thread); |
| 1402 | return; |
| 1403 | } |
| 1404 | |
| 1405 | /* First broadcast an IPI to the other CPUs so they can stop |
| 1406 | * it locally. Not all architectures support that, alas. If |
| 1407 | * we don't have it, we need to wait for some other interrupt. |
| 1408 | */ |
Andy Ross | 42ed12a | 2019-02-19 16:03:39 -0800 | [diff] [blame] | 1409 | #ifdef CONFIG_SCHED_IPI_SUPPORTED |
Andrew Boie | 4f77c2a | 2019-11-07 12:43:29 -0800 | [diff] [blame] | 1410 | arch_sched_ipi(); |
Andy Ross | 42ed12a | 2019-02-19 16:03:39 -0800 | [diff] [blame] | 1411 | #endif |
| 1412 | |
| 1413 | /* Wait for it to be flagged dead either by the CPU it was |
 | 1414 | 	 * running on, or because we caught it idle in the queue.
| 1415 | */ |
Patrik Flykt | 24d7143 | 2019-03-26 19:57:45 -0600 | [diff] [blame] | 1416 | while ((thread->base.thread_state & _THREAD_DEAD) == 0U) { |
Wayne Ren | b1fbe85 | 2019-10-14 22:14:28 +0800 | [diff] [blame] | 1417 | key = k_spin_lock(&sched_spinlock); |
| 1418 | if (z_is_thread_prevented_from_running(thread)) { |
| 1419 | __ASSERT(!z_is_thread_queued(thread), ""); |
| 1420 | thread->base.thread_state |= _THREAD_DEAD; |
| 1421 | k_spin_unlock(&sched_spinlock, key); |
| 1422 | } else if (z_is_thread_queued(thread)) { |
| 1423 | _priq_run_remove(&_kernel.ready_q.runq, thread); |
| 1424 | z_mark_thread_as_not_queued(thread); |
| 1425 | thread->base.thread_state |= _THREAD_DEAD; |
| 1426 | k_spin_unlock(&sched_spinlock, key); |
| 1427 | } else { |
| 1428 | k_spin_unlock(&sched_spinlock, key); |
| 1429 | k_busy_wait(100); |
Andy Ross | 42ed12a | 2019-02-19 16:03:39 -0800 | [diff] [blame] | 1430 | } |
| 1431 | } |
| 1432 | } |
| 1433 | #endif |
| 1434 | |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1435 | #ifdef CONFIG_USERSPACE |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1436 | static inline void z_vrfy_k_wakeup(k_tid_t thread) |
| 1437 | { |
| 1438 | Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD)); |
| 1439 | z_impl_k_wakeup(thread); |
| 1440 | } |
| 1441 | #include <syscalls/k_wakeup_mrsh.c> |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1442 | #endif |
| 1443 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1444 | k_tid_t z_impl_k_current_get(void) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1445 | { |
Andy Ross | eefd3da | 2020-02-06 13:39:52 -0800 | [diff] [blame] | 1446 | #ifdef CONFIG_SMP |
| 1447 | /* In SMP, _current is a field read from _current_cpu, which |
| 1448 | * can race with preemption before it is read. We must lock |
| 1449 | * local interrupts when reading it. |
| 1450 | */ |
| 1451 | unsigned int k = arch_irq_lock(); |
| 1452 | #endif |
| 1453 | |
| 1454 | k_tid_t ret = _current_cpu->current; |
| 1455 | |
| 1456 | #ifdef CONFIG_SMP |
| 1457 | arch_irq_unlock(k); |
| 1458 | #endif |
| 1459 | return ret; |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1460 | } |
| 1461 | |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1462 | #ifdef CONFIG_USERSPACE |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1463 | static inline k_tid_t z_vrfy_k_current_get(void) |
| 1464 | { |
| 1465 | return z_impl_k_current_get(); |
| 1466 | } |
| 1467 | #include <syscalls/k_current_get_mrsh.c> |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1468 | #endif |
| 1469 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1470 | int z_impl_k_is_preempt_thread(void) |
Benjamin Walsh | 445830d | 2016-11-10 15:54:27 -0500 | [diff] [blame] | 1471 | { |
Andrew Boie | 4f77c2a | 2019-11-07 12:43:29 -0800 | [diff] [blame] | 1472 | return !arch_is_in_isr() && is_preempt(_current); |
Benjamin Walsh | 445830d | 2016-11-10 15:54:27 -0500 | [diff] [blame] | 1473 | } |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1474 | |
| 1475 | #ifdef CONFIG_USERSPACE |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1476 | static inline int z_vrfy_k_is_preempt_thread(void) |
| 1477 | { |
| 1478 | return z_impl_k_is_preempt_thread(); |
| 1479 | } |
| 1480 | #include <syscalls/k_is_preempt_thread_mrsh.c> |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1481 | #endif |
Andy Ross | ab46b1b | 2019-01-30 15:00:42 -0800 | [diff] [blame] | 1482 | |
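/* Usage sketch (illustrative only): library code that may run in
 * either cooperative or preemptible threads can use
 * k_is_preempt_thread() to decide whether it must yield explicitly
 * during long operations.  do_step() is hypothetical.
 */
#if 0
static void long_operation(void)
{
	for (int i = 0; i < 1000; i++) {
		do_step(i);

		if (!k_is_preempt_thread()) {
			k_yield();	/* cooperative caller: share the CPU */
		}
	}
}
#endif
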
| 1483 | #ifdef CONFIG_SCHED_CPU_MASK |
| 1484 | # ifdef CONFIG_SMP |
| 1485 | /* Right now we use a single byte for this mask */ |
Oleg Zhurakivskyy | b1e1f64 | 2020-03-12 17:16:00 +0200 | [diff] [blame] | 1486 | BUILD_ASSERT(CONFIG_MP_NUM_CPUS <= 8, "Too many CPUs for mask word"); |
Andy Ross | ab46b1b | 2019-01-30 15:00:42 -0800 | [diff] [blame] | 1487 | # endif |
| 1488 | |
| 1489 | |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1490 | static int cpu_mask_mod(k_tid_t thread, uint32_t enable_mask, uint32_t disable_mask) |
Andy Ross | ab46b1b | 2019-01-30 15:00:42 -0800 | [diff] [blame] | 1491 | { |
| 1492 | int ret = 0; |
| 1493 | |
Patrik Flykt | cf2d579 | 2019-02-12 15:50:46 -0700 | [diff] [blame] | 1494 | LOCKED(&sched_spinlock) { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1495 | if (z_is_thread_prevented_from_running(thread)) { |
| 1496 | thread->base.cpu_mask |= enable_mask; |
| 1497 | thread->base.cpu_mask &= ~disable_mask; |
Andy Ross | ab46b1b | 2019-01-30 15:00:42 -0800 | [diff] [blame] | 1498 | } else { |
| 1499 | ret = -EINVAL; |
| 1500 | } |
| 1501 | } |
| 1502 | return ret; |
| 1503 | } |
| 1504 | |
| 1505 | int k_thread_cpu_mask_clear(k_tid_t thread) |
| 1506 | { |
| 1507 | return cpu_mask_mod(thread, 0, 0xffffffff); |
| 1508 | } |
| 1509 | |
| 1510 | int k_thread_cpu_mask_enable_all(k_tid_t thread) |
| 1511 | { |
| 1512 | return cpu_mask_mod(thread, 0xffffffff, 0); |
| 1513 | } |
| 1514 | |
| 1515 | int k_thread_cpu_mask_enable(k_tid_t thread, int cpu) |
| 1516 | { |
| 1517 | return cpu_mask_mod(thread, BIT(cpu), 0); |
| 1518 | } |
| 1519 | |
| 1520 | int k_thread_cpu_mask_disable(k_tid_t thread, int cpu) |
| 1521 | { |
| 1522 | return cpu_mask_mod(thread, 0, BIT(cpu)); |
| 1523 | } |
| 1524 | |
| 1525 | #endif /* CONFIG_SCHED_CPU_MASK */ |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1526 | |
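/* Usage sketch (illustrative only): pinning a thread to CPU 0 on an
 * SMP build.  The mask can only be changed while the thread is not
 * runnable, e.g. a thread created with a K_FOREVER start delay and
 * started afterwards.  "my_thread" is hypothetical.
 */
#if 0
static void pin_to_cpu0(struct k_thread *my_thread)
{
	(void)k_thread_cpu_mask_clear(my_thread);
	(void)k_thread_cpu_mask_enable(my_thread, 0);
	k_thread_start(my_thread);
}
#endif
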
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1527 | int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout) |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1528 | { |
| 1529 | k_spinlock_key_t key; |
| 1530 | int ret; |
| 1531 | |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1532 | __ASSERT(((arch_is_in_isr() == false) || |
| 1533 | K_TIMEOUT_EQ(timeout, K_NO_WAIT)), ""); |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1534 | |
| 1535 | key = k_spin_lock(&sched_spinlock); |
| 1536 | |
| 1537 | if ((thread->base.pended_on == &_current->base.join_waiters) || |
| 1538 | (thread == _current)) { |
| 1539 | ret = -EDEADLK; |
| 1540 | goto out; |
| 1541 | } |
| 1542 | |
| 1543 | if ((thread->base.thread_state & _THREAD_DEAD) != 0) { |
| 1544 | ret = 0; |
| 1545 | goto out; |
| 1546 | } |
| 1547 | |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1548 | if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1549 | ret = -EBUSY; |
| 1550 | goto out; |
| 1551 | } |
| 1552 | |
| 1553 | #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC) |
| 1554 | pending_current = _current; |
| 1555 | #endif |
| 1556 | add_to_waitq_locked(_current, &thread->base.join_waiters); |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1557 | add_thread_timeout(_current, timeout); |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1558 | |
| 1559 | return z_swap(&sched_spinlock, key); |
| 1560 | out: |
| 1561 | k_spin_unlock(&sched_spinlock, key); |
| 1562 | return ret; |
| 1563 | } |
| 1564 | |
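/* Usage sketch (illustrative only): creating a worker thread and
 * blocking until it exits.  K_FOREVER waits indefinitely; K_NO_WAIT
 * would return -EBUSY if the worker were still running.  All names
 * below are hypothetical.
 */
#if 0
K_THREAD_STACK_DEFINE(worker_stack, 1024);
static struct k_thread worker_thread;

static void worker_entry(void *a, void *b, void *c)
{
	/* ... a finite amount of work ... */
}

static void run_and_join(void)
{
	k_thread_create(&worker_thread, worker_stack,
			K_THREAD_STACK_SIZEOF(worker_stack),
			worker_entry, NULL, NULL, NULL,
			K_PRIO_PREEMPT(1), 0, K_NO_WAIT);

	(void)k_thread_join(&worker_thread, K_FOREVER);
}
#endif
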
| 1565 | #ifdef CONFIG_USERSPACE |
| 1566 | /* Special case: don't oops if the thread is uninitialized. This is because |
 | 1567 |  * the initialization bit does double-duty for thread objects; if false, it means
| 1568 | * the thread object is truly uninitialized, or the thread ran and exited for |
| 1569 | * some reason. |
| 1570 | * |
 | 1571 |  * Return true in this case, indicating we should just do nothing and return
| 1572 | * success to the caller. |
| 1573 | */ |
| 1574 | static bool thread_obj_validate(struct k_thread *thread) |
| 1575 | { |
Andrew Boie | 2dc2ecf | 2020-03-11 07:13:07 -0700 | [diff] [blame] | 1576 | struct z_object *ko = z_object_find(thread); |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1577 | int ret = z_object_validate(ko, K_OBJ_THREAD, _OBJ_INIT_TRUE); |
| 1578 | |
| 1579 | switch (ret) { |
| 1580 | case 0: |
| 1581 | return false; |
| 1582 | case -EINVAL: |
| 1583 | return true; |
| 1584 | default: |
| 1585 | #ifdef CONFIG_LOG |
| 1586 | z_dump_object_error(ret, thread, ko, K_OBJ_THREAD); |
| 1587 | #endif |
| 1588 | Z_OOPS(Z_SYSCALL_VERIFY_MSG(ret, "access denied")); |
| 1589 | } |
| 1590 | CODE_UNREACHABLE; |
| 1591 | } |
| 1592 | |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1593 | static inline int z_vrfy_k_thread_join(struct k_thread *thread, |
| 1594 | k_timeout_t timeout) |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1595 | { |
| 1596 | if (thread_obj_validate(thread)) { |
| 1597 | return 0; |
| 1598 | } |
| 1599 | |
| 1600 | return z_impl_k_thread_join(thread, timeout); |
| 1601 | } |
| 1602 | #include <syscalls/k_thread_join_mrsh.c> |
Andrew Boie | a4c9190 | 2020-03-24 16:09:24 -0700 | [diff] [blame] | 1603 | |
| 1604 | static inline void z_vrfy_k_thread_abort(k_tid_t thread) |
| 1605 | { |
| 1606 | if (thread_obj_validate(thread)) { |
| 1607 | return; |
| 1608 | } |
| 1609 | |
| 1610 | Z_OOPS(Z_SYSCALL_VERIFY_MSG(!(thread->base.user_options & K_ESSENTIAL), |
| 1611 | "aborting essential thread %p", thread)); |
| 1612 | |
| 1613 | z_impl_k_thread_abort((struct k_thread *)thread); |
| 1614 | } |
| 1615 | #include <syscalls/k_thread_abort_mrsh.c> |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1616 | #endif /* CONFIG_USERSPACE */ |