/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <kernel.h>
#include <ksched.h>
#include <spinlock.h>
#include <sched_priq.h>
#include <wait_q.h>
#include <kswap.h>
#include <kernel_arch_func.h>
#include <syscall_handler.h>
#include <drivers/system_timer.h>

#if defined(CONFIG_SCHED_DUMB)
#define _priq_run_add		_priq_dumb_add
#define _priq_run_remove	_priq_dumb_remove
#define _priq_run_best		_priq_dumb_best
#elif defined(CONFIG_SCHED_SCALABLE)
#define _priq_run_add		_priq_rb_add
#define _priq_run_remove	_priq_rb_remove
#define _priq_run_best		_priq_rb_best
#elif defined(CONFIG_SCHED_MULTIQ)
#define _priq_run_add		_priq_mq_add
#define _priq_run_remove	_priq_mq_remove
#define _priq_run_best		_priq_mq_best
#endif

#if defined(CONFIG_WAITQ_SCALABLE)
#define _priq_wait_add		_priq_rb_add
#define _priq_wait_remove	_priq_rb_remove
#define _priq_wait_best		_priq_rb_best
#elif defined(CONFIG_WAITQ_DUMB)
#define _priq_wait_add		_priq_dumb_add
#define _priq_wait_remove	_priq_dumb_remove
#define _priq_wait_best		_priq_dumb_best
#endif

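/* The queue operations above resolve at build time to one of three
 * backends implemented at the bottom of this file: a single sorted
 * dlist ("dumb": O(n) insert, minimal code), a balanced rbtree
 * ("scalable": O(log n)), or a bitmask-indexed array of per-priority
 * lists ("multiq": O(1), limited to 32 priorities).  All three share
 * the same add/remove/best signature, so the rest of the scheduler is
 * backend-agnostic.
 */
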
/* the only struct z_kernel instance */
struct z_kernel _kernel;

static struct k_spinlock sched_lock;

#define LOCKED(lck) for (k_spinlock_key_t __i = {}, \
			 __key = k_spin_lock(lck); \
			 !__i.key; \
			 k_spin_unlock(lck, __key), __i.key = 1)

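/* LOCKED() is a run-once for loop: __key takes the spinlock in the
 * init clause, the body executes exactly once (while __i.key is still
 * zero from the {} initializer), and the increment clause releases
 * the lock and sets __i.key so the loop terminates.  A minimal usage
 * sketch:
 *
 *	LOCKED(&sched_lock) {
 *		_priq_run_add(&_kernel.ready_q.runq, thread);
 *	}
 *
 * Note that an early return or break from the body skips the
 * increment clause and therefore the unlock.
 */
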
static inline int _is_preempt(struct k_thread *thread)
{
#ifdef CONFIG_PREEMPT_ENABLED
	/* explanation in kernel_structs.h */
	return thread->base.preempt <= _PREEMPT_THRESHOLD;
#else
	return 0;
#endif
}

static inline int is_metairq(struct k_thread *thread)
{
#if CONFIG_NUM_METAIRQ_PRIORITIES > 0
	return (thread->base.prio - K_HIGHEST_THREAD_PRIO)
		< CONFIG_NUM_METAIRQ_PRIORITIES;
#else
	return 0;
#endif
}

#if CONFIG_ASSERT
static inline int _is_thread_dummy(struct k_thread *thread)
{
	return !!(thread->base.thread_state & _THREAD_DUMMY);
}
#endif

static inline int _is_idle(struct k_thread *thread)
{
#ifdef CONFIG_SMP
	return thread->base.is_idle;
#else
	extern struct k_thread * const _idle_thread;

	return thread == _idle_thread;
#endif
}

bool _is_t1_higher_prio_than_t2(struct k_thread *t1, struct k_thread *t2)
{
	if (t1->base.prio < t2->base.prio) {
		return true;
	}

#ifdef CONFIG_SCHED_DEADLINE
	/* Note that we don't care about wraparound conditions.  The
	 * expectation is that the application will have arranged to
	 * block the threads, change their priorities or reset their
	 * deadlines when the job is complete.  Letting the deadlines
	 * go negative is fine and in fact prevents aliasing bugs.
	 */
	if (t1->base.prio == t2->base.prio) {
		int now = (int) k_cycle_get_32();
		int dt1 = t1->base.prio_deadline - now;
		int dt2 = t2->base.prio_deadline - now;

		return dt1 < dt2;
	}
#endif

	return false;
}

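/* A worked example of the signed-difference trick above: with a
 * 32-bit cycle counter, deadlines 0xfffffff0 (set just before
 * wraparound) and 0x00000010 compare correctly at now == 0xfffffff8,
 * because dt1 == -8 and dt2 == 24 as signed ints.  The earlier
 * deadline still wins even though its raw value is numerically
 * larger.
 */
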
static int should_preempt(struct k_thread *th, int preempt_ok)
{
	/* Preemption is OK if it's being explicitly allowed by
	 * software state (e.g. the thread called k_yield())
	 */
	if (preempt_ok) {
		return 1;
	}

	/* Or if we're pended/suspended/dummy (duh) */
	if (!_current || !_is_thread_ready(_current)) {
		return 1;
	}

	/* Otherwise we have to be running a preemptible thread or
	 * switching to a metairq
	 */
	if (_is_preempt(_current) || is_metairq(th)) {
		return 1;
	}

	/* The idle threads can look "cooperative" if there are no
	 * preemptible priorities (this is sort of an API glitch).
	 * They must always be preemptible.
	 */
	if (_is_idle(_current)) {
		return 1;
	}

	return 0;
}

static struct k_thread *next_up(void)
{
#ifndef CONFIG_SMP
	/* In uniprocessor mode, we can leave the current thread in
	 * the queue (actually we have to, otherwise the assembly
	 * context switch code for all architectures would be
	 * responsible for putting it back in _Swap and ISR return!),
	 * which makes this choice simple.
	 */
	struct k_thread *th = _priq_run_best(&_kernel.ready_q.runq);

	return th ? th : _current_cpu->idle_thread;
#else

	/* Under SMP, the "cache" mechanism for selecting the next
	 * thread doesn't work, so we have more work to do to test
	 * _current against the best choice from the queue.
	 *
	 * Subtle note on "queued": in SMP mode, _current does not
	 * live in the queue, so this isn't exactly the same thing as
	 * "ready", it means "is _current already added back to the
	 * queue such that we don't want to re-add it".
	 */
	int queued = _is_thread_queued(_current);
	int active = !_is_thread_prevented_from_running(_current);

	/* Choose the best thread that is not current */
	struct k_thread *th = _priq_run_best(&_kernel.ready_q.runq);
	if (th == NULL) {
		th = _current_cpu->idle_thread;
	}

	if (active) {
		if (!queued &&
		    !_is_t1_higher_prio_than_t2(th, _current)) {
			th = _current;
		}

		if (!should_preempt(th, _current_cpu->swap_ok)) {
			th = _current;
		}
	}

	/* Put _current back into the queue */
	if (th != _current && active && !_is_idle(_current) && !queued) {
		_priq_run_add(&_kernel.ready_q.runq, _current);
		_mark_thread_as_queued(_current);
	}

	/* Take the new _current out of the queue */
	if (_is_thread_queued(th)) {
		_priq_run_remove(&_kernel.ready_q.runq, th);
	}
	_mark_thread_as_not_queued(th);

	return th;
#endif
}

#ifdef CONFIG_TIMESLICING

static int slice_time;
static int slice_max_prio;

static void reset_time_slice(void)
{
	int to = _get_next_timeout_expiry();

	/* Add the elapsed time since the last announced tick to the
	 * slice count, as we'll see those "expired" ticks arrive in a
	 * FUTURE z_time_slice() call.
	 */
	_current_cpu->slice_ticks = slice_time + z_clock_elapsed();

	if (to == K_FOREVER || slice_time < to) {
		z_clock_set_timeout(slice_time, false);
	}
}

void k_sched_time_slice_set(s32_t duration_in_ms, int prio)
{
	LOCKED(&sched_lock) {
		_current_cpu->slice_ticks = 0;
		slice_time = _ms_to_ticks(duration_in_ms);
		slice_max_prio = prio;
		reset_time_slice();
	}
}

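/* A typical call, e.g. from application setup code (the values here
 * are illustrative only): give preemptible threads of priority 0 or
 * lower (numerically >= 0) a 10ms slice:
 *
 *	k_sched_time_slice_set(10, 0);
 *
 * Passing a duration of 0 disables time slicing, since z_time_slice()
 * below checks slice_time before doing anything.
 */
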
static inline int sliceable(struct k_thread *t)
{
	return _is_preempt(t)
		&& !_is_prio_higher(t->base.prio, slice_max_prio)
		&& !_is_idle(t)
		&& !_is_thread_timeout_active(t);
}

/* Called out of each timer interrupt */
void z_time_slice(int ticks)
{
	if (slice_time && sliceable(_current)) {
		if (ticks >= _current_cpu->slice_ticks) {
			_move_thread_to_end_of_prio_q(_current);
			reset_time_slice();
		} else {
			_current_cpu->slice_ticks -= ticks;
		}
	}
}
#else
static void reset_time_slice(void) { /* !CONFIG_TIMESLICING */ }
#endif

static void update_cache(int preempt_ok)
{
#ifndef CONFIG_SMP
	struct k_thread *th = next_up();

	if (should_preempt(th, preempt_ok)) {
		if (th != _current) {
			reset_time_slice();
		}
		_kernel.ready_q.cache = th;
	} else {
		_kernel.ready_q.cache = _current;
	}

#else
	/* The way this works is that the CPU record keeps its
	 * "cooperative swapping is OK" flag until the next reschedule
	 * call or context switch.  It doesn't need to be tracked per
	 * thread because if the thread gets preempted for whatever
	 * reason the scheduler will make the same decision anyway.
	 */
	_current_cpu->swap_ok = preempt_ok;
#endif
}

void _add_thread_to_ready_q(struct k_thread *thread)
{
	LOCKED(&sched_lock) {
		_priq_run_add(&_kernel.ready_q.runq, thread);
		_mark_thread_as_queued(thread);
		update_cache(0);
	}
}

void _move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	LOCKED(&sched_lock) {
		_priq_run_remove(&_kernel.ready_q.runq, thread);
		_priq_run_add(&_kernel.ready_q.runq, thread);
		_mark_thread_as_queued(thread);
		update_cache(thread == _current);
	}
}

void _remove_thread_from_ready_q(struct k_thread *thread)
{
	LOCKED(&sched_lock) {
		if (_is_thread_queued(thread)) {
			_priq_run_remove(&_kernel.ready_q.runq, thread);
			_mark_thread_as_not_queued(thread);
			update_cache(thread == _current);
		}
	}
}

static void pend(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
{
	_remove_thread_from_ready_q(thread);
	_mark_thread_as_pending(thread);

	if (wait_q != NULL) {
		thread->base.pended_on = wait_q;
		_priq_wait_add(&wait_q->waitq, thread);
	}

	if (timeout != K_FOREVER) {
		s32_t ticks = _TICK_ALIGN + _ms_to_ticks(timeout);

		_add_thread_timeout(thread, ticks);
	}

	sys_trace_thread_pend(thread);
}

void _pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
{
	__ASSERT_NO_MSG(thread == _current || _is_thread_dummy(thread));
	pend(thread, wait_q, timeout);
}

static _wait_q_t *pended_on(struct k_thread *thread)
{
	__ASSERT_NO_MSG(thread->base.pended_on);

	return thread->base.pended_on;
}

struct k_thread *_find_first_thread_to_unpend(_wait_q_t *wait_q,
					      struct k_thread *from)
{
	ARG_UNUSED(from);

	struct k_thread *ret = NULL;

	LOCKED(&sched_lock) {
		ret = _priq_wait_best(&wait_q->waitq);
	}

	return ret;
}

void _unpend_thread_no_timeout(struct k_thread *thread)
{
	LOCKED(&sched_lock) {
		_priq_wait_remove(&pended_on(thread)->waitq, thread);
		_mark_thread_as_not_pending(thread);
	}

	thread->base.pended_on = NULL;
}

#ifdef CONFIG_SYS_CLOCK_EXISTS
/* Timeout handler for *_thread_timeout() APIs */
void z_thread_timeout(struct _timeout *to)
{
	struct k_thread *th = CONTAINER_OF(to, struct k_thread, base.timeout);

	if (th->base.pended_on != NULL) {
		_unpend_thread_no_timeout(th);
	}
	_mark_thread_as_started(th);
	_ready_thread(th);
}
#endif

int _pend_current_thread(u32_t key, _wait_q_t *wait_q, s32_t timeout)
{
	pend(_current, wait_q, timeout);
	return _Swap(key);
}

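/* Callers typically take the interrupt lock, check whether they can
 * satisfy the request immediately, and only then hand the key to
 * _pend_current_thread(), which pends, swaps away, and returns the
 * _Swap() result once the thread is woken.  A hedged sketch of that
 * pattern (resource_available() and obj are illustrative, not real
 * APIs):
 *
 *	unsigned int key = irq_lock();
 *
 *	if (resource_available(obj)) {
 *		irq_unlock(key);
 *		return 0;
 *	}
 *	return _pend_current_thread(key, &obj->wait_q, timeout);
 */
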
struct k_thread *_unpend_first_thread(_wait_q_t *wait_q)
{
	struct k_thread *t = _unpend1_no_timeout(wait_q);

	if (t != NULL) {
		(void)_abort_thread_timeout(t);
	}

	return t;
}

void _unpend_thread(struct k_thread *thread)
{
	_unpend_thread_no_timeout(thread);
	(void)_abort_thread_timeout(thread);
}

/* FIXME: this API is glitchy when used in SMP.  If the thread is
 * currently scheduled on the other CPU, it will silently set its
 * priority but nothing will cause a reschedule until the next
 * interrupt.  An audit seems to show that all current usage is to set
 * priorities on either _current or a pended thread, though, so it's
 * fine for now.
 */
void _thread_priority_set(struct k_thread *thread, int prio)
{
	bool need_sched = false;

	LOCKED(&sched_lock) {
		need_sched = _is_thread_ready(thread);

		if (need_sched) {
			_priq_run_remove(&_kernel.ready_q.runq, thread);
			thread->base.prio = prio;
			_priq_run_add(&_kernel.ready_q.runq, thread);
			update_cache(1);
		} else {
			thread->base.prio = prio;
		}
	}
	sys_trace_thread_priority_set(thread);

	if (need_sched) {
		_reschedule(irq_lock());
	}
}

void _reschedule(u32_t key)
{
#ifdef CONFIG_SMP
	if (!_current_cpu->swap_ok) {
		goto noswap;
	}

	_current_cpu->swap_ok = 0;
#endif

	if (_is_in_isr()) {
		goto noswap;
	}

#ifdef CONFIG_SMP
	(void)_Swap(key);
	return;
#else
	if (_get_next_ready_thread() != _current) {
		(void)_Swap(key);
		return;
	}
#endif

noswap:
	irq_unlock(key);
}

void k_sched_lock(void)
{
	LOCKED(&sched_lock) {
		_sched_lock();
	}
}

void k_sched_unlock(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
	__ASSERT(_current->base.sched_locked != 0, "");
	__ASSERT(!_is_in_isr(), "");

	LOCKED(&sched_lock) {
		++_current->base.sched_locked;
		update_cache(1);
	}

	K_DEBUG("scheduler unlocked (%p:%d)\n",
		_current, _current->base.sched_locked);

	_reschedule(irq_lock());
#endif
}

#ifdef CONFIG_SMP
struct k_thread *_get_next_ready_thread(void)
{
	struct k_thread *ret = NULL;

	LOCKED(&sched_lock) {
		ret = next_up();
	}

	return ret;
}
#endif

#ifdef CONFIG_USE_SWITCH
void *_get_next_switch_handle(void *interrupted)
{
	_current->switch_handle = interrupted;

#ifdef CONFIG_SMP
	LOCKED(&sched_lock) {
		struct k_thread *th = next_up();

		if (_current != th) {
			reset_time_slice();
			_current_cpu->swap_ok = 0;
#ifdef CONFIG_TRACING
			sys_trace_thread_switched_out();
#endif
			_current = th;
#ifdef CONFIG_TRACING
			sys_trace_thread_switched_in();
#endif
		}
	}

#else
#ifdef CONFIG_TRACING
	sys_trace_thread_switched_out();
#endif
	_current = _get_next_ready_thread();
#ifdef CONFIG_TRACING
	sys_trace_thread_switched_in();
#endif
#endif

	_check_stack_sentinel();

	return _current->switch_handle;
}
#endif

void _priq_dumb_add(sys_dlist_t *pq, struct k_thread *thread)
{
	struct k_thread *t;

	__ASSERT_NO_MSG(!_is_idle(thread));

	SYS_DLIST_FOR_EACH_CONTAINER(pq, t, base.qnode_dlist) {
		if (_is_t1_higher_prio_than_t2(thread, t)) {
			sys_dlist_insert_before(pq, &t->base.qnode_dlist,
						&thread->base.qnode_dlist);
			return;
		}
	}

	sys_dlist_append(pq, &thread->base.qnode_dlist);
}

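/* The "dumb" backend keeps a single dlist sorted by priority, so the
 * insert above is O(n) in the number of ready threads, while remove
 * and best (a head peek) below are O(1).  That trade-off suits small
 * systems with few runnable threads.
 */
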
void _priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread)
{
	__ASSERT_NO_MSG(!_is_idle(thread));

	sys_dlist_remove(&thread->base.qnode_dlist);
}

struct k_thread *_priq_dumb_best(sys_dlist_t *pq)
{
	return CONTAINER_OF(sys_dlist_peek_head(pq),
			    struct k_thread, base.qnode_dlist);
}

bool _priq_rb_lessthan(struct rbnode *a, struct rbnode *b)
{
	struct k_thread *ta, *tb;

	ta = CONTAINER_OF(a, struct k_thread, base.qnode_rb);
	tb = CONTAINER_OF(b, struct k_thread, base.qnode_rb);

	if (_is_t1_higher_prio_than_t2(ta, tb)) {
		return true;
	} else if (_is_t1_higher_prio_than_t2(tb, ta)) {
		return false;
	} else {
		return ta->base.order_key < tb->base.order_key;
	}
}

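/* The order_key comparison above is what keeps the rbtree FIFO within
 * a priority level: threads of equal priority (and, with
 * CONFIG_SCHED_DEADLINE, equal deadline) are ordered by the sequence
 * in which they were added, so rb_get_min() still returns the
 * longest-waiting thread first.
 */
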
void _priq_rb_add(struct _priq_rb *pq, struct k_thread *thread)
{
	struct k_thread *t;

	__ASSERT_NO_MSG(!_is_idle(thread));

	thread->base.order_key = pq->next_order_key++;

	/* Renumber at wraparound.  This is tiny code, and in practice
	 * will almost never be hit on real systems.  BUT on very
	 * long-running systems where a priq never completely empties
	 * AND that contains very large numbers of threads, it can be
	 * a latency glitch to loop over all the threads like this.
	 */
	if (!pq->next_order_key) {
		RB_FOR_EACH_CONTAINER(&pq->tree, t, base.qnode_rb) {
			t->base.order_key = pq->next_order_key++;
		}
	}

	rb_insert(&pq->tree, &thread->base.qnode_rb);
}

void _priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread)
{
	__ASSERT_NO_MSG(!_is_idle(thread));

	rb_remove(&pq->tree, &thread->base.qnode_rb);

	if (!pq->tree.root) {
		pq->next_order_key = 0;
	}
}

struct k_thread *_priq_rb_best(struct _priq_rb *pq)
{
	struct rbnode *n = rb_get_min(&pq->tree);

	return CONTAINER_OF(n, struct k_thread, base.qnode_rb);
}

#ifdef CONFIG_SCHED_MULTIQ
# if (K_LOWEST_THREAD_PRIO - K_HIGHEST_THREAD_PRIO) > 31
# error Too many priorities for multiqueue scheduler (max 32)
# endif
#endif

void _priq_mq_add(struct _priq_mq *pq, struct k_thread *thread)
{
	int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;

	sys_dlist_append(&pq->queues[priority_bit], &thread->base.qnode_dlist);
	pq->bitmask |= (1 << priority_bit);
}

void _priq_mq_remove(struct _priq_mq *pq, struct k_thread *thread)
{
	int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;

	sys_dlist_remove(&thread->base.qnode_dlist);
	if (sys_dlist_is_empty(&pq->queues[priority_bit])) {
		pq->bitmask &= ~(1 << priority_bit);
	}
}

struct k_thread *_priq_mq_best(struct _priq_mq *pq)
{
	if (!pq->bitmask) {
		return NULL;
	}

	sys_dlist_t *l = &pq->queues[__builtin_ctz(pq->bitmask)];

	return CONTAINER_OF(sys_dlist_peek_head(l),
			    struct k_thread, base.qnode_dlist);
}

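/* In the multiq backend, bit N of pq->bitmask is set exactly when
 * queues[N] is non-empty, so __builtin_ctz() (count trailing zeros)
 * finds the highest-priority non-empty list in constant time.  For
 * example, a bitmask of 0x14 (bits 2 and 4 set) yields index 2: the
 * queue for priority K_HIGHEST_THREAD_PRIO + 2.
 */
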
int _unpend_all(_wait_q_t *waitq)
{
	int need_sched = 0;
	struct k_thread *th;

	while ((th = _waitq_head(waitq)) != NULL) {
		_unpend_thread(th);
		_ready_thread(th);
		need_sched = 1;
	}

	return need_sched;
}

void _sched_init(void)
{
#ifdef CONFIG_SCHED_DUMB
	sys_dlist_init(&_kernel.ready_q.runq);
#endif

#ifdef CONFIG_SCHED_SCALABLE
	_kernel.ready_q.runq = (struct _priq_rb) {
		.tree = {
			.lessthan_fn = _priq_rb_lessthan,
		}
	};
#endif

#ifdef CONFIG_SCHED_MULTIQ
	for (int i = 0; i < ARRAY_SIZE(_kernel.ready_q.runq.queues); i++) {
		sys_dlist_init(&_kernel.ready_q.runq.queues[i]);
	}
#endif

#ifdef CONFIG_TIMESLICING
	k_sched_time_slice_set(CONFIG_TIMESLICE_SIZE,
			       CONFIG_TIMESLICE_PRIORITY);
#endif
}

int _impl_k_thread_priority_get(k_tid_t thread)
{
	return thread->base.prio;
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER1_SIMPLE(k_thread_priority_get, K_OBJ_THREAD,
			  struct k_thread *);
#endif

void _impl_k_thread_priority_set(k_tid_t tid, int prio)
{
	/*
	 * Use NULL, since we cannot know what the entry point is (we do not
	 * keep track of it) and idle cannot change its priority.
	 */
	_ASSERT_VALID_PRIO(prio, NULL);
	__ASSERT(!_is_in_isr(), "");

	struct k_thread *thread = (struct k_thread *)tid;

	_thread_priority_set(thread, prio);
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_thread_priority_set, thread_p, prio)
{
	struct k_thread *thread = (struct k_thread *)thread_p;

	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(_is_valid_prio(prio, NULL),
				    "invalid thread priority %d", (int)prio));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG((s8_t)prio >= thread->base.prio,
				    "thread priority may only be downgraded (%d < %d)",
				    prio, thread->base.prio));

	_impl_k_thread_priority_set((k_tid_t)thread, prio);
	return 0;
}
#endif

#ifdef CONFIG_SCHED_DEADLINE
void _impl_k_thread_deadline_set(k_tid_t tid, int deadline)
{
	struct k_thread *th = tid;

	LOCKED(&sched_lock) {
		th->base.prio_deadline = k_cycle_get_32() + deadline;
		if (_is_thread_queued(th)) {
			_priq_run_remove(&_kernel.ready_q.runq, th);
			_priq_run_add(&_kernel.ready_q.runq, th);
		}
	}
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_thread_deadline_set, thread_p, deadline)
{
	struct k_thread *thread = (struct k_thread *)thread_p;

	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(deadline > 0,
				    "invalid thread deadline %d",
				    (int)deadline));

	_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
	return 0;
}
#endif
#endif

void _impl_k_yield(void)
{
	__ASSERT(!_is_in_isr(), "");

	if (!_is_idle(_current)) {
		LOCKED(&sched_lock) {
			_priq_run_remove(&_kernel.ready_q.runq, _current);
			_priq_run_add(&_kernel.ready_q.runq, _current);
			update_cache(1);
		}
	}

#ifdef CONFIG_SMP
	(void)_Swap(irq_lock());
#else
	if (_get_next_ready_thread() != _current) {
		(void)_Swap(irq_lock());
	}
#endif
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER0_SIMPLE_VOID(k_yield);
#endif

s32_t _impl_k_sleep(s32_t duration)
{
#ifdef CONFIG_MULTITHREADING
	u32_t expected_wakeup_time;
	s32_t ticks;
	unsigned int key;

	__ASSERT(!_is_in_isr(), "");
	__ASSERT(duration != K_FOREVER, "");

	K_DEBUG("thread %p for %d ms\n", _current, duration);

	/* wait of 0 ms is treated as a 'yield' */
	if (duration == 0) {
		k_yield();
		return 0;
	}

	ticks = _TICK_ALIGN + _ms_to_ticks(duration);
	expected_wakeup_time = ticks + z_tick_get_32();
	key = irq_lock();

	_remove_thread_from_ready_q(_current);
	_add_thread_timeout(_current, ticks);

	(void)_Swap(key);

	ticks = expected_wakeup_time - z_tick_get_32();
	if (ticks > 0) {
		return __ticks_to_ms(ticks);
	}
#endif

	return 0;
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_sleep, duration)
{
	/* FIXME there were some discussions recently on whether we should
	 * relax this: the thread would simply be unscheduled until k_wakeup
	 * is issued
	 */
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(duration != K_FOREVER,
				    "sleeping forever not allowed"));

	return _impl_k_sleep(duration);
}
#endif

void _impl_k_wakeup(k_tid_t thread)
{
	unsigned int key = irq_lock();

	/* verify first if thread is not waiting on an object */
	if (_is_thread_pending(thread)) {
		irq_unlock(key);
		return;
	}

	if (_abort_thread_timeout(thread) == _INACTIVE) {
		irq_unlock(key);
		return;
	}

	_ready_thread(thread);

	if (_is_in_isr()) {
		irq_unlock(key);
	} else {
		_reschedule(key);
	}
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER1_SIMPLE_VOID(k_wakeup, K_OBJ_THREAD, k_tid_t);
#endif

k_tid_t _impl_k_current_get(void)
{
	return _current;
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER0_SIMPLE(k_current_get);
#endif

int _impl_k_is_preempt_thread(void)
{
	return !_is_in_isr() && _is_preempt(_current);
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER0_SIMPLE(k_is_preempt_thread);
#endif