Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1 | /* |
Benjamin Walsh | 8d7c274 | 2017-02-11 10:50:27 -0500 | [diff] [blame] | 2 | * Copyright (c) 2016-2017 Wind River Systems, Inc. |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 3 | * |
David B. Kinder | ac74d8b | 2017-01-18 17:01:01 -0800 | [diff] [blame] | 4 | * SPDX-License-Identifier: Apache-2.0 |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 5 | */ |
| 6 | |
| 7 | #include <kernel.h> |
Benjamin Walsh | f6ca7de | 2016-11-08 10:36:50 -0500 | [diff] [blame] | 8 | #include <kernel_structs.h> |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 9 | #include <atomic.h> |
Benjamin Walsh | b4b108d | 2016-10-13 10:31:48 -0400 | [diff] [blame] | 10 | #include <ksched.h> |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 11 | #include <wait_q.h> |
Benjamin Walsh | 6209218 | 2016-12-20 14:39:08 -0500 | [diff] [blame] | 12 | #include <misc/util.h> |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 13 | #include <syscall_handler.h> |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 14 | |
/* the only struct _kernel instance */
/* zero-initialized at boot; holds the scheduler's shared state (ready queue,
 * current thread, etc. — see kernel_structs.h for the field layout)
 */
struct _kernel _kernel = {0};
| 17 | |
/* set the bit corresponding to prio in ready q bitmap */
#ifdef CONFIG_MULTITHREADING
static void _set_ready_q_prio_bit(int prio)
{
	/* locate the bitmap word holding this priority's bit, then set it */
	int word = _get_ready_q_prio_bmap_index(prio);

	_ready_q.prio_bmap[word] |= _get_ready_q_prio_bit(prio);
}
#endif
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 28 | |
/* clear the bit corresponding to prio in ready q bitmap */
#ifdef CONFIG_MULTITHREADING
static void _clear_ready_q_prio_bit(int prio)
{
	/* locate the bitmap word holding this priority's bit, then clear it */
	int word = _get_ready_q_prio_bmap_index(prio);

	_ready_q.prio_bmap[word] &= ~_get_ready_q_prio_bit(prio);
}
#endif
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 39 | |
#ifdef CONFIG_MULTITHREADING
/*
 * Find the next thread to run when there is no thread in the cache and update
 * the cache.
 */
static struct k_thread *_get_ready_q_head(void)
{
	int top_prio = _get_highest_ready_prio();
	int q_idx = _get_ready_q_q_index(top_prio);
	sys_dlist_t *per_prio_q = &_ready_q.q[q_idx];

	/* the priority bitmap said this queue is non-empty; verify */
	__ASSERT(!sys_dlist_is_empty(per_prio_q),
		 "no thread to run (prio: %d, queue index: %u)!\n",
		 top_prio, q_idx);

	return (struct k_thread *)sys_dlist_peek_head_not_empty(per_prio_q);
}
#endif
Benjamin Walsh | 88b3691 | 2016-12-02 10:37:27 -0500 | [diff] [blame] | 61 | |
| 62 | /* |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 63 | * Add thread to the ready queue, in the slot for its priority; the thread |
| 64 | * must not be on a wait queue. |
Benjamin Walsh | 35497d6 | 2016-09-30 13:44:58 -0400 | [diff] [blame] | 65 | * |
| 66 | * This function, along with _move_thread_to_end_of_prio_q(), are the _only_ |
| 67 | * places where a thread is put on the ready queue. |
| 68 | * |
| 69 | * Interrupts must be locked when calling this function. |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 70 | */ |
Benjamin Walsh | 35497d6 | 2016-09-30 13:44:58 -0400 | [diff] [blame] | 71 | |
Benjamin Walsh | b7ef0cb | 2016-10-05 17:32:01 -0400 | [diff] [blame] | 72 | void _add_thread_to_ready_q(struct k_thread *thread) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 73 | { |
Benjamin Walsh | b12a8e0 | 2016-12-14 15:24:12 -0500 | [diff] [blame] | 74 | #ifdef CONFIG_MULTITHREADING |
Benjamin Walsh | f6ca7de | 2016-11-08 10:36:50 -0500 | [diff] [blame] | 75 | int q_index = _get_ready_q_q_index(thread->base.prio); |
| 76 | sys_dlist_t *q = &_ready_q.q[q_index]; |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 77 | |
Benjamin Walsh | f6ca7de | 2016-11-08 10:36:50 -0500 | [diff] [blame] | 78 | _set_ready_q_prio_bit(thread->base.prio); |
| 79 | sys_dlist_append(q, &thread->base.k_q_node); |
Benjamin Walsh | 35497d6 | 2016-09-30 13:44:58 -0400 | [diff] [blame] | 80 | |
Benjamin Walsh | f6ca7de | 2016-11-08 10:36:50 -0500 | [diff] [blame] | 81 | struct k_thread **cache = &_ready_q.cache; |
Benjamin Walsh | 35497d6 | 2016-09-30 13:44:58 -0400 | [diff] [blame] | 82 | |
Benjamin Walsh | 88b3691 | 2016-12-02 10:37:27 -0500 | [diff] [blame] | 83 | *cache = _is_t1_higher_prio_than_t2(thread, *cache) ? thread : *cache; |
Benjamin Walsh | b12a8e0 | 2016-12-14 15:24:12 -0500 | [diff] [blame] | 84 | #else |
| 85 | sys_dlist_append(&_ready_q.q[0], &thread->base.k_q_node); |
| 86 | _ready_q.prio_bmap[0] = 1; |
| 87 | _ready_q.cache = thread; |
| 88 | #endif |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 89 | } |
| 90 | |
Benjamin Walsh | 35497d6 | 2016-09-30 13:44:58 -0400 | [diff] [blame] | 91 | /* |
| 92 | * This function, along with _move_thread_to_end_of_prio_q(), are the _only_ |
| 93 | * places where a thread is taken off the ready queue. |
| 94 | * |
| 95 | * Interrupts must be locked when calling this function. |
| 96 | */ |
| 97 | |
Benjamin Walsh | b7ef0cb | 2016-10-05 17:32:01 -0400 | [diff] [blame] | 98 | void _remove_thread_from_ready_q(struct k_thread *thread) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 99 | { |
Benjamin Walsh | b12a8e0 | 2016-12-14 15:24:12 -0500 | [diff] [blame] | 100 | #ifdef CONFIG_MULTITHREADING |
Benjamin Walsh | f6ca7de | 2016-11-08 10:36:50 -0500 | [diff] [blame] | 101 | int q_index = _get_ready_q_q_index(thread->base.prio); |
| 102 | sys_dlist_t *q = &_ready_q.q[q_index]; |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 103 | |
Benjamin Walsh | f6ca7de | 2016-11-08 10:36:50 -0500 | [diff] [blame] | 104 | sys_dlist_remove(&thread->base.k_q_node); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 105 | if (sys_dlist_is_empty(q)) { |
Benjamin Walsh | f6ca7de | 2016-11-08 10:36:50 -0500 | [diff] [blame] | 106 | _clear_ready_q_prio_bit(thread->base.prio); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 107 | } |
Benjamin Walsh | 35497d6 | 2016-09-30 13:44:58 -0400 | [diff] [blame] | 108 | |
Benjamin Walsh | f6ca7de | 2016-11-08 10:36:50 -0500 | [diff] [blame] | 109 | struct k_thread **cache = &_ready_q.cache; |
Benjamin Walsh | 35497d6 | 2016-09-30 13:44:58 -0400 | [diff] [blame] | 110 | |
Benjamin Walsh | 88b3691 | 2016-12-02 10:37:27 -0500 | [diff] [blame] | 111 | *cache = *cache == thread ? _get_ready_q_head() : *cache; |
Benjamin Walsh | b12a8e0 | 2016-12-14 15:24:12 -0500 | [diff] [blame] | 112 | #else |
| 113 | _ready_q.prio_bmap[0] = 0; |
| 114 | _ready_q.cache = NULL; |
| 115 | sys_dlist_remove(&thread->base.k_q_node); |
| 116 | #endif |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 117 | } |
| 118 | |
/* reschedule threads if the scheduler is not locked */
/* not callable from ISR */
/* must be called with interrupts locked */
void _reschedule_threads(int key)
{
#ifdef CONFIG_PREEMPT_ENABLED
	K_DEBUG("rescheduling threads\n");

	/* nothing better to run: just restore the interrupt state */
	if (!_must_switch_threads()) {
		irq_unlock(key);
		return;
	}

	K_DEBUG("context-switching out %p\n", _current);
	_Swap(key);
#else
	/* no preemption: the caller keeps running */
	irq_unlock(key);
#endif
}
| 137 | |
/*
 * Lock the scheduler: prevent the current thread from being preempted.
 * Nestable — each k_sched_lock() must be balanced by a k_sched_unlock().
 * Must not be called from an ISR.
 */
void k_sched_lock(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
	/* NOTE(review): sched_locked counts *down* from 0 (see the matching
	 * ++ in k_sched_unlock()); the != 1 assert presumably catches the
	 * wrap-around that would occur past the maximum nesting depth —
	 * confirm against the sched_locked declaration in kernel_structs.h
	 */
	__ASSERT(_current->base.sched_locked != 1, "");
	__ASSERT(!_is_in_isr(), "");

	--_current->base.sched_locked;

	/* Probably not needed since we're in a real function,
	 * but it doesn't hurt.
	 */
	compiler_barrier();

	K_DEBUG("scheduler locked (%p:%d)\n",
		_current, _current->base.sched_locked);
#endif
}
| 155 | |
/*
 * Undo one level of k_sched_lock() nesting; when the count returns to the
 * unlocked state, give other threads a chance to run via the rescheduler.
 * Must not be called from an ISR.
 */
void k_sched_unlock(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
	/* must balance a prior k_sched_lock(): 0 means already unlocked */
	__ASSERT(_current->base.sched_locked != 0, "");
	__ASSERT(!_is_in_isr(), "");

	int key = irq_lock();

	/* compiler_barrier() not needed, comes from irq_lock() */

	/* increment mirrors the decrement in k_sched_lock() */
	++_current->base.sched_locked;

	K_DEBUG("scheduler unlocked (%p:%d)\n",
		_current, _current->base.sched_locked);

	/* a higher-priority thread may have become ready while locked */
	_reschedule_threads(key);
#endif
}
| 174 | |
/* convert milliseconds to ticks, rounding up to the next whole tick */

#ifdef _NON_OPTIMIZED_TICKS_PER_SEC
s32_t _ms_to_ticks(s32_t ms)
{
	/* widen to 64 bits before scaling to avoid intermediate overflow */
	s64_t scaled = (s64_t)ms * sys_clock_ticks_per_sec;

	return (s32_t)ceiling_fraction(scaled, MSEC_PER_SEC);
}
#endif
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 185 | |
/* pend the specified thread: it must *not* be in the ready queue */
/* must be called with interrupts locked */
void _pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
{
#ifdef CONFIG_MULTITHREADING
	sys_dlist_t *wait_q_list = (sys_dlist_t *)wait_q;
	struct k_thread *pending;

	/* keep the wait queue sorted by priority: insert before the first
	 * thread this one outranks, so threads of equal priority are
	 * served in FIFO order
	 */
	SYS_DLIST_FOR_EACH_CONTAINER(wait_q_list, pending, base.k_q_node) {
		if (_is_t1_higher_prio_than_t2(thread, pending)) {
			sys_dlist_insert_before(wait_q_list,
						&pending->base.k_q_node,
						&thread->base.k_q_node);
			goto inserted;
		}
	}

	/* outranked nobody: goes at the tail */
	sys_dlist_append(wait_q_list, &thread->base.k_q_node);

inserted:
	_mark_thread_as_pending(thread);

	/* arm a wakeup timeout unless the caller waits forever;
	 * timeout is in ms (converted to ticks here)
	 */
	if (timeout != K_FOREVER) {
		s32_t ticks = _TICK_ALIGN + _ms_to_ticks(timeout);

		_add_thread_timeout(thread, wait_q, ticks);
	}
#endif
}
| 215 | |
/* pend the current thread */
/* must be called with interrupts locked */
/* the caller is expected to _Swap() afterwards to actually yield the CPU */
void _pend_current_thread(_wait_q_t *wait_q, s32_t timeout)
{
	_remove_thread_from_ready_q(_current);
	_pend_thread(_current, wait_q, timeout);
}
| 223 | |
#if defined(CONFIG_PREEMPT_ENABLED) && defined(CONFIG_KERNEL_DEBUG)
/* debug aid: print the priority bitmaps and the head of each per-priority
 * ready queue
 */
static void _dump_ready_q(void)
{
	int i;

	K_DEBUG("bitmaps: ");
	for (i = 0; i < K_NUM_PRIO_BITMAPS; i++) {
		K_DEBUG("%x", _ready_q.prio_bmap[i]);
	}
	K_DEBUG("\n");
	for (i = 0; i < K_NUM_PRIORITIES; i++) {
		K_DEBUG("prio: %d, head: %p\n",
			i - _NUM_COOP_PRIO,
			sys_dlist_peek_head(&_ready_q.q[i]));
	}
}
#endif /* CONFIG_PREEMPT_ENABLED && CONFIG_KERNEL_DEBUG */
| 240 | |
/*
 * Check if there is a thread of higher prio than the current one. Should only
 * be called if we already know that the current thread is preemptible.
 *
 * Returns non-zero when a context switch is required.
 */
int __must_switch_threads(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
	K_DEBUG("current prio: %d, highest prio: %d\n",
		_current->base.prio, _get_highest_ready_prio());

#ifdef CONFIG_KERNEL_DEBUG
	_dump_ready_q();
#endif /* CONFIG_KERNEL_DEBUG */

	/* _is_prio_higher() encapsulates the priority ordering */
	return _is_prio_higher(_get_highest_ready_prio(), _current->base.prio);
#else
	/* without preemption a switch is never forced */
	return 0;
#endif
}
| 260 | |
/* kernel-side implementation of k_thread_priority_get(): report the given
 * thread's current priority
 */
int _impl_k_thread_priority_get(k_tid_t thread)
{
	return thread->base.prio;
}
| 265 | |
#ifdef CONFIG_USERSPACE
/* userspace syscall entry: validate the argument as a thread object, then
 * forward to _impl_k_thread_priority_get()
 */
_SYSCALL_HANDLER1_SIMPLE(k_thread_priority_get, K_OBJ_THREAD,
			 struct k_thread *);
#endif
| 270 | |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 271 | void _impl_k_thread_priority_set(k_tid_t tid, int prio) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 272 | { |
Benjamin Walsh | 3cc2ba9 | 2016-11-08 15:44:05 -0500 | [diff] [blame] | 273 | /* |
| 274 | * Use NULL, since we cannot know what the entry point is (we do not |
| 275 | * keep track of it) and idle cannot change its priority. |
| 276 | */ |
| 277 | _ASSERT_VALID_PRIO(prio, NULL); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 278 | __ASSERT(!_is_in_isr(), ""); |
| 279 | |
Benjamin Walsh | 3751123 | 2016-10-13 08:10:07 -0400 | [diff] [blame] | 280 | struct k_thread *thread = (struct k_thread *)tid; |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 281 | int key = irq_lock(); |
| 282 | |
| 283 | _thread_priority_set(thread, prio); |
| 284 | _reschedule_threads(key); |
| 285 | } |
| 286 | |
#ifdef CONFIG_USERSPACE
/* userspace syscall handler for k_thread_priority_set(): validates the
 * object and the requested priority before forwarding to the kernel-side
 * implementation
 */
_SYSCALL_HANDLER(k_thread_priority_set, thread_p, prio)
{
	struct k_thread *thread = (struct k_thread *)thread_p;

	/* target must be a valid thread object the caller may access */
	_SYSCALL_OBJ(thread, K_OBJ_THREAD);
	_SYSCALL_VERIFY_MSG(_VALID_PRIO(prio, NULL),
			    "invalid thread priority %d", (int)prio);
	/* (s8_t) cast narrows the register-width syscall argument to the
	 * priority's storage width before comparing; user threads may only
	 * lower (numerically raise) a priority
	 */
	_SYSCALL_VERIFY_MSG((s8_t)prio >= thread->base.prio,
			    "thread priority may only be downgraded (%d < %d)",
			    prio, thread->base.prio);

	_impl_k_thread_priority_set((k_tid_t)thread, prio);
	return 0;
}
#endif
| 303 | |
/*
 * Interrupts must be locked when calling this function.
 *
 * This function, along with _add_thread_to_ready_q() and
 * _remove_thread_from_ready_q(), are the _only_ places where a thread is
 * taken off or put on the ready queue.
 */
void _move_thread_to_end_of_prio_q(struct k_thread *thread)
{
#ifdef CONFIG_MULTITHREADING
	int q_idx = _get_ready_q_q_index(thread->base.prio);
	sys_dlist_t *per_prio_q = &_ready_q.q[q_idx];

	/* already last among its peers: nothing to rotate */
	if (sys_dlist_is_tail(per_prio_q, &thread->base.k_q_node)) {
		return;
	}

	sys_dlist_remove(&thread->base.k_q_node);
	sys_dlist_append(per_prio_q, &thread->base.k_q_node);

	/* the cached next-to-run may no longer be first in its queue */
	if (_ready_q.cache == thread) {
		_ready_q.cache = _get_ready_q_head();
	}
#endif
}
| 329 | |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 330 | void _impl_k_yield(void) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 331 | { |
| 332 | __ASSERT(!_is_in_isr(), ""); |
| 333 | |
| 334 | int key = irq_lock(); |
| 335 | |
Benjamin Walsh | 35497d6 | 2016-09-30 13:44:58 -0400 | [diff] [blame] | 336 | _move_thread_to_end_of_prio_q(_current); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 337 | |
| 338 | if (_current == _get_next_ready_thread()) { |
| 339 | irq_unlock(key); |
Andrew Boie | 5dcb279 | 2017-05-11 13:29:15 -0700 | [diff] [blame] | 340 | #ifdef CONFIG_STACK_SENTINEL |
| 341 | _check_stack_sentinel(); |
| 342 | #endif |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 343 | } else { |
| 344 | _Swap(key); |
| 345 | } |
| 346 | } |
| 347 | |
#ifdef CONFIG_USERSPACE
/* userspace syscall entry: no arguments to validate, forward to
 * _impl_k_yield()
 */
_SYSCALL_HANDLER0_SIMPLE_VOID(k_yield);
#endif
| 351 | |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 352 | void _impl_k_sleep(s32_t duration) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 353 | { |
Benjamin Walsh | b12a8e0 | 2016-12-14 15:24:12 -0500 | [diff] [blame] | 354 | #ifdef CONFIG_MULTITHREADING |
Carles Cufi | 9849df8 | 2016-12-02 15:31:08 +0100 | [diff] [blame] | 355 | /* volatile to guarantee that irq_lock() is executed after ticks is |
| 356 | * populated |
| 357 | */ |
Kumar Gala | cc334c7 | 2017-04-21 10:55:34 -0500 | [diff] [blame] | 358 | volatile s32_t ticks; |
Carles Cufi | 9849df8 | 2016-12-02 15:31:08 +0100 | [diff] [blame] | 359 | unsigned int key; |
| 360 | |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 361 | __ASSERT(!_is_in_isr(), ""); |
Benjamin Walsh | 688973e | 2016-10-05 16:03:31 -0400 | [diff] [blame] | 362 | __ASSERT(duration != K_FOREVER, ""); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 363 | |
Kumar Gala | 34a57db | 2017-04-19 10:39:57 -0500 | [diff] [blame] | 364 | K_DEBUG("thread %p for %d ns\n", _current, duration); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 365 | |
Benjamin Walsh | 5596f78 | 2016-12-09 19:57:17 -0500 | [diff] [blame] | 366 | /* wait of 0 ms is treated as a 'yield' */ |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 367 | if (duration == 0) { |
| 368 | k_yield(); |
| 369 | return; |
| 370 | } |
| 371 | |
Carles Cufi | 9849df8 | 2016-12-02 15:31:08 +0100 | [diff] [blame] | 372 | ticks = _TICK_ALIGN + _ms_to_ticks(duration); |
| 373 | key = irq_lock(); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 374 | |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 375 | _remove_thread_from_ready_q(_current); |
Benjamin Walsh | a36e0cf | 2016-11-23 22:15:44 -0500 | [diff] [blame] | 376 | _add_thread_timeout(_current, NULL, ticks); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 377 | |
| 378 | _Swap(key); |
Benjamin Walsh | b12a8e0 | 2016-12-14 15:24:12 -0500 | [diff] [blame] | 379 | #endif |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 380 | } |
| 381 | |
#ifdef CONFIG_USERSPACE
/* userspace syscall handler for k_sleep(): reject K_FOREVER, then forward */
_SYSCALL_HANDLER(k_sleep, duration)
{
	/* FIXME there were some discussions recently on whether we should
	 * relax this, thread would be unscheduled until k_wakeup issued
	 */
	_SYSCALL_VERIFY_MSG(duration != K_FOREVER,
			    "sleeping forever not allowed");
	_impl_k_sleep(duration);

	return 0;
}
#endif
| 395 | |
/* kernel-side implementation of k_wakeup(): wake a thread sleeping in
 * k_sleep(). Silently does nothing when the thread is pending on a kernel
 * object or is not actually asleep.
 */
void _impl_k_wakeup(k_tid_t thread)
{
	int key = irq_lock();

	/* verify first if thread is not waiting on an object */
	if (_is_thread_pending(thread)) {
		irq_unlock(key);
		return;
	}

	/* _INACTIVE: no timeout was armed, so the thread is not sleeping */
	if (_abort_thread_timeout(thread) == _INACTIVE) {
		irq_unlock(key);
		return;
	}

	_ready_thread(thread);

	/* cannot context-switch from an ISR; just leave the thread ready */
	if (_is_in_isr()) {
		irq_unlock(key);
	} else {
		_reschedule_threads(key);
	}
}
| 419 | |
#ifdef CONFIG_USERSPACE
/* userspace syscall entry: validate the thread object, forward to
 * _impl_k_wakeup()
 */
_SYSCALL_HANDLER1_SIMPLE_VOID(k_wakeup, K_OBJ_THREAD, k_tid_t);
#endif
| 423 | |
/* kernel-side implementation of k_current_get(): identifier of the
 * currently running thread
 */
k_tid_t _impl_k_current_get(void)
{
	return _current;
}
| 428 | |
#ifdef CONFIG_USERSPACE
/* userspace syscall entry: no arguments, forward to _impl_k_current_get() */
_SYSCALL_HANDLER0_SIMPLE(k_current_get);
#endif
| 432 | |
#ifdef CONFIG_TIMESLICING
/* time-slice bookkeeping shared with the timer subsystem; defined elsewhere
 * in the kernel
 */
extern s32_t _time_slice_duration;	/* Measured in ms */
extern s32_t _time_slice_elapsed;	/* Measured in ms */
extern int _time_slice_prio_ceiling;
| 437 | |
/*
 * Configure round-robin time slicing: slice length in ms (0 presumably
 * disables slicing — see _is_thread_time_slicing()) and the preemptible
 * priority ceiling above which threads are never sliced.
 */
void k_sched_time_slice_set(s32_t duration_in_ms, int prio)
{
	__ASSERT(duration_in_ms >= 0, "");
	__ASSERT((prio >= 0) && (prio < CONFIG_NUM_PREEMPT_PRIORITIES), "");

	_time_slice_duration = duration_in_ms;
	/* restart the accounting for the current slice */
	_time_slice_elapsed = 0;
	_time_slice_prio_ceiling = prio;
}
Ramesh Thomas | 89ffd44 | 2017-02-05 19:37:19 -0800 | [diff] [blame] | 447 | |
Ramesh Thomas | 89ffd44 | 2017-02-05 19:37:19 -0800 | [diff] [blame] | 448 | int _is_thread_time_slicing(struct k_thread *thread) |
| 449 | { |
| 450 | /* |
| 451 | * Time slicing is done on the thread if following conditions are met |
| 452 | * |
| 453 | * Time slice duration should be set > 0 |
| 454 | * Should not be the idle thread |
| 455 | * Priority should be higher than time slice priority ceiling |
| 456 | * There should be multiple threads active with same priority |
| 457 | */ |
| 458 | |
| 459 | if (!(_time_slice_duration > 0) || (_is_idle_thread_ptr(thread)) |
| 460 | || _is_prio_higher(thread->base.prio, _time_slice_prio_ceiling)) { |
| 461 | return 0; |
| 462 | } |
| 463 | |
| 464 | int q_index = _get_ready_q_q_index(thread->base.prio); |
| 465 | sys_dlist_t *q = &_ready_q.q[q_index]; |
| 466 | |
| 467 | return sys_dlist_has_multiple_nodes(q); |
| 468 | } |
| 469 | |
/* Must be called with interrupts locked */
/* Should be called only immediately before a thread switch */
/*
 * Reset time-slice accounting for the incoming thread; on a tickless
 * kernel, also reprogram the timer so the slice expiry generates an
 * interrupt.
 */
void _update_time_slice_before_swap(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	/* incoming thread is not sliced: leave the timer programming alone */
	if (!_is_thread_time_slicing(_get_next_ready_thread())) {
		return;
	}

	/* NOTE(review): remaining time is whatever the timer is currently
	 * programmed for — the shorter of it and the slice wins
	 */
	u32_t remaining = _get_remaining_program_time();

	if (!remaining || (_time_slice_duration < remaining)) {
		_set_time(_time_slice_duration);
	} else {
		/* Account previous elapsed time and reprogram
		 * timer with remaining time
		 */
		_set_time(remaining);
	}

#endif
	/* Restart time slice count at new thread switch */
	_time_slice_elapsed = 0;
}
#endif /* CONFIG_TIMESLICING */
Benjamin Walsh | 445830d | 2016-11-10 15:54:27 -0500 | [diff] [blame] | 495 | |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 496 | int _impl_k_is_preempt_thread(void) |
Benjamin Walsh | 445830d | 2016-11-10 15:54:27 -0500 | [diff] [blame] | 497 | { |
| 498 | return !_is_in_isr() && _is_preempt(_current); |
| 499 | } |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 500 | |
| 501 | #ifdef CONFIG_USERSPACE |
Andrew Boie | 225e4c0 | 2017-10-12 09:54:26 -0700 | [diff] [blame] | 502 | _SYSCALL_HANDLER0_SIMPLE(k_is_preempt_thread); |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 503 | #endif |