/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/spinlock.h>
#include <wait_q.h>
#include <kthread.h>
#include <priority_q.h>
#include <kswap.h>
#include <kernel_arch_func.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <stdbool.h>
#include <kernel_internal.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/timing/timing.h>
#include <zephyr/sys/util.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

struct k_spinlock _sched_spinlock;

static void update_cache(int preempt_ok);
static void halt_thread(struct k_thread *thread, uint8_t new_state);
static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q);

static inline int is_preempt(struct k_thread *thread)
{
	/* explanation in kernel_structs.h */
	return thread->base.preempt <= _PREEMPT_THRESHOLD;
}

BUILD_ASSERT(CONFIG_NUM_COOP_PRIORITIES >= CONFIG_NUM_METAIRQ_PRIORITIES,
	     "You need to provide at least as many CONFIG_NUM_COOP_PRIORITIES as "
	     "CONFIG_NUM_METAIRQ_PRIORITIES as Meta IRQs are just a special class of cooperative "
	     "threads.");

static inline int is_metairq(struct k_thread *thread)
{
#if CONFIG_NUM_METAIRQ_PRIORITIES > 0
	return (thread->base.prio - K_HIGHEST_THREAD_PRIO)
		< CONFIG_NUM_METAIRQ_PRIORITIES;
#else
	ARG_UNUSED(thread);
	return 0;
#endif
}
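
/* Illustrative sketch (not compiled): the meta-IRQ band is the N most
 * urgent (numerically lowest) priorities.  Assuming the default
 * K_HIGHEST_THREAD_PRIO == -16 and CONFIG_NUM_METAIRQ_PRIORITIES == 2:
 *
 *   prio -16: (-16 - -16) = 0 < 2  ->  meta-IRQ thread
 *   prio -15: (-15 - -16) = 1 < 2  ->  meta-IRQ thread
 *   prio -14: (-14 - -16) = 2      ->  ordinary cooperative thread
 */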

#if CONFIG_ASSERT
static inline bool is_thread_dummy(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_DUMMY) != 0U;
}
#endif

/*
 * Return value same as e.g. memcmp
 * > 0 -> thread 1 priority  > thread 2 priority
 * = 0 -> thread 1 priority == thread 2 priority
 * < 0 -> thread 1 priority  < thread 2 priority
 * Do not rely on the actual value returned aside from the above.
 * (Again, like memcmp.)
 */
int32_t z_sched_prio_cmp(struct k_thread *thread_1,
			 struct k_thread *thread_2)
{
	/* `prio` is <32b, so the below cannot overflow. */
	int32_t b1 = thread_1->base.prio;
	int32_t b2 = thread_2->base.prio;

	if (b1 != b2) {
		return b2 - b1;
	}

#ifdef CONFIG_SCHED_DEADLINE
	/* If we assume all deadlines live within the same "half" of
	 * the 32 bit modulus space (this is a documented API rule),
	 * then the latest deadline in the queue minus the earliest is
	 * guaranteed to be (2's complement) non-negative.  We can
	 * leverage that to compare the values without having to check
	 * the current time.
	 */
	uint32_t d1 = thread_1->base.prio_deadline;
	uint32_t d2 = thread_2->base.prio_deadline;

	if (d1 != d2) {
		/* Sooner deadline means higher effective priority.
		 * Doing the calculation with unsigned types and casting
		 * to signed isn't perfect, but at least reduces this
		 * from undefined behavior on overflow to
		 * implementation-defined behavior.
		 */
		return (int32_t) (d2 - d1);
	}
#endif
	return 0;
}

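/* Worked example (illustrative only, not compiled): two equal-prio
 * threads with deadlines straddling the 32-bit wrap point:
 *
 *   d1 = 0xFFFFFFF0   (sooner: just before wrap)
 *   d2 = 0x00000010   (later: just after wrap)
 *
 * d2 - d1 == 0x20 in uint32_t arithmetic, so (int32_t)(d2 - d1) == 32 > 0
 * and the thread holding d1 compares as higher effective priority even
 * though d1 is numerically larger.  This is why the API requires all live
 * deadlines to span less than half of the 32-bit modulus space.
 */
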
static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
					 int preempt_ok)
{
	/* Preemption is OK if it's being explicitly allowed by
	 * software state (e.g. the thread called k_yield())
	 */
	if (preempt_ok != 0) {
		return true;
	}

	__ASSERT(_current != NULL, "");

	/* Or if we're pended/suspended/dummy (duh) */
	if (z_is_thread_prevented_from_running(_current)) {
		return true;
	}

	/* Edge case on ARM where a thread can be pended out of an
	 * interrupt handler before the "synchronous" swap starts
	 * context switching.  Platforms with atomic swap can never
	 * hit this.
	 */
	if (IS_ENABLED(CONFIG_SWAP_NONATOMIC)
	    && z_is_thread_timeout_active(thread)) {
		return true;
	}

	/* Otherwise we have to be running a preemptible thread or
	 * switching to a metairq
	 */
	if (is_preempt(_current) || is_metairq(thread)) {
		return true;
	}

	return false;
}

#ifdef CONFIG_SCHED_CPU_MASK
static ALWAYS_INLINE struct k_thread *_priq_dumb_mask_best(sys_dlist_t *pq)
{
	/* With masks enabled we need to be prepared to walk the list
	 * looking for one we can run
	 */
	struct k_thread *thread;

	SYS_DLIST_FOR_EACH_CONTAINER(pq, thread, base.qnode_dlist) {
		if ((thread->base.cpu_mask & BIT(_current_cpu->id)) != 0) {
			return thread;
		}
	}
	return NULL;
}
#endif

#if defined(CONFIG_SCHED_DUMB) || defined(CONFIG_WAITQ_DUMB)
static ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq,
					  struct k_thread *thread)
{
	struct k_thread *t;

	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	SYS_DLIST_FOR_EACH_CONTAINER(pq, t, base.qnode_dlist) {
		if (z_sched_prio_cmp(thread, t) > 0) {
			sys_dlist_insert(&t->base.qnode_dlist,
					 &thread->base.qnode_dlist);
			return;
		}
	}

	sys_dlist_append(pq, &thread->base.qnode_dlist);
}
#endif
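
/* Illustrative sketch (not compiled): the "dumb" queue is a plain dlist
 * kept sorted by effective priority, best first.  Adding threads with
 * priorities 0, -1 and 0 (lower number == more urgent) yields:
 *
 *   add(prio 0)   ->  [0]
 *   add(prio -1)  ->  [-1, 0]       (inserted before the first worse entry)
 *   add(prio 0)   ->  [-1, 0, 0]    (appended: equal priorities stay FIFO)
 *
 * Insertion is O(n), which is why this backend is only suited to small
 * thread counts.
 */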

static ALWAYS_INLINE void *thread_runq(struct k_thread *thread)
{
#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	int cpu, m = thread->base.cpu_mask;

	/* Edge case: it's legal per the API to "make runnable" a
	 * thread with all CPUs masked off (i.e. one that isn't
	 * actually runnable!).  Sort of a wart in the API and maybe
	 * we should address this in docs/assertions instead to avoid
	 * the extra test.
	 */
	cpu = m == 0 ? 0 : u32_count_trailing_zeros(m);

	return &_kernel.cpus[cpu].ready_q.runq;
#else
	ARG_UNUSED(thread);
	return &_kernel.ready_q.runq;
#endif
}

static ALWAYS_INLINE void *curr_cpu_runq(void)
{
#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	return &arch_curr_cpu()->ready_q.runq;
#else
	return &_kernel.ready_q.runq;
#endif
}

static ALWAYS_INLINE void runq_add(struct k_thread *thread)
{
	_priq_run_add(thread_runq(thread), thread);
}

static ALWAYS_INLINE void runq_remove(struct k_thread *thread)
{
	_priq_run_remove(thread_runq(thread), thread);
}

static ALWAYS_INLINE struct k_thread *runq_best(void)
{
	return _priq_run_best(curr_cpu_runq());
}

/* _current is never in the run queue until context switch on
 * SMP configurations, see z_requeue_current()
 */
static inline bool should_queue_thread(struct k_thread *thread)
{
	return !IS_ENABLED(CONFIG_SMP) || thread != _current;
}

static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_QUEUED;
	if (should_queue_thread(thread)) {
		runq_add(thread);
	}
#ifdef CONFIG_SMP
	if (thread == _current) {
		/* add current to end of queue means "yield" */
		_current_cpu->swap_ok = true;
	}
#endif
}

static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_QUEUED;
	if (should_queue_thread(thread)) {
		runq_remove(thread);
	}
}

static void signal_pending_ipi(void)
{
	/* Synchronization note: you might think we need to lock these
	 * two steps, but an IPI is idempotent.  It's OK if we do it
	 * twice.  All we require is that if a CPU sees the flag true,
	 * it is guaranteed to send the IPI, and if a core sets
	 * pending_ipi, the IPI will be sent the next time through
	 * this code.
	 */
#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
	if (arch_num_cpus() > 1) {
		if (_kernel.pending_ipi) {
			_kernel.pending_ipi = false;
			arch_sched_ipi();
		}
	}
#endif
}

#ifdef CONFIG_SMP
/* Called out of z_swap() when CONFIG_SMP.  The current thread can
 * never live in the run queue until we are inexorably on the context
 * switch path on SMP, otherwise there is a deadlock condition where a
 * set of CPUs pick a cycle of threads to run and wait for them all to
 * context switch forever.
 */
void z_requeue_current(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		runq_add(thread);
	}
	signal_pending_ipi();
}

/* Return true if the thread is aborting, else false */
static inline bool is_aborting(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_ABORTING) != 0U;
}

/* Return true if the thread is aborting or suspending, else false */
static inline bool is_halting(struct k_thread *thread)
{
	return (thread->base.thread_state &
		(_THREAD_ABORTING | _THREAD_SUSPENDING)) != 0U;
}
#endif

/* Clear the halting bits (_THREAD_ABORTING and _THREAD_SUSPENDING) */
static inline void clear_halting(struct k_thread *thread)
{
	thread->base.thread_state &= ~(_THREAD_ABORTING | _THREAD_SUSPENDING);
}

static ALWAYS_INLINE struct k_thread *next_up(void)
{
#ifdef CONFIG_SMP
	if (is_halting(_current)) {
		halt_thread(_current, is_aborting(_current) ?
				      _THREAD_DEAD : _THREAD_SUSPENDED);
	}
#endif

	struct k_thread *thread = runq_best();

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && \
	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
	/* MetaIRQs must always attempt to return back to a
	 * cooperative thread they preempted and not whatever happens
	 * to be highest priority now. The cooperative thread was
	 * promised it wouldn't be preempted (by non-metairq threads)!
	 */
	struct k_thread *mirqp = _current_cpu->metairq_preempted;

	if (mirqp != NULL && (thread == NULL || !is_metairq(thread))) {
		if (!z_is_thread_prevented_from_running(mirqp)) {
			thread = mirqp;
		} else {
			_current_cpu->metairq_preempted = NULL;
		}
	}
#endif

#ifndef CONFIG_SMP
	/* In uniprocessor mode, we can leave the current thread in
	 * the queue (actually we have to, otherwise the assembly
	 * context switch code for all architectures would be
	 * responsible for putting it back in z_swap and ISR return!),
	 * which makes this choice simple.
	 */
	return (thread != NULL) ? thread : _current_cpu->idle_thread;
#else
	/* Under SMP, the "cache" mechanism for selecting the next
	 * thread doesn't work, so we have more work to do to test
	 * _current against the best choice from the queue.  Here, the
	 * thread selected above represents "the best thread that is
	 * not current".
	 *
	 * Subtle note on "queued": in SMP mode, _current does not
	 * live in the queue, so this isn't exactly the same thing as
	 * "ready", it means "is _current already added back to the
	 * queue such that we don't want to re-add it".
	 */
	bool queued = z_is_thread_queued(_current);
	bool active = !z_is_thread_prevented_from_running(_current);

	if (thread == NULL) {
		thread = _current_cpu->idle_thread;
	}

	if (active) {
		int32_t cmp = z_sched_prio_cmp(_current, thread);

		/* Ties only switch if state says we yielded */
		if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) {
			thread = _current;
		}

		if (!should_preempt(thread, _current_cpu->swap_ok)) {
			thread = _current;
		}
	}

	/* Put _current back into the queue */
	if (thread != _current && active &&
	    !z_is_idle_thread_object(_current) && !queued) {
		queue_thread(_current);
	}

	/* Take the new _current out of the queue */
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}

	_current_cpu->swap_ok = false;
	return thread;
#endif
}

static void move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}
	queue_thread(thread);
	update_cache(thread == _current);
}

static void flag_ipi(void)
{
#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
	if (arch_num_cpus() > 1) {
		_kernel.pending_ipi = true;
	}
#endif
}

#ifdef CONFIG_TIMESLICING

static int slice_ticks = DIV_ROUND_UP(CONFIG_TIMESLICE_SIZE * Z_HZ_ticks, Z_HZ_ms);
static int slice_max_prio = CONFIG_TIMESLICE_PRIORITY;
static struct _timeout slice_timeouts[CONFIG_MP_MAX_NUM_CPUS];
static bool slice_expired[CONFIG_MP_MAX_NUM_CPUS];
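
/* Worked example (illustrative only): with CONFIG_TIMESLICE_SIZE=10 (ms),
 * Z_HZ_ticks=32768 and Z_HZ_ms=1000, the default slice is
 * DIV_ROUND_UP(10 * 32768, 1000) = DIV_ROUND_UP(327680, 1000) = 328 ticks.
 * Rounding up guarantees a nonzero millisecond slice never truncates to
 * zero ticks, which would silently disable slicing.
 */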

#ifdef CONFIG_SWAP_NONATOMIC
/* If z_swap() isn't atomic, then it's possible for a timer interrupt
 * to try to timeslice away _current after it has already pended
 * itself but before the corresponding context switch.  Treat that as
 * a noop condition in z_time_slice().
 */
static struct k_thread *pending_current;
#endif

static inline int slice_time(struct k_thread *thread)
{
	int ret = slice_ticks;

#ifdef CONFIG_TIMESLICE_PER_THREAD
	if (thread->base.slice_ticks != 0) {
		ret = thread->base.slice_ticks;
	}
#else
	ARG_UNUSED(thread);
#endif
	return ret;
}

static inline bool sliceable(struct k_thread *thread)
{
	bool ret = is_preempt(thread)
		&& slice_time(thread) != 0
		&& !z_is_prio_higher(thread->base.prio, slice_max_prio)
		&& !z_is_thread_prevented_from_running(thread)
		&& !z_is_idle_thread_object(thread);

#ifdef CONFIG_TIMESLICE_PER_THREAD
	ret |= thread->base.slice_ticks != 0;
#endif

	return ret;
}

static void slice_timeout(struct _timeout *timeout)
{
	int cpu = ARRAY_INDEX(slice_timeouts, timeout);

	slice_expired[cpu] = true;

	/* We need an IPI if we just handled a timeslice expiration
	 * for a different CPU.  Ideally this would be able to target
	 * the specific core, but that's not part of the API yet.
	 */
	if (IS_ENABLED(CONFIG_SMP) && cpu != _current_cpu->id) {
		flag_ipi();
	}
}

void z_reset_time_slice(struct k_thread *thread)
{
	int cpu = _current_cpu->id;

	z_abort_timeout(&slice_timeouts[cpu]);
	slice_expired[cpu] = false;
	if (sliceable(thread)) {
		z_add_timeout(&slice_timeouts[cpu], slice_timeout,
			      K_TICKS(slice_time(thread) - 1));
	}
}

void k_sched_time_slice_set(int32_t slice, int prio)
{
	K_SPINLOCK(&_sched_spinlock) {
		slice_ticks = k_ms_to_ticks_ceil32(slice);
		slice_max_prio = prio;
		z_reset_time_slice(_current);
	}
}
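
/* Usage sketch (illustrative only): give preemptible threads of priority 0
 * or lower (numerically >= 0) a 20 ms round-robin slice, then disable
 * slicing again:
 *
 *   k_sched_time_slice_set(20, 0);   // 20 ms slices for prio 0 and below
 *   ...
 *   k_sched_time_slice_set(0, 0);    // a slice of 0 disables timeslicing
 */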

#ifdef CONFIG_TIMESLICE_PER_THREAD
void k_thread_time_slice_set(struct k_thread *thread, int32_t thread_slice_ticks,
			     k_thread_timeslice_fn_t expired, void *data)
{
	K_SPINLOCK(&_sched_spinlock) {
		thread->base.slice_ticks = thread_slice_ticks;
		thread->base.slice_expired = expired;
		thread->base.slice_data = data;
	}
}
#endif
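
/* Usage sketch (illustrative only, requires CONFIG_TIMESLICE_PER_THREAD;
 * `my_thread` and `on_slice_end` are hypothetical names):
 *
 *   static void on_slice_end(struct k_thread *thread, void *data)
 *   {
 *           // invoked from z_time_slice() when this thread's slice expires
 *   }
 *
 *   k_thread_time_slice_set(&my_thread, k_ms_to_ticks_ceil32(5),
 *                           on_slice_end, NULL);
 */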

/* Called out of each timer interrupt */
void z_time_slice(void)
{
	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
	struct k_thread *curr = _current;

#ifdef CONFIG_SWAP_NONATOMIC
	if (pending_current == curr) {
		z_reset_time_slice(curr);
		k_spin_unlock(&_sched_spinlock, key);
		return;
	}
	pending_current = NULL;
#endif

	if (slice_expired[_current_cpu->id] && sliceable(curr)) {
#ifdef CONFIG_TIMESLICE_PER_THREAD
		if (curr->base.slice_expired) {
			k_spin_unlock(&_sched_spinlock, key);
			curr->base.slice_expired(curr, curr->base.slice_data);
			key = k_spin_lock(&_sched_spinlock);
		}
#endif
		if (!z_is_thread_prevented_from_running(curr)) {
			move_thread_to_end_of_prio_q(curr);
		}
		z_reset_time_slice(curr);
	}
	k_spin_unlock(&_sched_spinlock, key);
}
#endif

/* Track cooperative threads preempted by metairqs so we can return to
 * them specifically. Called at the moment a new thread has been
 * selected to run.
 */
static void update_metairq_preempt(struct k_thread *thread)
{
#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && \
	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
	if (is_metairq(thread) && !is_metairq(_current) &&
	    !is_preempt(_current)) {
		/* Record new preemption */
		_current_cpu->metairq_preempted = _current;
	} else if (!is_metairq(thread) && !z_is_idle_thread_object(thread)) {
		/* Returning from existing preemption */
		_current_cpu->metairq_preempted = NULL;
	}
#else
	ARG_UNUSED(thread);
#endif
}

static void update_cache(int preempt_ok)
{
#ifndef CONFIG_SMP
	struct k_thread *thread = next_up();

	if (should_preempt(thread, preempt_ok)) {
#ifdef CONFIG_TIMESLICING
		if (thread != _current) {
			z_reset_time_slice(thread);
		}
#endif
		update_metairq_preempt(thread);
		_kernel.ready_q.cache = thread;
	} else {
		_kernel.ready_q.cache = _current;
	}

#else
	/* The way this works is that the CPU record keeps its
	 * "cooperative swapping is OK" flag until the next reschedule
	 * call or context switch.  It doesn't need to be tracked per
	 * thread because if the thread gets preempted for whatever
	 * reason the scheduler will make the same decision anyway.
	 */
	_current_cpu->swap_ok = preempt_ok;
#endif
}

static bool thread_active_elsewhere(struct k_thread *thread)
{
	/* True if the thread is currently running on another CPU.
	 * There are more scalable designs to answer this question in
	 * constant time, but this is fine for now.
	 */
#ifdef CONFIG_SMP
	int currcpu = _current_cpu->id;

	unsigned int num_cpus = arch_num_cpus();

	for (int i = 0; i < num_cpus; i++) {
		if ((i != currcpu) &&
		    (_kernel.cpus[i].current == thread)) {
			return true;
		}
	}
#endif
	ARG_UNUSED(thread);
	return false;
}

static void ready_thread(struct k_thread *thread)
{
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(thread));
#endif

	/* If the thread is already queued, do not try to add it to the
	 * run queue again.
	 */
	if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) {
		SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_ready, thread);

		queue_thread(thread);
		update_cache(0);
		flag_ipi();
	}
}

void z_ready_thread(struct k_thread *thread)
{
	K_SPINLOCK(&_sched_spinlock) {
		if (!thread_active_elsewhere(thread)) {
			ready_thread(thread);
		}
	}
}

void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	K_SPINLOCK(&_sched_spinlock) {
		move_thread_to_end_of_prio_q(thread);
	}
}

void z_sched_start(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

	if (z_has_thread_started(thread)) {
		k_spin_unlock(&_sched_spinlock, key);
		return;
	}

	z_mark_thread_as_started(thread);
	ready_thread(thread);
	z_reschedule(&_sched_spinlock, key);
}

/**
 * @brief Halt a thread
 *
 * If the target thread is running on another CPU, flag it as needing to
 * abort and send an IPI (if supported) to force a schedule point and wait
 * until the target thread is switched out (ISRs will spin to wait and threads
 * will block to wait). If the target thread is not running on another CPU,
 * then it is safe to act immediately.
 *
 * Upon entry to this routine, the scheduler lock is already held. It is
 * released before this routine returns.
 *
 * @param thread Thread to suspend or abort
 * @param key Current key for _sched_spinlock
 * @param terminate True if aborting thread, false if suspending thread
 */
static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
			  bool terminate)
{
#ifdef CONFIG_SMP
	if (is_halting(_current) && arch_is_in_isr()) {
		/* Another CPU (in an ISR) or thread is waiting for the
		 * current thread to halt. Halt it now to help avoid a
		 * potential deadlock.
		 */
		halt_thread(_current,
			    is_aborting(_current) ? _THREAD_DEAD
						  : _THREAD_SUSPENDED);
	}

	bool active = thread_active_elsewhere(thread);

	if (active) {
		/* It's running somewhere else, flag and poke */
		thread->base.thread_state |= (terminate ? _THREAD_ABORTING
							: _THREAD_SUSPENDING);

		/* We might spin to wait, so a true synchronous IPI is needed
		 * here, not deferred!
		 */
#ifdef CONFIG_SCHED_IPI_SUPPORTED
		arch_sched_ipi();
#endif
	}

	if (is_halting(thread) && (thread != _current)) {
		if (arch_is_in_isr()) {
			/* ISRs can only spin waiting another CPU */
			k_spin_unlock(&_sched_spinlock, key);
			while (is_halting(thread)) {
			}

			/* Now we know it's halting, but not necessarily
			 * halted (suspended or aborted). Wait for the switch
			 * to happen!
			 */
			key = k_spin_lock(&_sched_spinlock);
			z_sched_switch_spin(thread);
			k_spin_unlock(&_sched_spinlock, key);
		} else if (active) {
			/* Threads can wait on a queue */
			add_to_waitq_locked(_current, terminate ?
						      &thread->join_queue :
						      &thread->halt_queue);
			z_swap(&_sched_spinlock, key);
		}
		return; /* lock has been released */
	}
#endif
	halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
	if ((thread == _current) && !arch_is_in_isr()) {
		z_swap(&_sched_spinlock, key);
		__ASSERT(!terminate, "aborted _current back from dead");
	} else {
		k_spin_unlock(&_sched_spinlock, key);
	}
}

void z_impl_k_thread_suspend(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, suspend, thread);

	(void)z_abort_thread_timeout(thread);

	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

	if ((thread->base.thread_state & _THREAD_SUSPENDED) != 0U) {

		/* The target thread is already suspended. Nothing to do. */

		k_spin_unlock(&_sched_spinlock, key);
		return;
	}

	z_thread_halt(thread, key, false);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, suspend, thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_suspend(struct k_thread *thread)
{
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_suspend(thread);
}
#include <syscalls/k_thread_suspend_mrsh.c>
#endif

void z_impl_k_thread_resume(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, resume, thread);

	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);

	/* Do not try to resume a thread that was not suspended */
	if (!z_is_thread_suspended(thread)) {
		k_spin_unlock(&_sched_spinlock, key);
		return;
	}

	z_mark_thread_as_not_suspended(thread);
	ready_thread(thread);

	z_reschedule(&_sched_spinlock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, resume, thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_resume(struct k_thread *thread)
{
	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_resume(thread);
}
#include <syscalls/k_thread_resume_mrsh.c>
#endif
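
/* Usage sketch (illustrative only; `worker` is a hypothetical thread):
 *
 *   k_thread_suspend(&worker);   // worker stops being schedulable
 *   ...
 *   k_thread_resume(&worker);    // worker becomes ready again
 *
 * Suspending an already-suspended thread, or resuming one that is not
 * suspended, is a no-op per the state checks above.
 */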
| 790 | |
Maksim Masalski | 970820e | 2021-05-25 14:40:14 +0800 | [diff] [blame] | 791 | static _wait_q_t *pended_on_thread(struct k_thread *thread) |
Andy Ross | 8bdabcc | 2020-01-07 09:58:46 -0800 | [diff] [blame] | 792 | { |
| 793 | __ASSERT_NO_MSG(thread->base.pended_on); |
| 794 | |
| 795 | return thread->base.pended_on; |
| 796 | } |
| 797 | |
Andy Ross | ed6b4fb | 2020-01-23 13:04:15 -0800 | [diff] [blame] | 798 | static void unready_thread(struct k_thread *thread) |
| 799 | { |
| 800 | if (z_is_thread_queued(thread)) { |
Andy Ross | c230fb3 | 2021-09-23 16:41:30 -0700 | [diff] [blame] | 801 | dequeue_thread(thread); |
Andy Ross | ed6b4fb | 2020-01-23 13:04:15 -0800 | [diff] [blame] | 802 | } |
| 803 | update_cache(thread == _current); |
| 804 | } |
| 805 | |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 806 | /* _sched_spinlock must be held */ |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 807 | static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q) |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 808 | { |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 809 | unready_thread(thread); |
| 810 | z_mark_thread_as_pending(thread); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 811 | |
| 812 | SYS_PORT_TRACING_FUNC(k_thread, sched_pend, thread); |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 813 | |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 814 | if (wait_q != NULL) { |
| 815 | thread->base.pended_on = wait_q; |
| 816 | z_priq_wait_add(&wait_q->waitq, thread); |
Andy Ross | 15d5208 | 2018-09-26 13:19:31 -0700 | [diff] [blame] | 817 | } |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 818 | } |
Andy Ross | 15d5208 | 2018-09-26 13:19:31 -0700 | [diff] [blame] | 819 | |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 820 | static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout) |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 821 | { |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 822 | if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) { |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 823 | z_add_thread_timeout(thread, timeout); |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 824 | } |
Andy Ross | e7ded11 | 2018-04-11 14:52:47 -0700 | [diff] [blame] | 825 | } |
| 826 | |
Andy Ross | c32f376 | 2022-10-08 07:24:28 -0700 | [diff] [blame] | 827 | static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q, |
| 828 | k_timeout_t timeout) |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 829 | { |
Anas Nashif | 39f632e | 2020-12-07 13:15:42 -0500 | [diff] [blame] | 830 | #ifdef CONFIG_KERNEL_COHERENCE |
Andy Ross | 1ba7414 | 2021-02-09 13:48:25 -0800 | [diff] [blame] | 831 | __ASSERT_NO_MSG(wait_q == NULL || arch_mem_coherent(wait_q)); |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 832 | #endif |
Andy Ross | c32f376 | 2022-10-08 07:24:28 -0700 | [diff] [blame] | 833 | add_to_waitq_locked(thread, wait_q); |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 834 | add_thread_timeout(thread, timeout); |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 835 | } |
| 836 | |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 837 | void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q, |
| 838 | k_timeout_t timeout) |
Andy Ross | e7ded11 | 2018-04-11 14:52:47 -0700 | [diff] [blame] | 839 | { |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 840 | __ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread)); |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 841 | K_SPINLOCK(&_sched_spinlock) { |
Andy Ross | c32f376 | 2022-10-08 07:24:28 -0700 | [diff] [blame] | 842 | pend_locked(thread, wait_q, timeout); |
| 843 | } |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 844 | } |
| 845 | |
Andrew Boie | ffc5bdf | 2020-09-05 11:44:01 -0700 | [diff] [blame] | 846 | static inline void unpend_thread_no_timeout(struct k_thread *thread) |
| 847 | { |
Maksim Masalski | 970820e | 2021-05-25 14:40:14 +0800 | [diff] [blame] | 848 | _priq_wait_remove(&pended_on_thread(thread)->waitq, thread); |
Andrew Boie | ffc5bdf | 2020-09-05 11:44:01 -0700 | [diff] [blame] | 849 | z_mark_thread_as_not_pending(thread); |
| 850 | thread->base.pended_on = NULL; |
| 851 | } |
| 852 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 853 | ALWAYS_INLINE void z_unpend_thread_no_timeout(struct k_thread *thread) |
Andy Ross | e7ded11 | 2018-04-11 14:52:47 -0700 | [diff] [blame] | 854 | { |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 855 | K_SPINLOCK(&_sched_spinlock) { |
Peter Mitsis | 31dfd84f | 2023-01-06 13:20:28 -0500 | [diff] [blame] | 856 | if (thread->base.pended_on != NULL) { |
| 857 | unpend_thread_no_timeout(thread); |
| 858 | } |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 859 | } |
Andy Ross | e7ded11 | 2018-04-11 14:52:47 -0700 | [diff] [blame] | 860 | } |
| 861 | |
Aastha Grover | 5537776 | 2023-03-08 16:54:12 -0500 | [diff] [blame] | 862 | void z_sched_wake_thread(struct k_thread *thread, bool is_timeout) |
| 863 | { |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 864 | K_SPINLOCK(&_sched_spinlock) { |
Peter Mitsis | e7986eb | 2023-08-14 16:41:05 -0400 | [diff] [blame] | 865 | bool killed = (thread->base.thread_state & |
| 866 | (_THREAD_DEAD | _THREAD_ABORTING)); |
Aastha Grover | 5537776 | 2023-03-08 16:54:12 -0500 | [diff] [blame] | 867 | |
Aastha Grover | 877fc3d | 2023-03-08 16:56:31 -0500 | [diff] [blame] | 868 | #ifdef CONFIG_EVENTS |
| 869 | bool do_nothing = thread->no_wake_on_timeout && is_timeout; |
| 870 | |
| 871 | thread->no_wake_on_timeout = false; |
| 872 | |
| 873 | if (do_nothing) { |
| 874 | continue; |
| 875 | } |
| 876 | #endif |
| 877 | |
Aastha Grover | 5537776 | 2023-03-08 16:54:12 -0500 | [diff] [blame] | 878 | if (!killed) { |
| 879 | /* The thread is not being killed */ |
| 880 | if (thread->base.pended_on != NULL) { |
| 881 | unpend_thread_no_timeout(thread); |
| 882 | } |
| 883 | z_mark_thread_as_started(thread); |
| 884 | if (is_timeout) { |
| 885 | z_mark_thread_as_not_suspended(thread); |
| 886 | } |
| 887 | ready_thread(thread); |
| 888 | } |
| 889 | } |
| 890 | |
| 891 | } |
| 892 | |
Andy Ross | 987c0e5 | 2018-09-27 16:50:00 -0700 | [diff] [blame] | 893 | #ifdef CONFIG_SYS_CLOCK_EXISTS |
| 894 | /* Timeout handler for *_thread_timeout() APIs */ |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 895 | void z_thread_timeout(struct _timeout *timeout) |
Andy Ross | 987c0e5 | 2018-09-27 16:50:00 -0700 | [diff] [blame] | 896 | { |
Andy Ross | 3786633 | 2021-02-17 10:12:36 -0800 | [diff] [blame] | 897 | struct k_thread *thread = CONTAINER_OF(timeout, |
| 898 | struct k_thread, base.timeout); |
Andy Ross | 987c0e5 | 2018-09-27 16:50:00 -0700 | [diff] [blame] | 899 | |
Aastha Grover | 5537776 | 2023-03-08 16:54:12 -0500 | [diff] [blame] | 900 | z_sched_wake_thread(thread, true); |
Andy Ross | 987c0e5 | 2018-09-27 16:50:00 -0700 | [diff] [blame] | 901 | } |
| 902 | #endif |
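/*
 * Illustrative sketch: z_thread_timeout() above relies on the CONTAINER_OF()
 * idiom -- because 'base.timeout' is embedded in struct k_thread, the timeout
 * subsystem hands back only the member pointer and the handler recovers the
 * enclosing object. The same pattern with hypothetical names ('struct
 * wrapper' is not a real kernel type):
 *
 *	struct wrapper {
 *		int other_state;
 *		struct _timeout to;	// embedded member
 *	};
 *
 *	void wrapper_timeout(struct _timeout *to)
 *	{
 *		// Recover the enclosing object from the member pointer
 *		struct wrapper *w = CONTAINER_OF(to, struct wrapper, to);
 *
 *		(void)w;
 *	}
 */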
| 903 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 904 | int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key, |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 905 | _wait_q_t *wait_q, k_timeout_t timeout) |
Andy Ross | ec554f4 | 2018-07-24 13:37:59 -0700 | [diff] [blame] | 906 | { |
| 907 | #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC) |
| 908 | pending_current = _current; |
| 909 | #endif |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 910 | __ASSERT_NO_MSG(sizeof(_sched_spinlock) == 0 || lock != &_sched_spinlock); |
Andy Ross | c32f376 | 2022-10-08 07:24:28 -0700 | [diff] [blame] | 911 | |
| 912 | /* We do a "lock swap" prior to calling z_swap(), such that |
| 913 | * the caller's lock gets released as desired. But we ensure |
| 914 | * that we hold the scheduler lock and leave local interrupts |
 | 915 |  * masked until we reach the context switch. z_swap() itself |
| 916 | * has similar code; the duplication is because it's a legacy |
| 917 | * API that doesn't expect to be called with scheduler lock |
| 918 | * held. |
| 919 | */ |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 920 | (void) k_spin_lock(&_sched_spinlock); |
Andy Ross | c32f376 | 2022-10-08 07:24:28 -0700 | [diff] [blame] | 921 | pend_locked(_current, wait_q, timeout); |
| 922 | k_spin_release(lock); |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 923 | return z_swap(&_sched_spinlock, key); |
Andy Ross | ec554f4 | 2018-07-24 13:37:59 -0700 | [diff] [blame] | 924 | } |
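/*
 * Illustrative sketch: a typical blocking primitive holds its own spinlock
 * while deciding whether to block, then lets z_pend_curr() perform the lock
 * swap described above. 'struct my_obj' and 'my_lock' are hypothetical, not
 * real kernel objects:
 *
 *	static struct k_spinlock my_lock;
 *
 *	int my_obj_take(struct my_obj *obj, k_timeout_t timeout)
 *	{
 *		k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *		if (obj->available) {
 *			obj->available = false;
 *			k_spin_unlock(&my_lock, key);
 *			return 0;
 *		}
 *		// Releases my_lock, pends _current on obj->wait_q and swaps
 *		// out; returns the waker's result (or a timeout error).
 *		return z_pend_curr(&my_lock, key, &obj->wait_q, timeout);
 *	}
 */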
| 925 | |
Andy Ross | 604f0f4 | 2021-02-09 16:47:47 -0800 | [diff] [blame] | 926 | struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q) |
| 927 | { |
| 928 | struct k_thread *thread = NULL; |
| 929 | |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 930 | K_SPINLOCK(&_sched_spinlock) { |
Andy Ross | 604f0f4 | 2021-02-09 16:47:47 -0800 | [diff] [blame] | 931 | thread = _priq_wait_best(&wait_q->waitq); |
| 932 | |
| 933 | if (thread != NULL) { |
| 934 | unpend_thread_no_timeout(thread); |
| 935 | } |
| 936 | } |
| 937 | |
| 938 | return thread; |
| 939 | } |
| 940 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 941 | struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q) |
Andy Ross | e7ded11 | 2018-04-11 14:52:47 -0700 | [diff] [blame] | 942 | { |
Andy Ross | 604f0f4 | 2021-02-09 16:47:47 -0800 | [diff] [blame] | 943 | struct k_thread *thread = NULL; |
Andy Ross | e7ded11 | 2018-04-11 14:52:47 -0700 | [diff] [blame] | 944 | |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 945 | K_SPINLOCK(&_sched_spinlock) { |
Andy Ross | 604f0f4 | 2021-02-09 16:47:47 -0800 | [diff] [blame] | 946 | thread = _priq_wait_best(&wait_q->waitq); |
| 947 | |
| 948 | if (thread != NULL) { |
| 949 | unpend_thread_no_timeout(thread); |
| 950 | (void)z_abort_thread_timeout(thread); |
| 951 | } |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 952 | } |
Andy Ross | e7ded11 | 2018-04-11 14:52:47 -0700 | [diff] [blame] | 953 | |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 954 | return thread; |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 955 | } |
Andy Ross | e7ded11 | 2018-04-11 14:52:47 -0700 | [diff] [blame] | 956 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 957 | void z_unpend_thread(struct k_thread *thread) |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 958 | { |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 959 | z_unpend_thread_no_timeout(thread); |
| 960 | (void)z_abort_thread_timeout(thread); |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 961 | } |
| 962 | |
Andy Ross | 6f13980 | 2019-08-20 11:21:28 -0700 | [diff] [blame] | 963 | /* Priority set utility that does no rescheduling; it just changes the |
| 964 | * run queue state, returning true if a reschedule is needed later. |
| 965 | */ |
Anas Nashif | 868f099 | 2024-02-24 11:37:56 -0500 | [diff] [blame] | 966 | bool z_thread_prio_set(struct k_thread *thread, int prio) |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 967 | { |
Flavio Ceolin | 02ed85b | 2018-09-20 15:43:57 -0700 | [diff] [blame] | 968 | bool need_sched = false; |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 969 | |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 970 | K_SPINLOCK(&_sched_spinlock) { |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 971 | need_sched = z_is_thread_ready(thread); |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 972 | |
| 973 | if (need_sched) { |
Andy Ross | 4d8e1f2 | 2019-07-01 10:25:55 -0700 | [diff] [blame] | 974 | /* Don't requeue on SMP if it's the running thread */ |
| 975 | if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) { |
Andy Ross | c230fb3 | 2021-09-23 16:41:30 -0700 | [diff] [blame] | 976 | dequeue_thread(thread); |
Andy Ross | 4d8e1f2 | 2019-07-01 10:25:55 -0700 | [diff] [blame] | 977 | thread->base.prio = prio; |
Andy Ross | c230fb3 | 2021-09-23 16:41:30 -0700 | [diff] [blame] | 978 | queue_thread(thread); |
Andy Ross | 4d8e1f2 | 2019-07-01 10:25:55 -0700 | [diff] [blame] | 979 | } else { |
| 980 | thread->base.prio = prio; |
| 981 | } |
Andy Ross | 1856e22 | 2018-05-21 11:48:35 -0700 | [diff] [blame] | 982 | update_cache(1); |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 983 | } else { |
| 984 | thread->base.prio = prio; |
Andy Ross | e7ded11 | 2018-04-11 14:52:47 -0700 | [diff] [blame] | 985 | } |
| 986 | } |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 987 | |
| 988 | SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_priority_set, thread, prio); |
Andy Ross | e7ded11 | 2018-04-11 14:52:47 -0700 | [diff] [blame] | 989 | |
Andy Ross | 6f13980 | 2019-08-20 11:21:28 -0700 | [diff] [blame] | 990 | return need_sched; |
| 991 | } |
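/*
 * Callers are expected to act on the return value themselves. The pattern
 * used by z_impl_k_thread_priority_set() later in this file is the canonical
 * one: change the priority, then reschedule only when needed and allowed:
 *
 *	bool need_sched = z_thread_prio_set(thread, prio);
 *
 *	if (need_sched && _current->base.sched_locked == 0U) {
 *		z_reschedule_unlocked();
 *	}
 */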
| 992 | |
Anas Nashif | 3f4f3f6 | 2021-03-29 17:13:47 -0400 | [diff] [blame] | 993 | static inline bool resched(uint32_t key) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 994 | { |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 995 | #ifdef CONFIG_SMP |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 996 | _current_cpu->swap_ok = 0; |
| 997 | #endif |
| 998 | |
Andrew Boie | 4f77c2a | 2019-11-07 12:43:29 -0800 | [diff] [blame] | 999 | return arch_irq_unlocked(key) && !arch_is_in_isr(); |
Andy Ross | ec554f4 | 2018-07-24 13:37:59 -0700 | [diff] [blame] | 1000 | } |
| 1001 | |
Anas Nashif | 379b93f | 2020-08-10 15:47:02 -0400 | [diff] [blame] | 1002 | /* |
| 1003 | * Check if the next ready thread is the same as the current thread |
| 1004 | * and save the trip if true. |
| 1005 | */ |
| 1006 | static inline bool need_swap(void) |
| 1007 | { |
| 1008 | /* the SMP case will be handled in C based z_swap() */ |
| 1009 | #ifdef CONFIG_SMP |
| 1010 | return true; |
| 1011 | #else |
| 1012 | struct k_thread *new_thread; |
| 1013 | |
| 1014 | /* Check if the next ready thread is the same as the current thread */ |
Andy Ross | 6b84ab3 | 2021-02-18 10:15:23 -0800 | [diff] [blame] | 1015 | new_thread = _kernel.ready_q.cache; |
Anas Nashif | 379b93f | 2020-08-10 15:47:02 -0400 | [diff] [blame] | 1016 | return new_thread != _current; |
| 1017 | #endif |
| 1018 | } |
| 1019 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1020 | void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key) |
Andy Ross | ec554f4 | 2018-07-24 13:37:59 -0700 | [diff] [blame] | 1021 | { |
Anas Nashif | 379b93f | 2020-08-10 15:47:02 -0400 | [diff] [blame] | 1022 | if (resched(key.key) && need_swap()) { |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1023 | z_swap(lock, key); |
Andy Ross | ec554f4 | 2018-07-24 13:37:59 -0700 | [diff] [blame] | 1024 | } else { |
| 1025 | k_spin_unlock(lock, key); |
Andy Ross | b4e9ef0 | 2022-04-06 10:10:17 -0700 | [diff] [blame] | 1026 | signal_pending_ipi(); |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 1027 | } |
Andy Ross | ec554f4 | 2018-07-24 13:37:59 -0700 | [diff] [blame] | 1028 | } |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 1029 | |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1030 | void z_reschedule_irqlock(uint32_t key) |
Andy Ross | ec554f4 | 2018-07-24 13:37:59 -0700 | [diff] [blame] | 1031 | { |
Gaetan Perrot | 68581ca | 2023-12-21 11:01:54 +0900 | [diff] [blame] | 1032 | if (resched(key) && need_swap()) { |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1033 | z_swap_irqlock(key); |
Andy Ross | ec554f4 | 2018-07-24 13:37:59 -0700 | [diff] [blame] | 1034 | } else { |
| 1035 | irq_unlock(key); |
Andy Ross | b4e9ef0 | 2022-04-06 10:10:17 -0700 | [diff] [blame] | 1036 | signal_pending_ipi(); |
Andy Ross | ec554f4 | 2018-07-24 13:37:59 -0700 | [diff] [blame] | 1037 | } |
Andy Ross | 8606fab | 2018-03-26 10:54:40 -0700 | [diff] [blame] | 1038 | } |
| 1039 | |
Benjamin Walsh | d7ad176 | 2016-11-10 14:46:58 -0500 | [diff] [blame] | 1040 | void k_sched_lock(void) |
| 1041 | { |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1042 | K_SPINLOCK(&_sched_spinlock) { |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1043 | SYS_PORT_TRACING_FUNC(k_thread, sched_lock); |
| 1044 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1045 | z_sched_lock(); |
Andy Ross | 1856e22 | 2018-05-21 11:48:35 -0700 | [diff] [blame] | 1046 | } |
Benjamin Walsh | d7ad176 | 2016-11-10 14:46:58 -0500 | [diff] [blame] | 1047 | } |
| 1048 | |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1049 | void k_sched_unlock(void) |
| 1050 | { |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1051 | K_SPINLOCK(&_sched_spinlock) { |
Anas Nashif | bbbc38b | 2021-03-29 10:03:49 -0400 | [diff] [blame] | 1052 | __ASSERT(_current->base.sched_locked != 0U, ""); |
Andy Ross | eefd3da | 2020-02-06 13:39:52 -0800 | [diff] [blame] | 1053 | __ASSERT(!arch_is_in_isr(), ""); |
| 1054 | |
Andy Ross | 1856e22 | 2018-05-21 11:48:35 -0700 | [diff] [blame] | 1055 | ++_current->base.sched_locked; |
Yasushi SHOJI | 20d0724 | 2019-07-31 11:19:08 +0900 | [diff] [blame] | 1056 | update_cache(0); |
Andy Ross | 1856e22 | 2018-05-21 11:48:35 -0700 | [diff] [blame] | 1057 | } |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1058 | |
Anas Nashif | 2c5d404 | 2019-12-02 10:24:08 -0500 | [diff] [blame] | 1059 | LOG_DBG("scheduler unlocked (%p:%d)", |
Benjamin Walsh | a4e033f | 2016-11-18 16:08:24 -0500 | [diff] [blame] | 1060 | _current, _current->base.sched_locked); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1061 | |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1062 | SYS_PORT_TRACING_FUNC(k_thread, sched_unlock); |
| 1063 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1064 | z_reschedule_unlocked(); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1065 | } |
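/*
 * Illustrative usage: k_sched_lock()/k_sched_unlock() bracket a
 * preemption-free region for the current thread. Interrupts stay enabled, so
 * this protects only against other threads, not against ISRs
 * ('update_shared_state' is a hypothetical helper):
 *
 *	k_sched_lock();
 *	// _current cannot be preempted by another thread here
 *	update_shared_state();
 *	k_sched_unlock();	// may reschedule immediately if needed
 */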
| 1066 | |
Andy Ross | 6b84ab3 | 2021-02-18 10:15:23 -0800 | [diff] [blame] | 1067 | struct k_thread *z_swap_next_thread(void) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1068 | { |
Andy Ross | 6b84ab3 | 2021-02-18 10:15:23 -0800 | [diff] [blame] | 1069 | #ifdef CONFIG_SMP |
Andy Ross | b4e9ef0 | 2022-04-06 10:10:17 -0700 | [diff] [blame] | 1070 | struct k_thread *ret = next_up(); |
| 1071 | |
| 1072 | if (ret == _current) { |
| 1073 | /* When not swapping, have to signal IPIs here. In |
| 1074 | * the context switch case it must happen later, after |
| 1075 | * _current gets requeued. |
| 1076 | */ |
| 1077 | signal_pending_ipi(); |
| 1078 | } |
| 1079 | return ret; |
Andy Ross | 6b84ab3 | 2021-02-18 10:15:23 -0800 | [diff] [blame] | 1080 | #else |
| 1081 | return _kernel.ready_q.cache; |
Benjamin Walsh | 6209218 | 2016-12-20 14:39:08 -0500 | [diff] [blame] | 1082 | #endif |
Andy Ross | 6b84ab3 | 2021-02-18 10:15:23 -0800 | [diff] [blame] | 1083 | } |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1084 | |
Jeremy Bettis | 1e0a36c | 2021-12-06 10:56:33 -0700 | [diff] [blame] | 1085 | #ifdef CONFIG_USE_SWITCH |
Andy Ross | b18685b | 2019-02-19 17:24:30 -0800 | [diff] [blame] | 1086 | /* Just a wrapper around _current = xxx with tracing */ |
| 1087 | static inline void set_current(struct k_thread *new_thread) |
| 1088 | { |
Daniel Leung | 11e6b43 | 2020-08-27 16:12:01 -0700 | [diff] [blame] | 1089 | z_thread_mark_switched_out(); |
Andy Ross | eefd3da | 2020-02-06 13:39:52 -0800 | [diff] [blame] | 1090 | _current_cpu->current = new_thread; |
Andy Ross | b18685b | 2019-02-19 17:24:30 -0800 | [diff] [blame] | 1091 | } |
| 1092 | |
Nicolas Pitre | c9e3e0d | 2022-03-15 22:36:20 -0400 | [diff] [blame] | 1093 | /** |
| 1094 | * @brief Determine next thread to execute upon completion of an interrupt |
| 1095 | * |
| 1096 | * Thread preemption is performed by context switching after the completion |
| 1097 | * of a non-recursed interrupt. This function determines which thread to |
| 1098 | * switch to if any. This function accepts as @p interrupted either: |
| 1099 | * |
| 1100 | * - The handle for the interrupted thread in which case the thread's context |
| 1101 | * must already be fully saved and ready to be picked up by a different CPU. |
| 1102 | * |
| 1103 | * - NULL if more work is required to fully save the thread's state after |
| 1104 | * it is known that a new thread is to be scheduled. It is up to the caller |
| 1105 | * to store the handle resulting from the thread that is being switched out |
| 1106 | * in that thread's "switch_handle" field after its |
| 1107 | * context has fully been saved, following the same requirements as with |
| 1108 | * the @ref arch_switch() function. |
| 1109 | * |
| 1110 | * If a new thread needs to be scheduled then its handle is returned. |
| 1111 | * Otherwise the same value provided as @p interrupted is returned back. |
| 1112 | * Those handles are the same opaque types used by the @ref arch_switch() |
| 1113 | * function. |
| 1114 | * |
| 1115 | * @warning |
| 1116 | * The @ref _current value may have changed after this call and not refer |
| 1117 | * to the interrupted thread anymore. It might be necessary to make a local |
| 1118 | * copy before calling this function. |
| 1119 | * |
| 1120 | * @param interrupted Handle for the thread that was interrupted or NULL. |
| 1121 | * @retval Handle for the next thread to execute, or @p interrupted when |
| 1122 | * no new thread is to be scheduled. |
| 1123 | */ |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1124 | void *z_get_next_switch_handle(void *interrupted) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1125 | { |
Andrew Boie | ae0d1b2 | 2019-03-29 16:25:27 -0700 | [diff] [blame] | 1126 | z_check_stack_sentinel(); |
| 1127 | |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 1128 | #ifdef CONFIG_SMP |
Andy Ross | dd43221 | 2021-02-05 08:15:02 -0800 | [diff] [blame] | 1129 | void *ret = NULL; |
| 1130 | |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1131 | K_SPINLOCK(&_sched_spinlock) { |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 1132 | struct k_thread *old_thread = _current, *new_thread; |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 1133 | |
Andy Ross | 4ff4571 | 2021-02-08 08:28:54 -0800 | [diff] [blame] | 1134 | if (IS_ENABLED(CONFIG_SMP)) { |
| 1135 | old_thread->switch_handle = NULL; |
| 1136 | } |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 1137 | new_thread = next_up(); |
| 1138 | |
Andy Ross | 40d12c1 | 2021-09-27 08:22:43 -0700 | [diff] [blame] | 1139 | z_sched_usage_switch(new_thread); |
| 1140 | |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 1141 | if (old_thread != new_thread) { |
| 1142 | update_metairq_preempt(new_thread); |
Andy Ross | b89e427 | 2023-05-26 09:12:51 -0700 | [diff] [blame] | 1143 | z_sched_switch_spin(new_thread); |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 1144 | arch_cohere_stacks(old_thread, interrupted, new_thread); |
Andy Ross | 11a050b | 2019-11-13 09:41:52 -0800 | [diff] [blame] | 1145 | |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 1146 | _current_cpu->swap_ok = 0; |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 1147 | set_current(new_thread); |
| 1148 | |
Andy Ross | 3e69689 | 2021-11-30 18:26:26 -0800 | [diff] [blame] | 1149 | #ifdef CONFIG_TIMESLICING |
| 1150 | z_reset_time_slice(new_thread); |
| 1151 | #endif |
| 1152 | |
Danny Oerndrup | c9d7840 | 2019-12-13 11:24:56 +0100 | [diff] [blame] | 1153 | #ifdef CONFIG_SPIN_VALIDATE |
Andy Ross | 8c1bdda | 2019-02-20 10:07:31 -0800 | [diff] [blame] | 1154 | /* Changed _current! Update the spinlock |
Anas Nashif | 6df4405 | 2021-04-30 09:58:20 -0400 | [diff] [blame] | 1155 | * bookkeeping so the validation doesn't get |
Andy Ross | 8c1bdda | 2019-02-20 10:07:31 -0800 | [diff] [blame] | 1156 | * confused when the "wrong" thread tries to |
| 1157 | * release the lock. |
| 1158 | */ |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1159 | z_spin_lock_set_owner(&_sched_spinlock); |
Andy Ross | 8c1bdda | 2019-02-20 10:07:31 -0800 | [diff] [blame] | 1160 | #endif |
Andy Ross | 4ff4571 | 2021-02-08 08:28:54 -0800 | [diff] [blame] | 1161 | |
| 1162 | /* A queued (runnable) old/current thread |
| 1163 | * needs to be added back to the run queue |
| 1164 | * here, and atomically with its switch handle |
| 1165 | * being set below. This is safe now, as we |
| 1166 | * will not return into it. |
| 1167 | */ |
| 1168 | if (z_is_thread_queued(old_thread)) { |
Andy Ross | 387fdd2 | 2021-09-23 18:44:40 -0700 | [diff] [blame] | 1169 | runq_add(old_thread); |
Andy Ross | 4ff4571 | 2021-02-08 08:28:54 -0800 | [diff] [blame] | 1170 | } |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 1171 | } |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 1172 | old_thread->switch_handle = interrupted; |
Andy Ross | dd43221 | 2021-02-05 08:15:02 -0800 | [diff] [blame] | 1173 | ret = new_thread->switch_handle; |
Andy Ross | 4ff4571 | 2021-02-08 08:28:54 -0800 | [diff] [blame] | 1174 | if (IS_ENABLED(CONFIG_SMP)) { |
| 1175 | /* Active threads MUST have a null here */ |
| 1176 | new_thread->switch_handle = NULL; |
| 1177 | } |
Benjamin Walsh | b8c2160 | 2016-12-23 19:34:41 -0500 | [diff] [blame] | 1178 | } |
Andy Ross | b4e9ef0 | 2022-04-06 10:10:17 -0700 | [diff] [blame] | 1179 | signal_pending_ipi(); |
Andy Ross | dd43221 | 2021-02-05 08:15:02 -0800 | [diff] [blame] | 1180 | return ret; |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 1181 | #else |
Andy Ross | 40d12c1 | 2021-09-27 08:22:43 -0700 | [diff] [blame] | 1182 | z_sched_usage_switch(_kernel.ready_q.cache); |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 1183 | _current->switch_handle = interrupted; |
Andy Ross | 6b84ab3 | 2021-02-18 10:15:23 -0800 | [diff] [blame] | 1184 | set_current(_kernel.ready_q.cache); |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1185 | return _current->switch_handle; |
Andy Ross | dd43221 | 2021-02-05 08:15:02 -0800 | [diff] [blame] | 1186 | #endif |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1187 | } |
Benjamin Walsh | b12a8e0 | 2016-12-14 15:24:12 -0500 | [diff] [blame] | 1188 | #endif |
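/*
 * Illustrative sketch: an architecture's interrupt-exit path typically uses
 * the first calling convention documented above, passing in the handle of an
 * already fully-saved interrupted thread. A sketch with hypothetical names
 * ('arch_isr_exit' and 'restore_context' are not real kernel functions):
 *
 *	void *arch_isr_exit(void *interrupted_handle)
 *	{
 *		void *next = z_get_next_switch_handle(interrupted_handle);
 *
 *		if (next != interrupted_handle) {
 *			// A different thread was picked: restore its
 *			// context instead of the interrupted one's.
 *			restore_context(next);
 *		}
 *		return next;
 *	}
 */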
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1189 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1190 | int z_unpend_all(_wait_q_t *wait_q) |
Andy Ross | 4ca0e07 | 2018-05-10 09:45:42 -0700 | [diff] [blame] | 1191 | { |
Andy Ross | ccf3bf7 | 2018-05-10 11:10:34 -0700 | [diff] [blame] | 1192 | int need_sched = 0; |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1193 | struct k_thread *thread; |
Andy Ross | 4ca0e07 | 2018-05-10 09:45:42 -0700 | [diff] [blame] | 1194 | |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1195 | while ((thread = z_waitq_head(wait_q)) != NULL) { |
| 1196 | z_unpend_thread(thread); |
| 1197 | z_ready_thread(thread); |
Andy Ross | 4ca0e07 | 2018-05-10 09:45:42 -0700 | [diff] [blame] | 1198 | need_sched = 1; |
| 1199 | } |
Andy Ross | ccf3bf7 | 2018-05-10 11:10:34 -0700 | [diff] [blame] | 1200 | |
| 1201 | return need_sched; |
Andy Ross | 4ca0e07 | 2018-05-10 09:45:42 -0700 | [diff] [blame] | 1202 | } |
| 1203 | |
Anas Nashif | 477a04a | 2024-02-28 08:15:15 -0500 | [diff] [blame] | 1204 | void init_ready_q(struct _ready_q *ready_q) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1205 | { |
Andy Ross | b155d06 | 2021-09-24 13:49:14 -0700 | [diff] [blame] | 1206 | #if defined(CONFIG_SCHED_SCALABLE) |
Anas Nashif | 477a04a | 2024-02-28 08:15:15 -0500 | [diff] [blame] | 1207 | ready_q->runq = (struct _priq_rb) { |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1208 | .tree = { |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1209 | .lessthan_fn = z_priq_rb_lessthan, |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1210 | } |
| 1211 | }; |
Andy Ross | b155d06 | 2021-09-24 13:49:14 -0700 | [diff] [blame] | 1212 | #elif defined(CONFIG_SCHED_MULTIQ) |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1213 | for (int i = 0; i < ARRAY_SIZE(_kernel.ready_q.runq.queues); i++) { |
Anas Nashif | 477a04a | 2024-02-28 08:15:15 -0500 | [diff] [blame] | 1214 | sys_dlist_init(&ready_q->runq.queues[i]); |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1215 | } |
Andy Ross | b155d06 | 2021-09-24 13:49:14 -0700 | [diff] [blame] | 1216 | #else |
Anas Nashif | 477a04a | 2024-02-28 08:15:15 -0500 | [diff] [blame] | 1217 | sys_dlist_init(&ready_q->runq); |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1218 | #endif |
Andy Ross | b155d06 | 2021-09-24 13:49:14 -0700 | [diff] [blame] | 1219 | } |
| 1220 | |
| 1221 | void z_sched_init(void) |
| 1222 | { |
Andy Ross | b11e796 | 2021-09-24 10:57:39 -0700 | [diff] [blame] | 1223 | #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY |
Nicolas Pitre | 907eea0 | 2023-03-16 17:54:25 -0400 | [diff] [blame] | 1224 | for (int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) { |
Andy Ross | b11e796 | 2021-09-24 10:57:39 -0700 | [diff] [blame] | 1225 | init_ready_q(&_kernel.cpus[i].ready_q); |
| 1226 | } |
| 1227 | #else |
Andy Ross | b155d06 | 2021-09-24 13:49:14 -0700 | [diff] [blame] | 1228 | init_ready_q(&_kernel.ready_q); |
Andy Ross | b11e796 | 2021-09-24 10:57:39 -0700 | [diff] [blame] | 1229 | #endif |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1230 | } |
| 1231 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1232 | int z_impl_k_thread_priority_get(k_tid_t thread) |
Allan Stephens | 399d0ad | 2016-10-07 13:41:34 -0500 | [diff] [blame] | 1233 | { |
Benjamin Walsh | f6ca7de | 2016-11-08 10:36:50 -0500 | [diff] [blame] | 1234 | return thread->base.prio; |
Allan Stephens | 399d0ad | 2016-10-07 13:41:34 -0500 | [diff] [blame] | 1235 | } |
| 1236 | |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1237 | #ifdef CONFIG_USERSPACE |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1238 | static inline int z_vrfy_k_thread_priority_get(k_tid_t thread) |
| 1239 | { |
Anas Nashif | a08bfeb | 2023-09-27 11:20:28 +0000 | [diff] [blame] | 1240 | K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD)); |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1241 | return z_impl_k_thread_priority_get(thread); |
| 1242 | } |
| 1243 | #include <syscalls/k_thread_priority_get_mrsh.c> |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1244 | #endif |
| 1245 | |
Anas Nashif | 25c87db | 2021-03-29 10:54:23 -0400 | [diff] [blame] | 1246 | void z_impl_k_thread_priority_set(k_tid_t thread, int prio) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1247 | { |
Benjamin Walsh | 3cc2ba9 | 2016-11-08 15:44:05 -0500 | [diff] [blame] | 1248 | /* |
| 1249 | * Use NULL, since we cannot know what the entry point is (we do not |
| 1250 | * keep track of it) and idle cannot change its priority. |
| 1251 | */ |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1252 | Z_ASSERT_VALID_PRIO(prio, NULL); |
Andrew Boie | 4f77c2a | 2019-11-07 12:43:29 -0800 | [diff] [blame] | 1253 | __ASSERT(!arch_is_in_isr(), ""); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1254 | |
Anas Nashif | 868f099 | 2024-02-24 11:37:56 -0500 | [diff] [blame] | 1255 | bool need_sched = z_thread_prio_set((struct k_thread *)thread, prio); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1256 | |
Anas Nashif | 5e591c3 | 2024-02-24 10:37:06 -0500 | [diff] [blame] | 1257 | flag_ipi(); |
| 1258 | if (need_sched && _current->base.sched_locked == 0U) { |
| 1259 | z_reschedule_unlocked(); |
| 1260 | } |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1261 | } |
| 1262 | |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1263 | #ifdef CONFIG_USERSPACE |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1264 | static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio) |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1265 | { |
Anas Nashif | a08bfeb | 2023-09-27 11:20:28 +0000 | [diff] [blame] | 1266 | K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD)); |
| 1267 | K_OOPS(K_SYSCALL_VERIFY_MSG(_is_valid_prio(prio, NULL), |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1268 | "invalid thread priority %d", prio)); |
Anas Nashif | 5e591c3 | 2024-02-24 10:37:06 -0500 | [diff] [blame] | 1269 | #ifndef CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY |
Anas Nashif | a08bfeb | 2023-09-27 11:20:28 +0000 | [diff] [blame] | 1270 | K_OOPS(K_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio, |
Andrew Boie | 8345e5e | 2018-05-04 15:57:57 -0700 | [diff] [blame] | 1271 | "thread priority may only be downgraded (%d < %d)", |
| 1272 | prio, thread->base.prio)); |
Anas Nashif | 5e591c3 | 2024-02-24 10:37:06 -0500 | [diff] [blame] | 1273 | #endif |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1274 | z_impl_k_thread_priority_set(thread, prio); |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1275 | } |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1276 | #include <syscalls/k_thread_priority_set_mrsh.c> |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1277 | #endif |
| 1278 | |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1279 | #ifdef CONFIG_SCHED_DEADLINE |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1280 | void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline) |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1281 | { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1282 | struct k_thread *thread = tid; |
Andy Ross | f2280d1 | 2024-03-08 08:42:08 -0800 | [diff] [blame] | 1283 | int32_t newdl = k_cycle_get_32() + deadline; |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1284 | |
Andy Ross | f2280d1 | 2024-03-08 08:42:08 -0800 | [diff] [blame] | 1285 | /* The prio_deadline field changes the sorting order, so we can't |
| 1286 | * change it while the thread is in the run queue (dlists |
| 1287 | * actually are benign as long as we requeue it before we |
| 1288 | * release the lock, but an rbtree will blow up if we break |
| 1289 | * sorting!) |
| 1290 | */ |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1291 | K_SPINLOCK(&_sched_spinlock) { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1292 | if (z_is_thread_queued(thread)) { |
Andy Ross | c230fb3 | 2021-09-23 16:41:30 -0700 | [diff] [blame] | 1293 | dequeue_thread(thread); |
Andy Ross | f2280d1 | 2024-03-08 08:42:08 -0800 | [diff] [blame] | 1294 | thread->base.prio_deadline = newdl; |
Andy Ross | c230fb3 | 2021-09-23 16:41:30 -0700 | [diff] [blame] | 1295 | queue_thread(thread); |
Andy Ross | f2280d1 | 2024-03-08 08:42:08 -0800 | [diff] [blame] | 1296 | } else { |
| 1297 | thread->base.prio_deadline = newdl; |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1298 | } |
| 1299 | } |
| 1300 | } |
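/*
 * Illustrative usage: with CONFIG_SCHED_DEADLINE, threads of equal static
 * priority are sorted earliest-deadline-first. As shown above, the deadline
 * argument is relative and measured in hardware cycles from "now"
 * (k_cycle_get_32()), so callers usually convert from time units first -- a
 * minimal sketch, assuming all workers share one static priority:
 *
 *	k_thread_deadline_set(tid, (int)k_ms_to_cyc_ceil32(5));
 *	// tid now sorts ahead of peers with later deadlines
 */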
| 1301 | |
| 1302 | #ifdef CONFIG_USERSPACE |
Andy Ross | 075c94f | 2019-08-13 11:34:34 -0700 | [diff] [blame] | 1303 | static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline) |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1304 | { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1305 | struct k_thread *thread = tid; |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1306 | |
Anas Nashif | a08bfeb | 2023-09-27 11:20:28 +0000 | [diff] [blame] | 1307 | K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD)); |
| 1308 | K_OOPS(K_SYSCALL_VERIFY_MSG(deadline > 0, |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1309 | "invalid thread deadline %d", |
| 1310 | (int)deadline)); |
| 1311 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1312 | z_impl_k_thread_deadline_set((k_tid_t)thread, deadline); |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1313 | } |
Andy Ross | 075c94f | 2019-08-13 11:34:34 -0700 | [diff] [blame] | 1314 | #include <syscalls/k_thread_deadline_set_mrsh.c> |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1315 | #endif |
| 1316 | #endif |
| 1317 | |
Jordan Yates | 1ef647f | 2022-03-26 09:55:23 +1000 | [diff] [blame] | 1318 | bool k_can_yield(void) |
| 1319 | { |
| 1320 | return !(k_is_pre_kernel() || k_is_in_isr() || |
| 1321 | z_is_idle_thread_object(_current)); |
| 1322 | } |
| 1323 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1324 | void z_impl_k_yield(void) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1325 | { |
Andrew Boie | 4f77c2a | 2019-11-07 12:43:29 -0800 | [diff] [blame] | 1326 | __ASSERT(!arch_is_in_isr(), ""); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1327 | |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1328 | SYS_PORT_TRACING_FUNC(k_thread, yield); |
| 1329 | |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1330 | k_spinlock_key_t key = k_spin_lock(&_sched_spinlock); |
James Harris | 6543e06 | 2021-03-01 10:14:13 -0800 | [diff] [blame] | 1331 | |
Andy Ross | 851d14a | 2021-05-13 15:46:43 -0700 | [diff] [blame] | 1332 | if (!IS_ENABLED(CONFIG_SMP) || |
| 1333 | z_is_thread_queued(_current)) { |
Andy Ross | c230fb3 | 2021-09-23 16:41:30 -0700 | [diff] [blame] | 1334 | dequeue_thread(_current); |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1335 | } |
Andy Ross | c230fb3 | 2021-09-23 16:41:30 -0700 | [diff] [blame] | 1336 | queue_thread(_current); |
Andy Ross | 851d14a | 2021-05-13 15:46:43 -0700 | [diff] [blame] | 1337 | update_cache(1); |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1338 | z_swap(&_sched_spinlock, key); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1339 | } |
| 1340 | |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1341 | #ifdef CONFIG_USERSPACE |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1342 | static inline void z_vrfy_k_yield(void) |
| 1343 | { |
| 1344 | z_impl_k_yield(); |
| 1345 | } |
| 1346 | #include <syscalls/k_yield_mrsh.c> |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1347 | #endif |
| 1348 | |
Flavio Ceolin | 7a815d5 | 2020-10-19 21:37:22 -0700 | [diff] [blame] | 1349 | static int32_t z_tick_sleep(k_ticks_t ticks) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1350 | { |
Flavio Ceolin | 9a16097 | 2020-11-16 10:40:46 -0800 | [diff] [blame] | 1351 | uint32_t expected_wakeup_ticks; |
Carles Cufi | 9849df8 | 2016-12-02 15:31:08 +0100 | [diff] [blame] | 1352 | |
Andrew Boie | 4f77c2a | 2019-11-07 12:43:29 -0800 | [diff] [blame] | 1353 | __ASSERT(!arch_is_in_isr(), ""); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1354 | |
Gerard Marull-Paretas | 737d799 | 2022-11-23 13:42:04 +0100 | [diff] [blame] | 1355 | LOG_DBG("thread %p for %lu ticks", _current, (unsigned long)ticks); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1356 | |
Gerson Fernando Budke | b8188e5 | 2023-10-16 20:15:31 +0200 | [diff] [blame] | 1357 | #ifdef CONFIG_MULTITHREADING |
Benjamin Walsh | 5596f78 | 2016-12-09 19:57:17 -0500 | [diff] [blame] | 1358 | /* a wait of 0 ticks is treated as a 'yield' */ |
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1359 | if (ticks == 0) { |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1360 | k_yield(); |
Piotr Zięcik | 7700eb2 | 2018-10-25 17:45:08 +0200 | [diff] [blame] | 1361 | return 0; |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1362 | } |
Gerson Fernando Budke | b8188e5 | 2023-10-16 20:15:31 +0200 | [diff] [blame] | 1363 | #endif |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1364 | |
Lauren Murphy | 4c85b46 | 2021-05-25 17:49:28 -0500 | [diff] [blame] | 1365 | if (Z_TICK_ABS(ticks) <= 0) { |
| 1366 | expected_wakeup_ticks = ticks + sys_clock_tick_get_32(); |
| 1367 | } else { |
| 1368 | expected_wakeup_ticks = Z_TICK_ABS(ticks); |
| 1369 | } |
Andy Ross | d27d4e6 | 2019-02-05 15:36:01 -0800 | [diff] [blame] | 1370 | |
Gerson Fernando Budke | b8188e5 | 2023-10-16 20:15:31 +0200 | [diff] [blame] | 1371 | #ifdef CONFIG_MULTITHREADING |
| 1372 | k_timeout_t timeout = Z_TIMEOUT_TICKS(ticks); |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1373 | k_spinlock_key_t key = k_spin_lock(&_sched_spinlock); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1374 | |
Andy Ross | dff6b71 | 2019-02-25 21:17:29 -0800 | [diff] [blame] | 1375 | #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC) |
| 1376 | pending_current = _current; |
| 1377 | #endif |
Andrew Boie | a8775ab | 2020-09-05 12:53:42 -0700 | [diff] [blame] | 1378 | unready_thread(_current); |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1379 | z_add_thread_timeout(_current, timeout); |
Andy Ross | 4521e0c | 2019-03-22 10:30:19 -0700 | [diff] [blame] | 1380 | z_mark_thread_as_suspended(_current); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1381 | |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1382 | (void)z_swap(&_sched_spinlock, key); |
Piotr Zięcik | 7700eb2 | 2018-10-25 17:45:08 +0200 | [diff] [blame] | 1383 | |
Andy Ross | 4521e0c | 2019-03-22 10:30:19 -0700 | [diff] [blame] | 1384 | __ASSERT(!z_is_thread_state_set(_current, _THREAD_SUSPENDED), ""); |
| 1385 | |
Anas Nashif | 5c90ceb | 2021-03-13 08:19:53 -0500 | [diff] [blame] | 1386 | ticks = (k_ticks_t)expected_wakeup_ticks - sys_clock_tick_get_32(); |
Piotr Zięcik | 7700eb2 | 2018-10-25 17:45:08 +0200 | [diff] [blame] | 1387 | if (ticks > 0) { |
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1388 | return ticks; |
Piotr Zięcik | 7700eb2 | 2018-10-25 17:45:08 +0200 | [diff] [blame] | 1389 | } |
Gerson Fernando Budke | b8188e5 | 2023-10-16 20:15:31 +0200 | [diff] [blame] | 1390 | #else |
| 1391 | /* busy wait to be time coherent since subsystems may depend on it */ |
| 1392 | z_impl_k_busy_wait(k_ticks_to_us_ceil32(expected_wakeup_ticks)); |
Benjamin Walsh | b12a8e0 | 2016-12-14 15:24:12 -0500 | [diff] [blame] | 1393 | #endif |
Piotr Zięcik | 7700eb2 | 2018-10-25 17:45:08 +0200 | [diff] [blame] | 1394 | |
| 1395 | return 0; |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1396 | } |
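/*
 * A note on the Z_TICK_ABS() branch above: it implements absolute timeouts,
 * where the wakeup is pinned to a point on the tick timeline rather than to
 * a relative delay. A minimal sketch, assuming CONFIG_TIMEOUT_64BIT (which
 * provides the K_TIMEOUT_ABS_TICKS() helper):
 *
 *	// Sleep until tick 'now + 100', regardless of how long the
 *	// intervening code took to reach this call:
 *	k_sleep(K_TIMEOUT_ABS_TICKS(sys_clock_tick_get() + 100));
 */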
| 1397 | |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1398 | int32_t z_impl_k_sleep(k_timeout_t timeout) |
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1399 | { |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1400 | k_ticks_t ticks; |
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1401 | |
Peter Bigot | 8162e58 | 2019-12-12 16:07:07 -0600 | [diff] [blame] | 1402 | __ASSERT(!arch_is_in_isr(), ""); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1403 | |
| 1404 | SYS_PORT_TRACING_FUNC_ENTER(k_thread, sleep, timeout); |
Peter Bigot | 8162e58 | 2019-12-12 16:07:07 -0600 | [diff] [blame] | 1405 | |
Anas Nashif | d2c7179 | 2020-10-17 07:52:17 -0400 | [diff] [blame] | 1406 | /* in case of K_FOREVER, we suspend */ |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1407 | if (K_TIMEOUT_EQ(timeout, K_FOREVER)) { |
Gerson Fernando Budke | b8188e5 | 2023-10-16 20:15:31 +0200 | [diff] [blame] | 1408 | #ifdef CONFIG_MULTITHREADING |
Andrew Boie | d2b8922 | 2019-11-08 10:44:22 -0800 | [diff] [blame] | 1409 | k_thread_suspend(_current); |
Gerson Fernando Budke | b8188e5 | 2023-10-16 20:15:31 +0200 | [diff] [blame] | 1410 | #else |
 | 1411 |  /* In single-thread mode, just wait for an interrupt to save power */ |
| 1412 | k_cpu_idle(); |
| 1413 | #endif |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1414 | SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, (int32_t) K_TICKS_FOREVER); |
| 1415 | |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1416 | return (int32_t) K_TICKS_FOREVER; |
Andrew Boie | d2b8922 | 2019-11-08 10:44:22 -0800 | [diff] [blame] | 1417 | } |
| 1418 | |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1419 | ticks = timeout.ticks; |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1420 | |
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1421 | ticks = z_tick_sleep(ticks); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1422 | |
Peter Mitsis | a3e5af9 | 2023-12-05 13:40:19 -0500 | [diff] [blame] | 1423 | int32_t ret = k_ticks_to_ms_ceil64(ticks); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1424 | |
| 1425 | SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, ret); |
| 1426 | |
| 1427 | return ret; |
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1428 | } |
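/*
 * Illustrative usage: k_sleep() returns the time left to sleep in
 * milliseconds, which is nonzero only when the thread was woken early,
 * e.g. by k_wakeup():
 *
 *	int32_t remaining = k_sleep(K_MSEC(100));
 *
 *	if (remaining > 0) {
 *		// Woken roughly 'remaining' ms before the deadline
 *	}
 */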
| 1429 | |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1430 | #ifdef CONFIG_USERSPACE |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1431 | static inline int32_t z_vrfy_k_sleep(k_timeout_t timeout) |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1432 | { |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1433 | return z_impl_k_sleep(timeout); |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1434 | } |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1435 | #include <syscalls/k_sleep_mrsh.c> |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1436 | #endif |
| 1437 | |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1438 | int32_t z_impl_k_usleep(int us) |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1439 | { |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1440 | int32_t ticks; |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1441 | |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1442 | SYS_PORT_TRACING_FUNC_ENTER(k_thread, usleep, us); |
| 1443 | |
Andy Ross | 8892406 | 2019-10-03 11:43:10 -0700 | [diff] [blame] | 1444 | ticks = k_us_to_ticks_ceil64(us); |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1445 | ticks = z_tick_sleep(ticks); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1446 | |
Peter Mitsis | a3e5af9 | 2023-12-05 13:40:19 -0500 | [diff] [blame] | 1447 | int32_t ret = k_ticks_to_us_ceil64(ticks); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1448 | |
Peter Mitsis | a3e5af9 | 2023-12-05 13:40:19 -0500 | [diff] [blame] | 1449 | SYS_PORT_TRACING_FUNC_EXIT(k_thread, usleep, us, ret); |
| 1450 | |
| 1451 | return ret; |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1452 | } |
| 1453 | |
| 1454 | #ifdef CONFIG_USERSPACE |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1455 | static inline int32_t z_vrfy_k_usleep(int us) |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1456 | { |
| 1457 | return z_impl_k_usleep(us); |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1458 | } |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1459 | #include <syscalls/k_usleep_mrsh.c> |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1460 | #endif |
| 1461 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1462 | void z_impl_k_wakeup(k_tid_t thread) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1463 | { |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1464 | SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread); |
| 1465 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1466 | if (z_is_thread_pending(thread)) { |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1467 | return; |
| 1468 | } |
| 1469 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1470 | if (z_abort_thread_timeout(thread) < 0) { |
Andrew Boie | d2b8922 | 2019-11-08 10:44:22 -0800 | [diff] [blame] | 1471 | /* Might have just been sleeping forever */ |
| 1472 | if (thread->base.thread_state != _THREAD_SUSPENDED) { |
| 1473 | return; |
| 1474 | } |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1475 | } |
| 1476 | |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1477 | k_spinlock_key_t key = k_spin_lock(&_sched_spinlock); |
Peter Mitsis | 51ae993 | 2024-02-20 11:50:54 -0500 | [diff] [blame] | 1478 | |
Andy Ross | 4521e0c | 2019-03-22 10:30:19 -0700 | [diff] [blame] | 1479 | z_mark_thread_as_not_suspended(thread); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1480 | |
Peter Mitsis | 51ae993 | 2024-02-20 11:50:54 -0500 | [diff] [blame] | 1481 | if (!thread_active_elsewhere(thread)) { |
| 1482 | ready_thread(thread); |
| 1483 | } |
Andy Ross | 5737b5c | 2020-02-04 13:52:09 -0800 | [diff] [blame] | 1484 | |
Peter Mitsis | 51ae993 | 2024-02-20 11:50:54 -0500 | [diff] [blame] | 1485 | if (arch_is_in_isr()) { |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1486 | k_spin_unlock(&_sched_spinlock, key); |
Peter Mitsis | 51ae993 | 2024-02-20 11:50:54 -0500 | [diff] [blame] | 1487 | } else { |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1488 | z_reschedule(&_sched_spinlock, key); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1489 | } |
| 1490 | } |
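/*
 * Illustrative usage: k_wakeup() pairs with a sleeping thread. A K_FOREVER
 * sleep parks the caller as suspended (see z_impl_k_sleep() above) and this
 * path resumes it ('worker_tid' is a hypothetical thread id):
 *
 *	// In the sleeper:
 *	(void)k_sleep(K_FOREVER);	// parked until someone wakes us
 *
 *	// Elsewhere, from another thread or an ISR:
 *	k_wakeup(worker_tid);		// sleeper becomes runnable again
 */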
| 1491 | |
Enjia Mai | 7ac40aa | 2020-05-28 11:29:50 +0800 | [diff] [blame] | 1492 | #ifdef CONFIG_TRACE_SCHED_IPI |
| 1493 | extern void z_trace_sched_ipi(void); |
| 1494 | #endif |
| 1495 | |
Andy Ross | 42ed12a | 2019-02-19 16:03:39 -0800 | [diff] [blame] | 1496 | #ifdef CONFIG_SMP |
Andy Ross | 42ed12a | 2019-02-19 16:03:39 -0800 | [diff] [blame] | 1497 | void z_sched_ipi(void) |
| 1498 | { |
Daniel Leung | adac4cb | 2020-01-09 18:55:07 -0800 | [diff] [blame] | 1499 | /* NOTE: When adding code to this, make sure this is called |
 | 1500 |  * at an appropriate location when !CONFIG_SCHED_IPI_SUPPORTED. |
| 1501 | */ |
Enjia Mai | 7ac40aa | 2020-05-28 11:29:50 +0800 | [diff] [blame] | 1502 | #ifdef CONFIG_TRACE_SCHED_IPI |
| 1503 | z_trace_sched_ipi(); |
| 1504 | #endif |
Andy Ross | c5c3ad9 | 2023-03-07 08:29:31 -0800 | [diff] [blame] | 1505 | |
| 1506 | #ifdef CONFIG_TIMESLICING |
Nicolas Pitre | 5879d2d | 2023-03-09 22:45:18 -0500 | [diff] [blame] | 1507 | if (sliceable(_current)) { |
Andy Ross | c5c3ad9 | 2023-03-07 08:29:31 -0800 | [diff] [blame] | 1508 | z_time_slice(); |
| 1509 | } |
| 1510 | #endif |
Andy Ross | 42ed12a | 2019-02-19 16:03:39 -0800 | [diff] [blame] | 1511 | } |
Andy Ross | 42ed12a | 2019-02-19 16:03:39 -0800 | [diff] [blame] | 1512 | #endif |
| 1513 | |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1514 | #ifdef CONFIG_USERSPACE |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1515 | static inline void z_vrfy_k_wakeup(k_tid_t thread) |
| 1516 | { |
Anas Nashif | a08bfeb | 2023-09-27 11:20:28 +0000 | [diff] [blame] | 1517 | K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD)); |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1518 | z_impl_k_wakeup(thread); |
| 1519 | } |
| 1520 | #include <syscalls/k_wakeup_mrsh.c> |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1521 | #endif |
| 1522 | |
Daniel Leung | 0a50ff3 | 2023-09-25 11:56:10 -0700 | [diff] [blame] | 1523 | k_tid_t z_impl_k_sched_current_thread_query(void) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1524 | { |
Andy Ross | eefd3da | 2020-02-06 13:39:52 -0800 | [diff] [blame] | 1525 | #ifdef CONFIG_SMP |
| 1526 | /* In SMP, _current is a field read from _current_cpu, which |
| 1527 | * can race with preemption before it is read. We must lock |
| 1528 | * local interrupts when reading it. |
| 1529 | */ |
| 1530 | unsigned int k = arch_irq_lock(); |
| 1531 | #endif |
| 1532 | |
| 1533 | k_tid_t ret = _current_cpu->current; |
| 1534 | |
| 1535 | #ifdef CONFIG_SMP |
| 1536 | arch_irq_unlock(k); |
| 1537 | #endif |
| 1538 | return ret; |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1539 | } |
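/*
 * At the public API level this query is normally reached through
 * k_current_get(); a minimal usage sketch:
 *
 *	k_tid_t self = k_current_get();
 */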
| 1540 | |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1541 | #ifdef CONFIG_USERSPACE |
Daniel Leung | 0a50ff3 | 2023-09-25 11:56:10 -0700 | [diff] [blame] | 1542 | static inline k_tid_t z_vrfy_k_sched_current_thread_query(void) |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1543 | { |
Daniel Leung | 0a50ff3 | 2023-09-25 11:56:10 -0700 | [diff] [blame] | 1544 | return z_impl_k_sched_current_thread_query(); |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1545 | } |
Daniel Leung | 0a50ff3 | 2023-09-25 11:56:10 -0700 | [diff] [blame] | 1546 | #include <syscalls/k_sched_current_thread_query_mrsh.c> |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1547 | #endif |
| 1548 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1549 | int z_impl_k_is_preempt_thread(void) |
Benjamin Walsh | 445830d | 2016-11-10 15:54:27 -0500 | [diff] [blame] | 1550 | { |
Andrew Boie | 4f77c2a | 2019-11-07 12:43:29 -0800 | [diff] [blame] | 1551 | return !arch_is_in_isr() && is_preempt(_current); |
Benjamin Walsh | 445830d | 2016-11-10 15:54:27 -0500 | [diff] [blame] | 1552 | } |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1553 | |
| 1554 | #ifdef CONFIG_USERSPACE |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1555 | static inline int z_vrfy_k_is_preempt_thread(void) |
| 1556 | { |
| 1557 | return z_impl_k_is_preempt_thread(); |
| 1558 | } |
| 1559 | #include <syscalls/k_is_preempt_thread_mrsh.c> |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1560 | #endif |
Andy Ross | ab46b1b | 2019-01-30 15:00:42 -0800 | [diff] [blame] | 1561 | |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1562 | static inline void unpend_all(_wait_q_t *wait_q) |
| 1563 | { |
| 1564 | struct k_thread *thread; |
| 1565 | |
| 1566 | while ((thread = z_waitq_head(wait_q)) != NULL) { |
| 1567 | unpend_thread_no_timeout(thread); |
| 1568 | (void)z_abort_thread_timeout(thread); |
| 1569 | arch_thread_return_value_set(thread, 0); |
| 1570 | ready_thread(thread); |
| 1571 | } |
| 1572 | } |
| 1573 | |
Anas Nashif | a6ce422 | 2024-02-22 14:10:17 -0500 | [diff] [blame] | 1574 | #ifdef CONFIG_THREAD_ABORT_HOOK |
| 1575 | extern void thread_abort_hook(struct k_thread *thread); |
Chen Peng1 | 0f63d11 | 2021-09-06 13:59:40 +0800 | [diff] [blame] | 1576 | #endif |
| 1577 | |
Peter Mitsis | e1db1ce | 2023-08-14 14:06:52 -0400 | [diff] [blame] | 1578 | /** |
| 1579 | * @brief Dequeues the specified thread |
| 1580 | * |
| 1581 | * Dequeues the specified thread and move it into the specified new state. |
| 1582 | * |
| 1583 | * @param thread Identify the thread to halt |
Peter Mitsis | e7986eb | 2023-08-14 16:41:05 -0400 | [diff] [blame] | 1584 | * @param new_state New thread state (_THREAD_DEAD or _THREAD_SUSPENDED) |
Peter Mitsis | e1db1ce | 2023-08-14 14:06:52 -0400 | [diff] [blame] | 1585 | */ |
| 1586 | static void halt_thread(struct k_thread *thread, uint8_t new_state) |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1587 | { |
| 1588 | /* We hold the lock, and the thread is known not to be running |
| 1589 | * anywhere. |
| 1590 | */ |
Peter Mitsis | e1db1ce | 2023-08-14 14:06:52 -0400 | [diff] [blame] | 1591 | if ((thread->base.thread_state & new_state) == 0U) { |
| 1592 | thread->base.thread_state |= new_state; |
Peter Mitsis | e7986eb | 2023-08-14 16:41:05 -0400 | [diff] [blame] | 1593 | clear_halting(thread); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1594 | if (z_is_thread_queued(thread)) { |
Andy Ross | c230fb3 | 2021-09-23 16:41:30 -0700 | [diff] [blame] | 1595 | dequeue_thread(thread); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1596 | } |
Peter Mitsis | e7986eb | 2023-08-14 16:41:05 -0400 | [diff] [blame] | 1597 | |
| 1598 | if (new_state == _THREAD_DEAD) { |
| 1599 | if (thread->base.pended_on != NULL) { |
| 1600 | unpend_thread_no_timeout(thread); |
| 1601 | } |
| 1602 | (void)z_abort_thread_timeout(thread); |
| 1603 | unpend_all(&thread->join_queue); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1604 | } |
Peter Mitsis | e7986eb | 2023-08-14 16:41:05 -0400 | [diff] [blame] | 1605 | #ifdef CONFIG_SMP |
| 1606 | unpend_all(&thread->halt_queue); |
| 1607 | #endif |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1608 | update_cache(1); |
| 1609 | |
Peter Mitsis | e7986eb | 2023-08-14 16:41:05 -0400 | [diff] [blame] | 1610 | if (new_state == _THREAD_SUSPENDED) { |
| 1611 | return; |
| 1612 | } |
| 1613 | |
Grant Ramsay | 45701e6 | 2023-08-14 09:41:52 +1200 | [diff] [blame] | 1614 | #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) |
| 1615 | arch_float_disable(thread); |
| 1616 | #endif |
| 1617 | |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1618 | SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread); |
| 1619 | |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1620 | z_thread_monitor_exit(thread); |
Anas Nashif | a6ce422 | 2024-02-22 14:10:17 -0500 | [diff] [blame] | 1621 | #ifdef CONFIG_THREAD_ABORT_HOOK |
| 1622 | thread_abort_hook(thread); |
Chen Peng1 | 0f63d11 | 2021-09-06 13:59:40 +0800 | [diff] [blame] | 1623 | #endif |
| 1624 | |
Peter Mitsis | 6df8efe | 2023-05-11 14:06:46 -0400 | [diff] [blame] | 1625 | #ifdef CONFIG_OBJ_CORE_THREAD |
Peter Mitsis | e6f1090 | 2023-06-01 12:16:40 -0400 | [diff] [blame] | 1626 | #ifdef CONFIG_OBJ_CORE_STATS_THREAD |
| 1627 | k_obj_core_stats_deregister(K_OBJ_CORE(thread)); |
| 1628 | #endif |
Peter Mitsis | 6df8efe | 2023-05-11 14:06:46 -0400 | [diff] [blame] | 1629 | k_obj_core_unlink(K_OBJ_CORE(thread)); |
| 1630 | #endif |
| 1631 | |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1632 | #ifdef CONFIG_USERSPACE |
| 1633 | z_mem_domain_exit_thread(thread); |
Anas Nashif | 70cf96b | 2023-09-27 10:45:48 +0000 | [diff] [blame] | 1634 | k_thread_perms_all_clear(thread); |
Anas Nashif | 7a18c2b | 2023-09-27 10:45:18 +0000 | [diff] [blame] | 1635 | k_object_uninit(thread->stack_obj); |
| 1636 | k_object_uninit(thread); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1637 | #endif |
| 1638 | } |
| 1639 | } |
| 1640 | |
| 1641 | void z_thread_abort(struct k_thread *thread) |
| 1642 | { |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1643 | k_spinlock_key_t key = k_spin_lock(&_sched_spinlock); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1644 | |
Anas Nashif | 8791012 | 2024-02-22 22:24:36 -0500 | [diff] [blame] | 1645 | if (z_is_thread_essential(thread)) { |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1646 | k_spin_unlock(&_sched_spinlock, key); |
Andy Ross | fb61359 | 2022-05-19 12:55:28 -0700 | [diff] [blame] | 1647 | __ASSERT(false, "aborting essential thread %p", thread); |
| 1648 | k_panic(); |
| 1649 | return; |
| 1650 | } |
| 1651 | |
Anas Nashif | 3f4f3f6 | 2021-03-29 17:13:47 -0400 | [diff] [blame] | 1652 | if ((thread->base.thread_state & _THREAD_DEAD) != 0U) { |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1653 | k_spin_unlock(&_sched_spinlock, key); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1654 | return; |
| 1655 | } |
| 1656 | |
Peter Mitsis | e7986eb | 2023-08-14 16:41:05 -0400 | [diff] [blame] | 1657 | z_thread_halt(thread, key, true); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1658 | } |
| 1659 | |
| 1660 | #if !defined(CONFIG_ARCH_HAS_THREAD_ABORT) |
| 1661 | void z_impl_k_thread_abort(struct k_thread *thread) |
| 1662 | { |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1663 | SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread); |
| 1664 | |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1665 | z_thread_abort(thread); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1666 | |
| 1667 | SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1668 | } |
| 1669 | #endif /* !CONFIG_ARCH_HAS_THREAD_ABORT */ |
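| | /* With CONFIG_ARCH_HAS_THREAD_ABORT, the architecture layer is |
| | * expected to provide its own z_impl_k_thread_abort() wrapper, |
| | * typically to add arch-specific cleanup around the abort path. |
| | */ |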
| 1670 | |
| 1671 | int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout) |
| 1672 | { |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1673 | k_spinlock_key_t key = k_spin_lock(&_sched_spinlock); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1674 | int ret = 0; |
| 1675 | |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1676 | SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, join, thread, timeout); |
| 1677 | |
Anas Nashif | 3f4f3f6 | 2021-03-29 17:13:47 -0400 | [diff] [blame] | 1678 | if ((thread->base.thread_state & _THREAD_DEAD) != 0U) { |
Andy Ross | a08e23f | 2023-05-26 09:39:16 -0700 | [diff] [blame] | 1679 | z_sched_switch_spin(thread); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1680 | ret = 0; |
| 1681 | } else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { |
| 1682 | ret = -EBUSY; |
Anas Nashif | 3f4f3f6 | 2021-03-29 17:13:47 -0400 | [diff] [blame] | 1683 | } else if ((thread == _current) || |
| 1684 | (thread->base.pended_on == &_current->join_queue)) { |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1685 | ret = -EDEADLK; |
| 1686 | } else { |
| 1687 | __ASSERT(!arch_is_in_isr(), "cannot join in ISR"); |
| 1688 | add_to_waitq_locked(_current, &thread->join_queue); |
| 1689 | add_thread_timeout(_current, timeout); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1690 | |
| 1691 | SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout); |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1692 | ret = z_swap(&_sched_spinlock, key); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1693 | SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret); |
| 1694 | |
| 1695 | return ret; |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1696 | } |
| 1697 | |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1698 | SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret); |
| 1699 | |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1700 | k_spin_unlock(&_sched_spinlock, key); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1701 | return ret; |
| 1702 | } |
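| | /* Illustrative caller-side sketch of the join contract; worker_tid |
| | * and the 100 ms timeout are hypothetical, not part of this file: |
| | * |
| | * int rc = k_thread_join(worker_tid, K_MSEC(100)); |
| | * |
| | * if (rc == 0) { |
| | * // worker exited (or was already dead) |
| | * } else if (rc == -EBUSY) { |
| | * // worker still running and K_NO_WAIT was given |
| | * } else if (rc == -EAGAIN) { |
| | * // worker still running when the timeout expired |
| | * } else if (rc == -EDEADLK) { |
| | * // joining self, or two threads joining each other |
| | * } |
| | */ |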
| 1703 | |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1704 | #ifdef CONFIG_USERSPACE |
| 1705 | /* Special case: don't oops if the thread is uninitialized. This is because |
| 1706 | * the initialization bit does double-duty for thread objects; if false, it |
| 1707 | * means either that the thread object is truly uninitialized, or that the |
| 1708 | * thread ran and exited for some reason. |
| 1709 | * |
| 1710 | * Return true in this case, indicating that the caller should do nothing |
| 1711 | * and report success. |
| 1712 | */ |
| 1713 | static bool thread_obj_validate(struct k_thread *thread) |
| 1714 | { |
Anas Nashif | c25d080 | 2023-09-27 10:49:28 +0000 | [diff] [blame] | 1715 | struct k_object *ko = k_object_find(thread); |
Anas Nashif | 21254b2 | 2023-09-27 10:50:26 +0000 | [diff] [blame] | 1716 | int ret = k_object_validate(ko, K_OBJ_THREAD, _OBJ_INIT_TRUE); |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1717 | |
| 1718 | switch (ret) { |
| 1719 | case 0: |
| 1720 | return false; |
| 1721 | case -EINVAL: |
| 1722 | return true; |
| 1723 | default: |
| 1724 | #ifdef CONFIG_LOG |
Anas Nashif | 3ab3566 | 2023-09-27 10:51:23 +0000 | [diff] [blame] | 1725 | k_object_dump_error(ret, thread, ko, K_OBJ_THREAD); |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1726 | #endif /* CONFIG_LOG */ |
Anas Nashif | a08bfeb | 2023-09-27 11:20:28 +0000 | [diff] [blame] | 1727 | K_OOPS(K_SYSCALL_VERIFY_MSG(ret, "access denied")); |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1728 | } |
Enjia Mai | 53ca709 | 2021-01-15 17:09:58 +0800 | [diff] [blame] | 1729 | CODE_UNREACHABLE; /* LCOV_EXCL_LINE */ |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1730 | } |
| 1731 | |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1732 | static inline int z_vrfy_k_thread_join(struct k_thread *thread, |
| 1733 | k_timeout_t timeout) |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1734 | { |
| 1735 | if (thread_obj_validate(thread)) { |
| 1736 | return 0; |
| 1737 | } |
| 1738 | |
| 1739 | return z_impl_k_thread_join(thread, timeout); |
| 1740 | } |
| 1741 | #include <syscalls/k_thread_join_mrsh.c> |
Andrew Boie | a4c9190 | 2020-03-24 16:09:24 -0700 | [diff] [blame] | 1742 | |
| 1743 | static inline void z_vrfy_k_thread_abort(k_tid_t thread) |
| 1744 | { |
| 1745 | if (thread_obj_validate(thread)) { |
| 1746 | return; |
| 1747 | } |
| 1748 | |
Anas Nashif | 8791012 | 2024-02-22 22:24:36 -0500 | [diff] [blame] | 1749 | K_OOPS(K_SYSCALL_VERIFY_MSG(!z_is_thread_essential(thread), |
Andrew Boie | a4c9190 | 2020-03-24 16:09:24 -0700 | [diff] [blame] | 1750 | "aborting essential thread %p", thread)); |
| 1751 | |
| 1752 | z_impl_k_thread_abort((struct k_thread *)thread); |
| 1753 | } |
| 1754 | #include <syscalls/k_thread_abort_mrsh.c> |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1755 | #endif /* CONFIG_USERSPACE */ |
Peter Bigot | 0259c86 | 2021-01-12 13:45:32 -0600 | [diff] [blame] | 1756 | |
| 1757 | /* |
| 1758 | * future scheduler.h API implementations |
| 1759 | */ |
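| | /* These helpers package the common pend/wake sequences under |
| | * _sched_spinlock so that IPC primitives built on top of them do |
| | * not have to manipulate the scheduler's internals directly. |
| | */ |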
| 1760 | bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data) |
| 1761 | { |
| 1762 | struct k_thread *thread; |
| 1763 | bool ret = false; |
| 1764 | |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1765 | K_SPINLOCK(&_sched_spinlock) { |
Peter Bigot | 0259c86 | 2021-01-12 13:45:32 -0600 | [diff] [blame] | 1766 | thread = _priq_wait_best(&wait_q->waitq); |
| 1767 | |
| 1768 | if (thread != NULL) { |
| 1769 | z_thread_return_value_set_with_data(thread, |
| 1770 | swap_retval, |
| 1771 | swap_data); |
| 1772 | unpend_thread_no_timeout(thread); |
| 1773 | (void)z_abort_thread_timeout(thread); |
| 1774 | ready_thread(thread); |
| 1775 | ret = true; |
| 1776 | } |
| 1777 | } |
| 1778 | |
| 1779 | return ret; |
| 1780 | } |
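| | /* Minimal sketch of a give-side primitive built on z_sched_wake(); |
| | * the my_sem type with its count, lock and wait_q members is |
| | * hypothetical, not part of this file: |
| | * |
| | * void my_give(struct my_sem *sem) |
| | * { |
| | * k_spinlock_key_t key = k_spin_lock(&sem->lock); |
| | * |
| | * if (!z_sched_wake(&sem->wait_q, 0, NULL)) { |
| | * sem->count++; // nobody was waiting; bank the credit |
| | * } |
| | * k_spin_unlock(&sem->lock, key); |
| | * } |
| | */ |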
| 1781 | |
| 1782 | int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key, |
| 1783 | _wait_q_t *wait_q, k_timeout_t timeout, void **data) |
| 1784 | { |
| 1785 | int ret = z_pend_curr(lock, key, wait_q, timeout); |
| 1786 | |
| 1787 | if (data != NULL) { |
| 1788 | *data = _current->base.swap_data; |
| 1789 | } |
| 1790 | return ret; |
| 1791 | } |
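| | /* Matching take-side sketch (same hypothetical my_sem as above); |
| | * z_sched_wait() both pends the caller and releases sem->lock, and |
| | * returns the swap_retval passed to z_sched_wake() on success: |
| | * |
| | * int my_take(struct my_sem *sem, k_timeout_t timeout) |
| | * { |
| | * k_spinlock_key_t key = k_spin_lock(&sem->lock); |
| | * |
| | * if (sem->count > 0) { |
| | * sem->count--; |
| | * k_spin_unlock(&sem->lock, key); |
| | * return 0; |
| | * } |
| | * return z_sched_wait(&sem->lock, key, &sem->wait_q, timeout, NULL); |
| | * } |
| | */ |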
Peter Mitsis | ca58339 | 2023-01-05 11:50:21 -0500 | [diff] [blame] | 1792 | |
| 1793 | int z_sched_waitq_walk(_wait_q_t *wait_q, |
| 1794 | int (*func)(struct k_thread *, void *), void *data) |
| 1795 | { |
| 1796 | struct k_thread *thread; |
| 1797 | int status = 0; |
| 1798 | |
Anas Nashif | 0d8da5f | 2024-03-06 15:59:36 -0500 | [diff] [blame] | 1799 | K_SPINLOCK(&_sched_spinlock) { |
Peter Mitsis | ca58339 | 2023-01-05 11:50:21 -0500 | [diff] [blame] | 1800 | _WAIT_Q_FOR_EACH(wait_q, thread) { |
| 1801 | |
| 1802 | /* |
| 1803 | * Invoke the callback on each waiting thread, stopping |
| 1804 | * early as soon as the callback returns a non-zero |
| 1805 | * value. |
| 1806 | */ |
| 1807 | |
| 1808 | status = func(thread, data); |
| 1809 | if (status != 0) { |
| 1810 | break; |
| 1811 | } |
| 1812 | } |
| 1813 | } |
| 1814 | |
| 1815 | return status; |
| 1816 | } |
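| | /* Example walk callback (hypothetical): counts the cooperative |
| | * (negative-priority) threads on a wait queue and never aborts |
| | * the walk early: |
| | * |
| | * static int count_coop_waiters(struct k_thread *thread, void *data) |
| | * { |
| | * int *count = data; |
| | * |
| | * if (thread->base.prio < 0) { |
| | * (*count)++; |
| | * } |
| | * return 0; // zero means: keep walking |
| | * } |
| | * |
| | * Usage: int n = 0; (void)z_sched_waitq_walk(&wq, count_coop_waiters, &n); |
| | */ |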