/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/spinlock.h>
#include <zephyr/kernel/internal/sched_priq.h>
#include <wait_q.h>
#include <kswap.h>
#include <kernel_arch_func.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <stdbool.h>
#include <kernel_internal.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/timing/timing.h>
#include <zephyr/sys/util.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#if defined(CONFIG_SCHED_DUMB)
#define _priq_run_add		z_priq_dumb_add
#define _priq_run_remove	z_priq_dumb_remove
# if defined(CONFIG_SCHED_CPU_MASK)
#  define _priq_run_best	_priq_dumb_mask_best
# else
#  define _priq_run_best	z_priq_dumb_best
# endif
#elif defined(CONFIG_SCHED_SCALABLE)
#define _priq_run_add		z_priq_rb_add
#define _priq_run_remove	z_priq_rb_remove
#define _priq_run_best		z_priq_rb_best
#elif defined(CONFIG_SCHED_MULTIQ)
#define _priq_run_add		z_priq_mq_add
#define _priq_run_remove	z_priq_mq_remove
#define _priq_run_best		z_priq_mq_best
static ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq,
					struct k_thread *thread);
static ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq,
					    struct k_thread *thread);
#endif
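
/* Orientation note (added, not in the original source): CONFIG_SCHED_DUMB
 * keeps the ready queue as a single doubly-linked list scanned in priority
 * order, CONFIG_SCHED_SCALABLE keeps it in a balanced tree, and
 * CONFIG_SCHED_MULTIQ keeps one list per priority level; see the Kconfig
 * help text for the authoritative trade-offs.
 */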

#if defined(CONFIG_WAITQ_SCALABLE)
#define z_priq_wait_add		z_priq_rb_add
#define _priq_wait_remove	z_priq_rb_remove
#define _priq_wait_best		z_priq_rb_best
#elif defined(CONFIG_WAITQ_DUMB)
#define z_priq_wait_add		z_priq_dumb_add
#define _priq_wait_remove	z_priq_dumb_remove
#define _priq_wait_best		z_priq_dumb_best
#endif

struct k_spinlock sched_spinlock;

static void update_cache(int preempt_ok);
static void end_thread(struct k_thread *thread);

static inline int is_preempt(struct k_thread *thread)
{
	/* explanation in kernel_structs.h */
	return thread->base.preempt <= _PREEMPT_THRESHOLD;
}
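
/* Illustrative note (added): per the field encoding described in
 * kernel_structs.h, the test above is true exactly when the thread runs at
 * a preemptible (non-negative) priority and has not locked the scheduler;
 * e.g. a priority 5 thread that has called k_sched_lock() is treated as
 * non-preemptible here until it unlocks.
 */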

BUILD_ASSERT(CONFIG_NUM_COOP_PRIORITIES >= CONFIG_NUM_METAIRQ_PRIORITIES,
	     "CONFIG_NUM_COOP_PRIORITIES must be at least as large as "
	     "CONFIG_NUM_METAIRQ_PRIORITIES, because meta-IRQs are just a "
	     "special class of cooperative threads.");

static inline int is_metairq(struct k_thread *thread)
{
#if CONFIG_NUM_METAIRQ_PRIORITIES > 0
	return (thread->base.prio - K_HIGHEST_THREAD_PRIO)
		< CONFIG_NUM_METAIRQ_PRIORITIES;
#else
	ARG_UNUSED(thread);
	return 0;
#endif
}
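
/* Worked example (added; the config values are illustrative): with
 * CONFIG_NUM_COOP_PRIORITIES = 16, K_HIGHEST_THREAD_PRIO is -16, so with
 * CONFIG_NUM_METAIRQ_PRIORITIES = 2 only priorities -16 and -15 satisfy
 * (prio - K_HIGHEST_THREAD_PRIO) < 2 and are treated as meta-IRQ threads.
 */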

#if CONFIG_ASSERT
static inline bool is_thread_dummy(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_DUMMY) != 0U;
}
#endif

/*
 * Return value uses the same convention as memcmp:
 * > 0 -> thread 1 priority  > thread 2 priority
 * = 0 -> thread 1 priority == thread 2 priority
 * < 0 -> thread 1 priority  < thread 2 priority
 * Do not rely on the actual value returned aside from the above.
 * (Again, like memcmp.)
 */
int32_t z_sched_prio_cmp(struct k_thread *thread_1,
	struct k_thread *thread_2)
{
	/* `prio` is narrower than 32 bits, so the subtraction below
	 * cannot overflow.
	 */
	int32_t b1 = thread_1->base.prio;
	int32_t b2 = thread_2->base.prio;

	if (b1 != b2) {
		return b2 - b1;
	}

#ifdef CONFIG_SCHED_DEADLINE
	/* If we assume all deadlines live within the same "half" of
	 * the 32 bit modulus space (this is a documented API rule),
	 * then the latest deadline in the queue minus the earliest is
	 * guaranteed to be (2's complement) non-negative.  We can
	 * leverage that to compare the values without having to check
	 * the current time.
	 */
	uint32_t d1 = thread_1->base.prio_deadline;
	uint32_t d2 = thread_2->base.prio_deadline;

	if (d1 != d2) {
		/* Sooner deadline means higher effective priority.
		 * Doing the calculation with unsigned types and casting
		 * to signed isn't perfect, but at least reduces this
		 * from undefined behavior on overflow to
		 * implementation-defined behavior.
		 */
		return (int32_t) (d2 - d1);
	}
#endif
	return 0;
}
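
/* Worked example of the deadline comparison above (added): with equal
 * static priorities and d1 = 0xFFFFFFF0, d2 = 0x00000010, thread_1's
 * deadline is the earlier one once 32-bit wraparound is accounted for,
 * and (int32_t)(d2 - d1) == 0x20 > 0 correctly reports thread_1 as the
 * higher effective priority without ever reading the current time.
 */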

static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
					 int preempt_ok)
{
	/* Preemption is OK if it's being explicitly allowed by
	 * software state (e.g. the thread called k_yield())
	 */
	if (preempt_ok != 0) {
		return true;
	}

	__ASSERT(_current != NULL, "");

	/* Or if we're pended/suspended/dummy (duh) */
	if (z_is_thread_prevented_from_running(_current)) {
		return true;
	}

	/* Edge case on ARM where a thread can be pended out of an
	 * interrupt handler before the "synchronous" swap starts
	 * context switching.  Platforms with atomic swap can never
	 * hit this.
	 */
	if (IS_ENABLED(CONFIG_SWAP_NONATOMIC)
	    && z_is_thread_timeout_active(thread)) {
		return true;
	}

	/* Otherwise we have to be running a preemptible thread or
	 * switching to a metairq
	 */
	if (is_preempt(_current) || is_metairq(thread)) {
		return true;
	}

	return false;
}

#ifdef CONFIG_SCHED_CPU_MASK
static ALWAYS_INLINE struct k_thread *_priq_dumb_mask_best(sys_dlist_t *pq)
{
	/* With masks enabled we need to be prepared to walk the list
	 * looking for one we can run
	 */
	struct k_thread *thread;

	SYS_DLIST_FOR_EACH_CONTAINER(pq, thread, base.qnode_dlist) {
		if ((thread->base.cpu_mask & BIT(_current_cpu->id)) != 0) {
			return thread;
		}
	}
	return NULL;
}
#endif

#if defined(CONFIG_SCHED_DUMB) || defined(CONFIG_WAITQ_DUMB)
static ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq,
					  struct k_thread *thread)
{
	struct k_thread *t;

	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));

	SYS_DLIST_FOR_EACH_CONTAINER(pq, t, base.qnode_dlist) {
		if (z_sched_prio_cmp(thread, t) > 0) {
			sys_dlist_insert(&t->base.qnode_dlist,
					 &thread->base.qnode_dlist);
			return;
		}
	}

	sys_dlist_append(pq, &thread->base.qnode_dlist);
}
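
/* Descriptive note (added): because the comparison above is strict
 * (z_sched_prio_cmp(thread, t) > 0), a newly added thread lands after any
 * already-queued threads of equal priority, so equal-priority threads are
 * served in FIFO order.
 */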
#endif

static ALWAYS_INLINE void *thread_runq(struct k_thread *thread)
{
#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	int cpu, m = thread->base.cpu_mask;

	/* Edge case: it's legal per the API to "make runnable" a
	 * thread with all CPUs masked off (i.e. one that isn't
	 * actually runnable!).  Sort of a wart in the API and maybe
	 * we should address this in docs/assertions instead to avoid
	 * the extra test.
	 */
	cpu = m == 0 ? 0 : u32_count_trailing_zeros(m);

	return &_kernel.cpus[cpu].ready_q.runq;
#else
	ARG_UNUSED(thread);
	return &_kernel.ready_q.runq;
#endif
}
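
/* Illustrative example (added): under CONFIG_SCHED_CPU_MASK_PIN_ONLY, a
 * thread pinned with cpu_mask = BIT(2) (0x04) yields
 * u32_count_trailing_zeros(0x04) == 2 and therefore lives on CPU 2's run
 * queue; the m == 0 special case parks an unrunnable thread on CPU 0's
 * queue.
 */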

static ALWAYS_INLINE void *curr_cpu_runq(void)
{
#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	return &arch_curr_cpu()->ready_q.runq;
#else
	return &_kernel.ready_q.runq;
#endif
}

static ALWAYS_INLINE void runq_add(struct k_thread *thread)
{
	_priq_run_add(thread_runq(thread), thread);
}

static ALWAYS_INLINE void runq_remove(struct k_thread *thread)
{
	_priq_run_remove(thread_runq(thread), thread);
}

static ALWAYS_INLINE struct k_thread *runq_best(void)
{
	return _priq_run_best(curr_cpu_runq());
}

/* _current is never in the run queue until context switch on
 * SMP configurations, see z_requeue_current()
 */
static inline bool should_queue_thread(struct k_thread *th)
{
	return !IS_ENABLED(CONFIG_SMP) || th != _current;
}

static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_QUEUED;
	if (should_queue_thread(thread)) {
		runq_add(thread);
	}
#ifdef CONFIG_SMP
	if (thread == _current) {
		/* add current to end of queue means "yield" */
		_current_cpu->swap_ok = true;
	}
#endif
}

static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_QUEUED;
	if (should_queue_thread(thread)) {
		runq_remove(thread);
	}
}

static void signal_pending_ipi(void)
{
	/* Synchronization note: you might think we need to lock these
	 * two steps, but an IPI is idempotent.  It's OK if we do it
	 * twice.  All we require is that if a CPU sees the flag true,
	 * it is guaranteed to send the IPI, and if a core sets
	 * pending_ipi, the IPI will be sent the next time through
	 * this code.
	 */
#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
	if (arch_num_cpus() > 1) {
		if (_kernel.pending_ipi) {
			_kernel.pending_ipi = false;
			arch_sched_ipi();
		}
	}
#endif
}

#ifdef CONFIG_SMP
/* Called out of z_swap() when CONFIG_SMP.  On SMP the current thread
 * must never live in the run queue until we are inexorably on the
 * context switch path; otherwise there is a deadlock condition where
 * a set of CPUs each pick a cycle of threads to run and then wait for
 * them all to context switch forever.
 */
void z_requeue_current(struct k_thread *curr)
{
	if (z_is_thread_queued(curr)) {
		runq_add(curr);
	}
	signal_pending_ipi();
}

static inline bool is_aborting(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_ABORTING) != 0U;
}
#endif

static ALWAYS_INLINE struct k_thread *next_up(void)
{
#ifdef CONFIG_SMP
	if (is_aborting(_current)) {
		end_thread(_current);
	}
#endif

	struct k_thread *thread = runq_best();

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && \
	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
	/* MetaIRQs must always attempt to return to the cooperative
	 * thread they preempted and not whatever happens to be highest
	 * priority now.  The cooperative thread was promised it
	 * wouldn't be preempted (by non-metairq threads)!
	 */
	struct k_thread *mirqp = _current_cpu->metairq_preempted;

	if (mirqp != NULL && (thread == NULL || !is_metairq(thread))) {
		if (!z_is_thread_prevented_from_running(mirqp)) {
			thread = mirqp;
		} else {
			_current_cpu->metairq_preempted = NULL;
		}
	}
#endif

#ifndef CONFIG_SMP
	/* In uniprocessor mode, we can leave the current thread in
	 * the queue (actually we have to, otherwise the assembly
	 * context switch code for all architectures would be
	 * responsible for putting it back in z_swap and ISR return!),
	 * which makes this choice simple.
	 */
	return (thread != NULL) ? thread : _current_cpu->idle_thread;
#else
	/* Under SMP, the "cache" mechanism for selecting the next
	 * thread doesn't work, so we have more work to do to test
	 * _current against the best choice from the queue.  Here, the
	 * thread selected above represents "the best thread that is
	 * not current".
	 *
	 * Subtle note on "queued": in SMP mode, _current does not
	 * live in the queue, so this isn't exactly the same thing as
	 * "ready", it means "is _current already added back to the
	 * queue such that we don't want to re-add it".
	 */
	bool queued = z_is_thread_queued(_current);
	bool active = !z_is_thread_prevented_from_running(_current);

	if (thread == NULL) {
		thread = _current_cpu->idle_thread;
	}

	if (active) {
		int32_t cmp = z_sched_prio_cmp(_current, thread);

		/* Ties only switch if state says we yielded */
		if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) {
			thread = _current;
		}

		if (!should_preempt(thread, _current_cpu->swap_ok)) {
			thread = _current;
		}
	}

	/* Put _current back into the queue */
	if (thread != _current && active &&
	    !z_is_idle_thread_object(_current) && !queued) {
		queue_thread(_current);
	}

	/* Take the new _current out of the queue */
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}

	_current_cpu->swap_ok = false;
	return thread;
#endif
}

static void move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}
	queue_thread(thread);
	update_cache(thread == _current);
}

static void flag_ipi(void)
{
#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
	if (arch_num_cpus() > 1) {
		_kernel.pending_ipi = true;
	}
#endif
}

#ifdef CONFIG_TIMESLICING

static int slice_ticks = DIV_ROUND_UP(CONFIG_TIMESLICE_SIZE * Z_HZ_ticks, Z_HZ_ms);
static int slice_max_prio = CONFIG_TIMESLICE_PRIORITY;
static struct _timeout slice_timeouts[CONFIG_MP_MAX_NUM_CPUS];
static bool slice_expired[CONFIG_MP_MAX_NUM_CPUS];

#ifdef CONFIG_SWAP_NONATOMIC
/* If z_swap() isn't atomic, then it's possible for a timer interrupt
 * to try to timeslice away _current after it has already pended
 * itself but before the corresponding context switch.  Treat that as
 * a noop condition in z_time_slice().
 */
static struct k_thread *pending_current;
#endif

static inline int slice_time(struct k_thread *thread)
{
	int ret = slice_ticks;

#ifdef CONFIG_TIMESLICE_PER_THREAD
	if (thread->base.slice_ticks != 0) {
		ret = thread->base.slice_ticks;
	}
#else
	ARG_UNUSED(thread);
#endif
	return ret;
}

static inline bool sliceable(struct k_thread *thread)
{
	bool ret = is_preempt(thread)
		&& slice_time(thread) != 0
		&& !z_is_prio_higher(thread->base.prio, slice_max_prio)
		&& !z_is_thread_prevented_from_running(thread)
		&& !z_is_idle_thread_object(thread);

#ifdef CONFIG_TIMESLICE_PER_THREAD
	ret |= thread->base.slice_ticks != 0;
#endif

	return ret;
}

static void slice_timeout(struct _timeout *t)
{
	int cpu = ARRAY_INDEX(slice_timeouts, t);

	slice_expired[cpu] = true;

	/* We need an IPI if we just handled a timeslice expiration
	 * for a different CPU.  Ideally this would be able to target
	 * the specific core, but that's not part of the API yet.
	 */
	if (IS_ENABLED(CONFIG_SMP) && cpu != _current_cpu->id) {
		flag_ipi();
	}
}

void z_reset_time_slice(struct k_thread *curr)
{
	int cpu = _current_cpu->id;

	z_abort_timeout(&slice_timeouts[cpu]);
	slice_expired[cpu] = false;
	if (sliceable(curr)) {
		z_add_timeout(&slice_timeouts[cpu], slice_timeout,
			      K_TICKS(slice_time(curr) - 1));
	}
}

void k_sched_time_slice_set(int32_t slice, int prio)
{
	K_SPINLOCK(&sched_spinlock) {
		slice_ticks = k_ms_to_ticks_ceil32(slice);
		slice_max_prio = prio;
		z_reset_time_slice(_current);
	}
}
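
/* Usage sketch (added, illustrative): a call such as
 *
 *     k_sched_time_slice_set(10, 0);
 *
 * enables round-robin slicing with roughly 10 ms slices for preemptible
 * threads whose priority is numerically >= 0, per the sliceable() test
 * above.
 */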

#ifdef CONFIG_TIMESLICE_PER_THREAD
void k_thread_time_slice_set(struct k_thread *th, int32_t thread_slice_ticks,
			     k_thread_timeslice_fn_t expired, void *data)
{
	K_SPINLOCK(&sched_spinlock) {
		th->base.slice_ticks = thread_slice_ticks;
		th->base.slice_expired = expired;
		th->base.slice_data = data;
	}
}
#endif
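
/* Usage sketch for the per-thread variant (added; `worker` and
 * `on_slice_expired` are hypothetical caller-side names):
 *
 *     k_thread_time_slice_set(&worker, k_ms_to_ticks_ceil32(2),
 *                             on_slice_expired, NULL);
 *
 * gives one thread a ~2 ms budget and a callback invoked from
 * z_time_slice() below when that budget expires.
 */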

/* Called out of each timer interrupt */
void z_time_slice(void)
{
	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
	struct k_thread *curr = _current;

#ifdef CONFIG_SWAP_NONATOMIC
	if (pending_current == curr) {
		z_reset_time_slice(curr);
		k_spin_unlock(&sched_spinlock, key);
		return;
	}
	pending_current = NULL;
#endif

	if (slice_expired[_current_cpu->id] && sliceable(curr)) {
#ifdef CONFIG_TIMESLICE_PER_THREAD
		if (curr->base.slice_expired) {
			k_spin_unlock(&sched_spinlock, key);
			curr->base.slice_expired(curr, curr->base.slice_data);
			key = k_spin_lock(&sched_spinlock);
		}
#endif
		if (!z_is_thread_prevented_from_running(curr)) {
			move_thread_to_end_of_prio_q(curr);
		}
		z_reset_time_slice(curr);
	}
	k_spin_unlock(&sched_spinlock, key);
}
#endif

/* Track cooperative threads preempted by metairqs so we can return to
 * them specifically.  Called at the moment a new thread has been
 * selected to run.
 */
static void update_metairq_preempt(struct k_thread *thread)
{
#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && \
	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
	if (is_metairq(thread) && !is_metairq(_current) &&
	    !is_preempt(_current)) {
		/* Record new preemption */
		_current_cpu->metairq_preempted = _current;
	} else if (!is_metairq(thread) && !z_is_idle_thread_object(thread)) {
		/* Returning from existing preemption */
		_current_cpu->metairq_preempted = NULL;
	}
#else
	ARG_UNUSED(thread);
#endif
}

static void update_cache(int preempt_ok)
{
#ifndef CONFIG_SMP
	struct k_thread *thread = next_up();

	if (should_preempt(thread, preempt_ok)) {
#ifdef CONFIG_TIMESLICING
		if (thread != _current) {
			z_reset_time_slice(thread);
		}
#endif
		update_metairq_preempt(thread);
		_kernel.ready_q.cache = thread;
	} else {
		_kernel.ready_q.cache = _current;
	}

#else
	/* The way this works is that the CPU record keeps its
	 * "cooperative swapping is OK" flag until the next reschedule
	 * call or context switch.  It doesn't need to be tracked per
	 * thread because if the thread gets preempted for whatever
	 * reason the scheduler will make the same decision anyway.
	 */
	_current_cpu->swap_ok = preempt_ok;
#endif
}

static bool thread_active_elsewhere(struct k_thread *thread)
{
	/* True if the thread is currently running on another CPU.
	 * There are more scalable designs to answer this question in
	 * constant time, but this is fine for now.
	 */
#ifdef CONFIG_SMP
	int currcpu = _current_cpu->id;

	unsigned int num_cpus = arch_num_cpus();

	for (int i = 0; i < num_cpus; i++) {
		if ((i != currcpu) &&
		    (_kernel.cpus[i].current == thread)) {
			return true;
		}
	}
#endif
	ARG_UNUSED(thread);
	return false;
}

static void ready_thread(struct k_thread *thread)
{
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(thread));
#endif

	/* If the thread is already queued, do not try to add it to the
	 * run queue again
	 */
	if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) {
		SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_ready, thread);

		queue_thread(thread);
		update_cache(0);
		flag_ipi();
	}
}

void z_ready_thread(struct k_thread *thread)
{
	K_SPINLOCK(&sched_spinlock) {
		if (!thread_active_elsewhere(thread)) {
			ready_thread(thread);
		}
	}
}

void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
{
	K_SPINLOCK(&sched_spinlock) {
		move_thread_to_end_of_prio_q(thread);
	}
}

void z_sched_start(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

	if (z_has_thread_started(thread)) {
		k_spin_unlock(&sched_spinlock, key);
		return;
	}

	z_mark_thread_as_started(thread);
	ready_thread(thread);
	z_reschedule(&sched_spinlock, key);
}

void z_impl_k_thread_suspend(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, suspend, thread);

	(void)z_abort_thread_timeout(thread);

	K_SPINLOCK(&sched_spinlock) {
		if (z_is_thread_queued(thread)) {
			dequeue_thread(thread);
		}
		z_mark_thread_as_suspended(thread);
		update_cache(thread == _current);
	}

	if (thread == _current) {
		z_reschedule_unlocked();
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, suspend, thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_suspend(struct k_thread *thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_suspend(thread);
}
#include <syscalls/k_thread_suspend_mrsh.c>
#endif

void z_impl_k_thread_resume(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, resume, thread);

	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);

	/* Do not try to resume a thread that was not suspended */
	if (!z_is_thread_suspended(thread)) {
		k_spin_unlock(&sched_spinlock, key);
		return;
	}

	z_mark_thread_as_not_suspended(thread);
	ready_thread(thread);

	z_reschedule(&sched_spinlock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, resume, thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_resume(struct k_thread *thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	z_impl_k_thread_resume(thread);
}
#include <syscalls/k_thread_resume_mrsh.c>
#endif

static _wait_q_t *pended_on_thread(struct k_thread *thread)
{
	__ASSERT_NO_MSG(thread->base.pended_on);

	return thread->base.pended_on;
}

static void unready_thread(struct k_thread *thread)
{
	if (z_is_thread_queued(thread)) {
		dequeue_thread(thread);
	}
	update_cache(thread == _current);
}

/* sched_spinlock must be held */
static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q)
{
	unready_thread(thread);
	z_mark_thread_as_pending(thread);

	SYS_PORT_TRACING_FUNC(k_thread, sched_pend, thread);

	if (wait_q != NULL) {
		thread->base.pended_on = wait_q;
		z_priq_wait_add(&wait_q->waitq, thread);
	}
}

static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
{
	if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		z_add_thread_timeout(thread, timeout);
	}
}

static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q,
			k_timeout_t timeout)
{
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(wait_q == NULL || arch_mem_coherent(wait_q));
#endif
	add_to_waitq_locked(thread, wait_q);
	add_thread_timeout(thread, timeout);
}

void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
		   k_timeout_t timeout)
{
	__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
	K_SPINLOCK(&sched_spinlock) {
		pend_locked(thread, wait_q, timeout);
	}
}

static inline void unpend_thread_no_timeout(struct k_thread *thread)
{
	_priq_wait_remove(&pended_on_thread(thread)->waitq, thread);
	z_mark_thread_as_not_pending(thread);
	thread->base.pended_on = NULL;
}

ALWAYS_INLINE void z_unpend_thread_no_timeout(struct k_thread *thread)
{
	K_SPINLOCK(&sched_spinlock) {
		if (thread->base.pended_on != NULL) {
			unpend_thread_no_timeout(thread);
		}
	}
}

void z_sched_wake_thread(struct k_thread *thread, bool is_timeout)
{
	K_SPINLOCK(&sched_spinlock) {
		bool killed = ((thread->base.thread_state & _THREAD_DEAD) ||
			       (thread->base.thread_state & _THREAD_ABORTING));

#ifdef CONFIG_EVENTS
		bool do_nothing = thread->no_wake_on_timeout && is_timeout;

		thread->no_wake_on_timeout = false;

		if (do_nothing) {
			continue;
		}
#endif

		if (!killed) {
			/* The thread is not being killed */
			if (thread->base.pended_on != NULL) {
				unpend_thread_no_timeout(thread);
			}
			z_mark_thread_as_started(thread);
			if (is_timeout) {
				z_mark_thread_as_not_suspended(thread);
			}
			ready_thread(thread);
		}
	}
}

#ifdef CONFIG_SYS_CLOCK_EXISTS
/* Timeout handler for *_thread_timeout() APIs */
void z_thread_timeout(struct _timeout *timeout)
{
	struct k_thread *thread = CONTAINER_OF(timeout,
					       struct k_thread, base.timeout);

	z_sched_wake_thread(thread, true);
}
#endif

int z_pend_curr_irqlock(uint32_t key, _wait_q_t *wait_q, k_timeout_t timeout)
{
	/* This is a legacy API for pre-switch architectures and isn't
	 * correctly synchronized for multi-cpu use
	 */
	__ASSERT_NO_MSG(!IS_ENABLED(CONFIG_SMP));

	pend_locked(_current, wait_q, timeout);

#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
	pending_current = _current;

	int ret = z_swap_irqlock(key);
	K_SPINLOCK(&sched_spinlock) {
		if (pending_current == _current) {
			pending_current = NULL;
		}
	}
	return ret;
#else
	return z_swap_irqlock(key);
#endif
}

int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
		_wait_q_t *wait_q, k_timeout_t timeout)
{
#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
	pending_current = _current;
#endif
	__ASSERT_NO_MSG(sizeof(sched_spinlock) == 0 || lock != &sched_spinlock);

	/* We do a "lock swap" prior to calling z_swap(), such that
	 * the caller's lock gets released as desired.  But we ensure
	 * that we hold the scheduler lock and leave local interrupts
	 * masked until we reach the context switch.  z_swap() itself
	 * has similar code; the duplication is because it's a legacy
	 * API that doesn't expect to be called with scheduler lock
	 * held.
	 */
	(void) k_spin_lock(&sched_spinlock);
	pend_locked(_current, wait_q, timeout);
	k_spin_release(lock);
	return z_swap(&sched_spinlock, key);
}
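
/* Illustrative caller pattern (added; object and field names are
 * hypothetical): kernel objects typically use z_pend_curr() to atomically
 * drop their own lock and block, e.g.
 *
 *     k_spinlock_key_t key = k_spin_lock(&obj->lock);
 *     ...
 *     return z_pend_curr(&obj->lock, key, &obj->wait_q, timeout);
 *
 * The caller's lock is released inside, and the pending thread eventually
 * resumes with the return value of z_swap().
 */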
| 885 | |
Andy Ross | 604f0f4 | 2021-02-09 16:47:47 -0800 | [diff] [blame] | 886 | struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q) |
| 887 | { |
| 888 | struct k_thread *thread = NULL; |
| 889 | |
Florian Grandel | e256b7d | 2023-07-07 09:12:38 +0200 | [diff] [blame] | 890 | K_SPINLOCK(&sched_spinlock) { |
Andy Ross | 604f0f4 | 2021-02-09 16:47:47 -0800 | [diff] [blame] | 891 | thread = _priq_wait_best(&wait_q->waitq); |
| 892 | |
| 893 | if (thread != NULL) { |
| 894 | unpend_thread_no_timeout(thread); |
| 895 | } |
| 896 | } |
| 897 | |
| 898 | return thread; |
| 899 | } |
| 900 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 901 | struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q) |
Andy Ross | e7ded11 | 2018-04-11 14:52:47 -0700 | [diff] [blame] | 902 | { |
Andy Ross | 604f0f4 | 2021-02-09 16:47:47 -0800 | [diff] [blame] | 903 | struct k_thread *thread = NULL; |
Andy Ross | e7ded11 | 2018-04-11 14:52:47 -0700 | [diff] [blame] | 904 | |
Florian Grandel | e256b7d | 2023-07-07 09:12:38 +0200 | [diff] [blame] | 905 | K_SPINLOCK(&sched_spinlock) { |
Andy Ross | 604f0f4 | 2021-02-09 16:47:47 -0800 | [diff] [blame] | 906 | thread = _priq_wait_best(&wait_q->waitq); |
| 907 | |
| 908 | if (thread != NULL) { |
| 909 | unpend_thread_no_timeout(thread); |
| 910 | (void)z_abort_thread_timeout(thread); |
| 911 | } |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 912 | } |
Andy Ross | e7ded11 | 2018-04-11 14:52:47 -0700 | [diff] [blame] | 913 | |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 914 | return thread; |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 915 | } |
Andy Ross | e7ded11 | 2018-04-11 14:52:47 -0700 | [diff] [blame] | 916 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 917 | void z_unpend_thread(struct k_thread *thread) |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 918 | { |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 919 | z_unpend_thread_no_timeout(thread); |
| 920 | (void)z_abort_thread_timeout(thread); |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 921 | } |
| 922 | |
Andy Ross | 6f13980 | 2019-08-20 11:21:28 -0700 | [diff] [blame] | 923 | /* Priority set utility that does no rescheduling; it just changes the |
| 924 | * run queue state and returns true if a reschedule is needed later. |
| 925 | */ |
| 926 | bool z_set_prio(struct k_thread *thread, int prio) |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 927 | { |
Flavio Ceolin | 02ed85b | 2018-09-20 15:43:57 -0700 | [diff] [blame] | 928 | bool need_sched = false; |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 929 | |
Florian Grandel | e256b7d | 2023-07-07 09:12:38 +0200 | [diff] [blame] | 930 | K_SPINLOCK(&sched_spinlock) { |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 931 | need_sched = z_is_thread_ready(thread); |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 932 | |
| 933 | if (need_sched) { |
Andy Ross | 4d8e1f2 | 2019-07-01 10:25:55 -0700 | [diff] [blame] | 934 | /* Don't requeue on SMP if it's the running thread */ |
| 935 | if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) { |
Andy Ross | c230fb3 | 2021-09-23 16:41:30 -0700 | [diff] [blame] | 936 | dequeue_thread(thread); |
Andy Ross | 4d8e1f2 | 2019-07-01 10:25:55 -0700 | [diff] [blame] | 937 | thread->base.prio = prio; |
Andy Ross | c230fb3 | 2021-09-23 16:41:30 -0700 | [diff] [blame] | 938 | queue_thread(thread); |
Andy Ross | 4d8e1f2 | 2019-07-01 10:25:55 -0700 | [diff] [blame] | 939 | } else { |
| 940 | thread->base.prio = prio; |
| 941 | } |
Andy Ross | 1856e22 | 2018-05-21 11:48:35 -0700 | [diff] [blame] | 942 | update_cache(1); |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 943 | } else { |
| 944 | thread->base.prio = prio; |
Andy Ross | e7ded11 | 2018-04-11 14:52:47 -0700 | [diff] [blame] | 945 | } |
| 946 | } |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 947 | |
| 948 | SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_priority_set, thread, prio); |
Andy Ross | e7ded11 | 2018-04-11 14:52:47 -0700 | [diff] [blame] | 949 | |
Andy Ross | 6f13980 | 2019-08-20 11:21:28 -0700 | [diff] [blame] | 950 | return need_sched; |
| 951 | } |
| 952 | |
| 953 | void z_thread_priority_set(struct k_thread *thread, int prio) |
| 954 | { |
| 955 | bool need_sched = z_set_prio(thread, prio); |
| 956 | |
Andy Ross | 3267cd3 | 2022-04-06 09:58:20 -0700 | [diff] [blame] | 957 | flag_ipi(); |
Andy Ross | 5737b5c | 2020-02-04 13:52:09 -0800 | [diff] [blame] | 958 | |
Anas Nashif | bbbc38b | 2021-03-29 10:03:49 -0400 | [diff] [blame] | 959 | if (need_sched && _current->base.sched_locked == 0U) { |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 960 | z_reschedule_unlocked(); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 961 | } |
| 962 | } |
| 963 | |
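/* Illustrative sketch (not part of this file): application code reaches the
 * path above through the public API. Lowering a worker thread's priority
 * (a numerically larger value is a lower priority in Zephyr) and reading it
 * back might look like this; "worker_tid" is a hypothetical thread id:
 *
 *     void demote_worker(k_tid_t worker_tid)
 *     {
 *             int prio = k_thread_priority_get(worker_tid);
 *
 *             k_thread_priority_set(worker_tid, prio + 1);
 *     }
 */
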
Anas Nashif | 3f4f3f6 | 2021-03-29 17:13:47 -0400 | [diff] [blame] | 964 | static inline bool resched(uint32_t key) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 965 | { |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 966 | #ifdef CONFIG_SMP |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 967 | _current_cpu->swap_ok = 0; |
| 968 | #endif |
| 969 | |
Andrew Boie | 4f77c2a | 2019-11-07 12:43:29 -0800 | [diff] [blame] | 970 | return arch_irq_unlocked(key) && !arch_is_in_isr(); |
Andy Ross | ec554f4 | 2018-07-24 13:37:59 -0700 | [diff] [blame] | 971 | } |
| 972 | |
Anas Nashif | 379b93f | 2020-08-10 15:47:02 -0400 | [diff] [blame] | 973 | /* |
| 974 | * Check if the next ready thread is the same as the current thread |
| 975 | * and save the trip if true. |
| 976 | */ |
| 977 | static inline bool need_swap(void) |
| 978 | { |
| 979 | /* the SMP case will be handled in C based z_swap() */ |
| 980 | #ifdef CONFIG_SMP |
| 981 | return true; |
| 982 | #else |
| 983 | struct k_thread *new_thread; |
| 984 | |
| 985 | /* Check if the next ready thread is the same as the current thread */ |
Andy Ross | 6b84ab3 | 2021-02-18 10:15:23 -0800 | [diff] [blame] | 986 | new_thread = _kernel.ready_q.cache; |
Anas Nashif | 379b93f | 2020-08-10 15:47:02 -0400 | [diff] [blame] | 987 | return new_thread != _current; |
| 988 | #endif |
| 989 | } |
| 990 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 991 | void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key) |
Andy Ross | ec554f4 | 2018-07-24 13:37:59 -0700 | [diff] [blame] | 992 | { |
Anas Nashif | 379b93f | 2020-08-10 15:47:02 -0400 | [diff] [blame] | 993 | if (resched(key.key) && need_swap()) { |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 994 | z_swap(lock, key); |
Andy Ross | ec554f4 | 2018-07-24 13:37:59 -0700 | [diff] [blame] | 995 | } else { |
| 996 | k_spin_unlock(lock, key); |
Andy Ross | b4e9ef0 | 2022-04-06 10:10:17 -0700 | [diff] [blame] | 997 | signal_pending_ipi(); |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 998 | } |
Andy Ross | ec554f4 | 2018-07-24 13:37:59 -0700 | [diff] [blame] | 999 | } |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 1000 | |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1001 | void z_reschedule_irqlock(uint32_t key) |
Andy Ross | ec554f4 | 2018-07-24 13:37:59 -0700 | [diff] [blame] | 1002 | { |
Andy Ross | 312b43f | 2019-05-24 10:09:13 -0700 | [diff] [blame] | 1003 | if (resched(key)) { |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1004 | z_swap_irqlock(key); |
Andy Ross | ec554f4 | 2018-07-24 13:37:59 -0700 | [diff] [blame] | 1005 | } else { |
| 1006 | irq_unlock(key); |
Andy Ross | b4e9ef0 | 2022-04-06 10:10:17 -0700 | [diff] [blame] | 1007 | signal_pending_ipi(); |
Andy Ross | ec554f4 | 2018-07-24 13:37:59 -0700 | [diff] [blame] | 1008 | } |
Andy Ross | 8606fab | 2018-03-26 10:54:40 -0700 | [diff] [blame] | 1009 | } |
| 1010 | |
Benjamin Walsh | d7ad176 | 2016-11-10 14:46:58 -0500 | [diff] [blame] | 1011 | void k_sched_lock(void) |
| 1012 | { |
Florian Grandel | e256b7d | 2023-07-07 09:12:38 +0200 | [diff] [blame] | 1013 | K_SPINLOCK(&sched_spinlock) { |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1014 | SYS_PORT_TRACING_FUNC(k_thread, sched_lock); |
| 1015 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1016 | z_sched_lock(); |
Andy Ross | 1856e22 | 2018-05-21 11:48:35 -0700 | [diff] [blame] | 1017 | } |
Benjamin Walsh | d7ad176 | 2016-11-10 14:46:58 -0500 | [diff] [blame] | 1018 | } |
| 1019 | |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1020 | void k_sched_unlock(void) |
| 1021 | { |
Florian Grandel | e256b7d | 2023-07-07 09:12:38 +0200 | [diff] [blame] | 1022 | K_SPINLOCK(&sched_spinlock) { |
Anas Nashif | bbbc38b | 2021-03-29 10:03:49 -0400 | [diff] [blame] | 1023 | __ASSERT(_current->base.sched_locked != 0U, ""); |
Andy Ross | eefd3da | 2020-02-06 13:39:52 -0800 | [diff] [blame] | 1024 | __ASSERT(!arch_is_in_isr(), ""); |
| 1025 | |
Andy Ross | 1856e22 | 2018-05-21 11:48:35 -0700 | [diff] [blame] | 1026 | ++_current->base.sched_locked; |
Yasushi SHOJI | 20d0724 | 2019-07-31 11:19:08 +0900 | [diff] [blame] | 1027 | update_cache(0); |
Andy Ross | 1856e22 | 2018-05-21 11:48:35 -0700 | [diff] [blame] | 1028 | } |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1029 | |
Anas Nashif | 2c5d404 | 2019-12-02 10:24:08 -0500 | [diff] [blame] | 1030 | LOG_DBG("scheduler unlocked (%p:%d)", |
Benjamin Walsh | a4e033f | 2016-11-18 16:08:24 -0500 | [diff] [blame] | 1031 | _current, _current->base.sched_locked); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1032 | |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1033 | SYS_PORT_TRACING_FUNC(k_thread, sched_unlock); |
| 1034 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1035 | z_reschedule_unlocked(); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1036 | } |
| 1037 | |
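/* Illustrative sketch (not part of this file): k_sched_lock()/k_sched_unlock()
 * nest, and preemption is re-enabled (with a reschedule point) only when the
 * outermost unlock brings the sched_locked count back to zero. Interrupts
 * stay enabled throughout; only thread preemption is deferred:
 *
 *     void update_shared_state(void)
 *     {
 *             k_sched_lock();          // no preemption by other threads
 *             touch_state_part_a();    // hypothetical helpers
 *             touch_state_part_b();
 *             k_sched_unlock();        // may reschedule if a higher-priority
 *                                      // thread became ready meanwhile
 *     }
 */
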
Andy Ross | 6b84ab3 | 2021-02-18 10:15:23 -0800 | [diff] [blame] | 1038 | struct k_thread *z_swap_next_thread(void) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1039 | { |
Andy Ross | 6b84ab3 | 2021-02-18 10:15:23 -0800 | [diff] [blame] | 1040 | #ifdef CONFIG_SMP |
Andy Ross | b4e9ef0 | 2022-04-06 10:10:17 -0700 | [diff] [blame] | 1041 | struct k_thread *ret = next_up(); |
| 1042 | |
| 1043 | if (ret == _current) { |
| 1044 | /* When not swapping, have to signal IPIs here. In |
| 1045 | * the context switch case it must happen later, after |
| 1046 | * _current gets requeued. |
| 1047 | */ |
| 1048 | signal_pending_ipi(); |
| 1049 | } |
| 1050 | return ret; |
Andy Ross | 6b84ab3 | 2021-02-18 10:15:23 -0800 | [diff] [blame] | 1051 | #else |
| 1052 | return _kernel.ready_q.cache; |
Benjamin Walsh | 6209218 | 2016-12-20 14:39:08 -0500 | [diff] [blame] | 1053 | #endif |
Andy Ross | 6b84ab3 | 2021-02-18 10:15:23 -0800 | [diff] [blame] | 1054 | } |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1055 | |
Jeremy Bettis | 1e0a36c | 2021-12-06 10:56:33 -0700 | [diff] [blame] | 1056 | #ifdef CONFIG_USE_SWITCH |
Andy Ross | b18685b | 2019-02-19 17:24:30 -0800 | [diff] [blame] | 1057 | /* Just a wrapper around _current = xxx with tracing */ |
| 1058 | static inline void set_current(struct k_thread *new_thread) |
| 1059 | { |
Daniel Leung | 11e6b43 | 2020-08-27 16:12:01 -0700 | [diff] [blame] | 1060 | z_thread_mark_switched_out(); |
Andy Ross | eefd3da | 2020-02-06 13:39:52 -0800 | [diff] [blame] | 1061 | _current_cpu->current = new_thread; |
Andy Ross | b18685b | 2019-02-19 17:24:30 -0800 | [diff] [blame] | 1062 | } |
| 1063 | |
Nicolas Pitre | c9e3e0d | 2022-03-15 22:36:20 -0400 | [diff] [blame] | 1064 | /** |
| 1065 | * @brief Determine next thread to execute upon completion of an interrupt |
| 1066 | * |
| 1067 | * Thread preemption is performed by context switching after the completion |
| 1068 | * of a non-recursed interrupt. This function determines which thread to |
| 1069 | * switch to, if any. It accepts as @p interrupted either: |
| 1070 | * |
| 1071 | * - The handle for the interrupted thread in which case the thread's context |
| 1072 | * must already be fully saved and ready to be picked up by a different CPU. |
| 1073 | * |
| 1074 | * - NULL if more work is required to fully save the thread's state after |
| 1075 | * it is known that a new thread is to be scheduled. It is up to the caller |
| 1076 | * to store the handle resulting from the thread that is being switched out |
| 1077 | * in that thread's "switch_handle" field after its |
| 1078 | * context has fully been saved, following the same requirements as with |
| 1079 | * the @ref arch_switch() function. |
| 1080 | * |
| 1081 | * If a new thread needs to be scheduled then its handle is returned. |
| 1082 | * Otherwise the same value provided as @p interrupted is returned back. |
| 1083 | * Those handles are the same opaque types used by the @ref arch_switch() |
| 1084 | * function. |
| 1085 | * |
| 1086 | * @warning |
| 1087 | * The @ref _current value may have changed after this call and not refer |
| 1088 | * to the interrupted thread anymore. It might be necessary to make a local |
| 1089 | * copy before calling this function. |
| 1090 | * |
| 1091 | * @param interrupted Handle for the thread that was interrupted or NULL. |
| 1092 | * @return Handle for the next thread to execute, or @p interrupted when |
| 1093 | * no new thread is to be scheduled. |
| 1094 | */ |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1095 | void *z_get_next_switch_handle(void *interrupted) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1096 | { |
Andrew Boie | ae0d1b2 | 2019-03-29 16:25:27 -0700 | [diff] [blame] | 1097 | z_check_stack_sentinel(); |
| 1098 | |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 1099 | #ifdef CONFIG_SMP |
Andy Ross | dd43221 | 2021-02-05 08:15:02 -0800 | [diff] [blame] | 1100 | void *ret = NULL; |
| 1101 | |
Florian Grandel | e256b7d | 2023-07-07 09:12:38 +0200 | [diff] [blame] | 1102 | K_SPINLOCK(&sched_spinlock) { |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 1103 | struct k_thread *old_thread = _current, *new_thread; |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 1104 | |
Andy Ross | 4ff4571 | 2021-02-08 08:28:54 -0800 | [diff] [blame] | 1105 | if (IS_ENABLED(CONFIG_SMP)) { |
| 1106 | old_thread->switch_handle = NULL; |
| 1107 | } |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 1108 | new_thread = next_up(); |
| 1109 | |
Andy Ross | 40d12c1 | 2021-09-27 08:22:43 -0700 | [diff] [blame] | 1110 | z_sched_usage_switch(new_thread); |
| 1111 | |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 1112 | if (old_thread != new_thread) { |
| 1113 | update_metairq_preempt(new_thread); |
Andy Ross | b89e427 | 2023-05-26 09:12:51 -0700 | [diff] [blame] | 1114 | z_sched_switch_spin(new_thread); |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 1115 | arch_cohere_stacks(old_thread, interrupted, new_thread); |
Andy Ross | 11a050b | 2019-11-13 09:41:52 -0800 | [diff] [blame] | 1116 | |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 1117 | _current_cpu->swap_ok = 0; |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 1118 | set_current(new_thread); |
| 1119 | |
Andy Ross | 3e69689 | 2021-11-30 18:26:26 -0800 | [diff] [blame] | 1120 | #ifdef CONFIG_TIMESLICING |
| 1121 | z_reset_time_slice(new_thread); |
| 1122 | #endif |
| 1123 | |
Danny Oerndrup | c9d7840 | 2019-12-13 11:24:56 +0100 | [diff] [blame] | 1124 | #ifdef CONFIG_SPIN_VALIDATE |
Andy Ross | 8c1bdda | 2019-02-20 10:07:31 -0800 | [diff] [blame] | 1125 | /* Changed _current! Update the spinlock |
Anas Nashif | 6df4405 | 2021-04-30 09:58:20 -0400 | [diff] [blame] | 1126 | * bookkeeping so the validation doesn't get |
Andy Ross | 8c1bdda | 2019-02-20 10:07:31 -0800 | [diff] [blame] | 1127 | * confused when the "wrong" thread tries to |
| 1128 | * release the lock. |
| 1129 | */ |
| 1130 | z_spin_lock_set_owner(&sched_spinlock); |
| 1131 | #endif |
Andy Ross | 4ff4571 | 2021-02-08 08:28:54 -0800 | [diff] [blame] | 1132 | |
| 1133 | /* A queued (runnable) old/current thread |
| 1134 | * needs to be added back to the run queue |
| 1135 | * here, and atomically with its switch handle |
| 1136 | * being set below. This is safe now, as we |
| 1137 | * will not return into it. |
| 1138 | */ |
| 1139 | if (z_is_thread_queued(old_thread)) { |
Andy Ross | 387fdd2 | 2021-09-23 18:44:40 -0700 | [diff] [blame] | 1140 | runq_add(old_thread); |
Andy Ross | 4ff4571 | 2021-02-08 08:28:54 -0800 | [diff] [blame] | 1141 | } |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 1142 | } |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 1143 | old_thread->switch_handle = interrupted; |
Andy Ross | dd43221 | 2021-02-05 08:15:02 -0800 | [diff] [blame] | 1144 | ret = new_thread->switch_handle; |
Andy Ross | 4ff4571 | 2021-02-08 08:28:54 -0800 | [diff] [blame] | 1145 | if (IS_ENABLED(CONFIG_SMP)) { |
| 1146 | /* Active threads MUST have a null here */ |
| 1147 | new_thread->switch_handle = NULL; |
| 1148 | } |
Benjamin Walsh | b8c2160 | 2016-12-23 19:34:41 -0500 | [diff] [blame] | 1149 | } |
Andy Ross | b4e9ef0 | 2022-04-06 10:10:17 -0700 | [diff] [blame] | 1150 | signal_pending_ipi(); |
Andy Ross | dd43221 | 2021-02-05 08:15:02 -0800 | [diff] [blame] | 1151 | return ret; |
Andy Ross | eace1df | 2018-05-30 11:23:02 -0700 | [diff] [blame] | 1152 | #else |
Andy Ross | 40d12c1 | 2021-09-27 08:22:43 -0700 | [diff] [blame] | 1153 | z_sched_usage_switch(_kernel.ready_q.cache); |
Andy Ross | f6d32ab | 2020-05-13 15:34:04 +0000 | [diff] [blame] | 1154 | _current->switch_handle = interrupted; |
Andy Ross | 6b84ab3 | 2021-02-18 10:15:23 -0800 | [diff] [blame] | 1155 | set_current(_kernel.ready_q.cache); |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1156 | return _current->switch_handle; |
Andy Ross | dd43221 | 2021-02-05 08:15:02 -0800 | [diff] [blame] | 1157 | #endif |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1158 | } |
Benjamin Walsh | b12a8e0 | 2016-12-14 15:24:12 -0500 | [diff] [blame] | 1159 | #endif |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1160 | |
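/* Illustrative sketch (not part of this file): on CONFIG_USE_SWITCH
 * architectures the non-recursed interrupt exit path pairs
 * z_get_next_switch_handle() with the context restore, shown here as
 * pseudo-C for what is normally assembly (the restore/return helpers are
 * hypothetical, arch-specific steps):
 *
 *     // "old_handle" is the saved handle of the interrupted thread, or
 *     // NULL if its context is not yet fully saved (see the comment above).
 *     void *next = z_get_next_switch_handle(old_handle);
 *
 *     if (next != old_handle) {
 *             restore_context_of(next);
 *     } else {
 *             return_to_interrupted();
 *     }
 */
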
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1161 | void z_priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread) |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1162 | { |
Benjamin Cabé | a46f1b9 | 2023-08-21 15:30:26 +0200 | [diff] [blame] | 1163 | ARG_UNUSED(pq); |
| 1164 | |
Andrew Boie | 8f0bb6a | 2019-09-21 18:36:23 -0700 | [diff] [blame] | 1165 | __ASSERT_NO_MSG(!z_is_idle_thread_object(thread)); |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1166 | |
| 1167 | sys_dlist_remove(&thread->base.qnode_dlist); |
| 1168 | } |
| 1169 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1170 | struct k_thread *z_priq_dumb_best(sys_dlist_t *pq) |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1171 | { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1172 | struct k_thread *thread = NULL; |
Flavio Ceolin | 26be335 | 2018-11-15 15:03:32 -0800 | [diff] [blame] | 1173 | sys_dnode_t *n = sys_dlist_peek_head(pq); |
| 1174 | |
Peter A. Bigot | 692e103 | 2019-01-03 23:36:28 -0600 | [diff] [blame] | 1175 | if (n != NULL) { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1176 | thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist); |
Peter A. Bigot | 692e103 | 2019-01-03 23:36:28 -0600 | [diff] [blame] | 1177 | } |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1178 | return thread; |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1179 | } |
| 1180 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1181 | bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b) |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1182 | { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1183 | struct k_thread *thread_a, *thread_b; |
James Harris | 2cd0f66 | 2021-03-01 09:19:57 -0800 | [diff] [blame] | 1184 | int32_t cmp; |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1185 | |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1186 | thread_a = CONTAINER_OF(a, struct k_thread, base.qnode_rb); |
| 1187 | thread_b = CONTAINER_OF(b, struct k_thread, base.qnode_rb); |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1188 | |
James Harris | 2cd0f66 | 2021-03-01 09:19:57 -0800 | [diff] [blame] | 1189 | cmp = z_sched_prio_cmp(thread_a, thread_b); |
| 1190 | |
| 1191 | if (cmp > 0) { |
Flavio Ceolin | 02ed85b | 2018-09-20 15:43:57 -0700 | [diff] [blame] | 1192 | return true; |
James Harris | 2cd0f66 | 2021-03-01 09:19:57 -0800 | [diff] [blame] | 1193 | } else if (cmp < 0) { |
Flavio Ceolin | 02ed85b | 2018-09-20 15:43:57 -0700 | [diff] [blame] | 1194 | return false; |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1195 | } else { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1196 | return thread_a->base.order_key < thread_b->base.order_key |
| 1197 | ? true : false; |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1198 | } |
| 1199 | } |
| 1200 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1201 | void z_priq_rb_add(struct _priq_rb *pq, struct k_thread *thread) |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1202 | { |
| 1203 | struct k_thread *t; |
| 1204 | |
Andrew Boie | 8f0bb6a | 2019-09-21 18:36:23 -0700 | [diff] [blame] | 1205 | __ASSERT_NO_MSG(!z_is_idle_thread_object(thread)); |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1206 | |
| 1207 | thread->base.order_key = pq->next_order_key++; |
| 1208 | |
| 1209 | /* Renumber at wraparound. This is tiny code, and in practice |
| 1210 | * will almost never be hit on real systems. BUT on very |
| 1211 | * long-running systems where a priq never completely empties |
| 1212 | * AND contains very large numbers of threads, looping over |
| 1213 | * all the threads like this can be a latency glitch. |
| 1214 | */ |
| 1215 | if (!pq->next_order_key) { |
| 1216 | RB_FOR_EACH_CONTAINER(&pq->tree, t, base.qnode_rb) { |
| 1217 | t->base.order_key = pq->next_order_key++; |
| 1218 | } |
| 1219 | } |
| 1220 | |
| 1221 | rb_insert(&pq->tree, &thread->base.qnode_rb); |
| 1222 | } |
| 1223 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1224 | void z_priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread) |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1225 | { |
Andrew Boie | 8f0bb6a | 2019-09-21 18:36:23 -0700 | [diff] [blame] | 1226 | __ASSERT_NO_MSG(!z_is_idle_thread_object(thread)); |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1227 | |
| 1228 | rb_remove(&pq->tree, &thread->base.qnode_rb); |
| 1229 | |
| 1230 | if (!pq->tree.root) { |
| 1231 | pq->next_order_key = 0; |
| 1232 | } |
| 1233 | } |
| 1234 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1235 | struct k_thread *z_priq_rb_best(struct _priq_rb *pq) |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1236 | { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1237 | struct k_thread *thread = NULL; |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1238 | struct rbnode *n = rb_get_min(&pq->tree); |
| 1239 | |
Peter A. Bigot | 692e103 | 2019-01-03 23:36:28 -0600 | [diff] [blame] | 1240 | if (n != NULL) { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1241 | thread = CONTAINER_OF(n, struct k_thread, base.qnode_rb); |
Peter A. Bigot | 692e103 | 2019-01-03 23:36:28 -0600 | [diff] [blame] | 1242 | } |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1243 | return thread; |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1244 | } |
| 1245 | |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1246 | #ifdef CONFIG_SCHED_MULTIQ |
| 1247 | # if (K_LOWEST_THREAD_PRIO - K_HIGHEST_THREAD_PRIO) > 31 |
| 1248 | # error Too many priorities for multiqueue scheduler (max 32) |
| 1249 | # endif |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1250 | |
Peter Mitsis | f8b76f3 | 2021-11-29 09:52:11 -0500 | [diff] [blame] | 1251 | static ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq, |
| 1252 | struct k_thread *thread) |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1253 | { |
| 1254 | int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO; |
| 1255 | |
| 1256 | sys_dlist_append(&pq->queues[priority_bit], &thread->base.qnode_dlist); |
Flavio Ceolin | a996203 | 2019-02-26 10:14:04 -0800 | [diff] [blame] | 1257 | pq->bitmask |= BIT(priority_bit); |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1258 | } |
| 1259 | |
Peter Mitsis | f8b76f3 | 2021-11-29 09:52:11 -0500 | [diff] [blame] | 1260 | static ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq, |
| 1261 | struct k_thread *thread) |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1262 | { |
| 1263 | int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO; |
| 1264 | |
| 1265 | sys_dlist_remove(&thread->base.qnode_dlist); |
| 1266 | if (sys_dlist_is_empty(&pq->queues[priority_bit])) { |
Flavio Ceolin | a996203 | 2019-02-26 10:14:04 -0800 | [diff] [blame] | 1267 | pq->bitmask &= ~BIT(priority_bit); |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1268 | } |
| 1269 | } |
Jeremy Bettis | fb1c36f | 2021-12-20 16:24:30 -0700 | [diff] [blame] | 1270 | #endif |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1271 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1272 | struct k_thread *z_priq_mq_best(struct _priq_mq *pq) |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1273 | { |
| 1274 | if (!pq->bitmask) { |
| 1275 | return NULL; |
| 1276 | } |
| 1277 | |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1278 | struct k_thread *thread = NULL; |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1279 | sys_dlist_t *l = &pq->queues[__builtin_ctz(pq->bitmask)]; |
Flavio Ceolin | 26be335 | 2018-11-15 15:03:32 -0800 | [diff] [blame] | 1280 | sys_dnode_t *n = sys_dlist_peek_head(l); |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1281 | |
Peter A. Bigot | 692e103 | 2019-01-03 23:36:28 -0600 | [diff] [blame] | 1282 | if (n != NULL) { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1283 | thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist); |
Peter A. Bigot | 692e103 | 2019-01-03 23:36:28 -0600 | [diff] [blame] | 1284 | } |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1285 | return thread; |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1286 | } |
| 1287 | |
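/* Illustrative sketch (not part of this file): the multiq scheduler keeps
 * one dlist per priority plus a bitmask of non-empty lists, so picking the
 * best thread is a single count-trailing-zeros. Assuming a configuration
 * where K_HIGHEST_THREAD_PRIO is -16, a thread at priority -14 maps to
 * bit 2:
 *
 *     priority_bit = -14 - (-16);             // == 2
 *     pq->bitmask |= BIT(priority_bit);       // on add
 *     best_bit = __builtin_ctz(pq->bitmask);  // == 2 while bits 0-1 are clear
 */
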
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1288 | int z_unpend_all(_wait_q_t *wait_q) |
Andy Ross | 4ca0e07 | 2018-05-10 09:45:42 -0700 | [diff] [blame] | 1289 | { |
Andy Ross | ccf3bf7 | 2018-05-10 11:10:34 -0700 | [diff] [blame] | 1290 | int need_sched = 0; |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1291 | struct k_thread *thread; |
Andy Ross | 4ca0e07 | 2018-05-10 09:45:42 -0700 | [diff] [blame] | 1292 | |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1293 | while ((thread = z_waitq_head(wait_q)) != NULL) { |
| 1294 | z_unpend_thread(thread); |
| 1295 | z_ready_thread(thread); |
Andy Ross | 4ca0e07 | 2018-05-10 09:45:42 -0700 | [diff] [blame] | 1296 | need_sched = 1; |
| 1297 | } |
Andy Ross | ccf3bf7 | 2018-05-10 11:10:34 -0700 | [diff] [blame] | 1298 | |
| 1299 | return need_sched; |
Andy Ross | 4ca0e07 | 2018-05-10 09:45:42 -0700 | [diff] [blame] | 1300 | } |
| 1301 | |
Andy Ross | b155d06 | 2021-09-24 13:49:14 -0700 | [diff] [blame] | 1302 | void init_ready_q(struct _ready_q *rq) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1303 | { |
Andy Ross | b155d06 | 2021-09-24 13:49:14 -0700 | [diff] [blame] | 1304 | #if defined(CONFIG_SCHED_SCALABLE) |
| 1305 | rq->runq = (struct _priq_rb) { |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1306 | .tree = { |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1307 | .lessthan_fn = z_priq_rb_lessthan, |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1308 | } |
| 1309 | }; |
Andy Ross | b155d06 | 2021-09-24 13:49:14 -0700 | [diff] [blame] | 1310 | #elif defined(CONFIG_SCHED_MULTIQ) |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1311 | for (int i = 0; i < ARRAY_SIZE(_kernel.ready_q.runq.queues); i++) { |
Andy Ross | b155d06 | 2021-09-24 13:49:14 -0700 | [diff] [blame] | 1312 | sys_dlist_init(&rq->runq.queues[i]); |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1313 | } |
Andy Ross | b155d06 | 2021-09-24 13:49:14 -0700 | [diff] [blame] | 1314 | #else |
| 1315 | sys_dlist_init(&rq->runq); |
Andy Ross | 9f06a35 | 2018-06-28 10:38:14 -0700 | [diff] [blame] | 1316 | #endif |
Andy Ross | b155d06 | 2021-09-24 13:49:14 -0700 | [diff] [blame] | 1317 | } |
| 1318 | |
| 1319 | void z_sched_init(void) |
| 1320 | { |
Andy Ross | b11e796 | 2021-09-24 10:57:39 -0700 | [diff] [blame] | 1321 | #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY |
Nicolas Pitre | 907eea0 | 2023-03-16 17:54:25 -0400 | [diff] [blame] | 1322 | for (int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) { |
Andy Ross | b11e796 | 2021-09-24 10:57:39 -0700 | [diff] [blame] | 1323 | init_ready_q(&_kernel.cpus[i].ready_q); |
| 1324 | } |
| 1325 | #else |
Andy Ross | b155d06 | 2021-09-24 13:49:14 -0700 | [diff] [blame] | 1326 | init_ready_q(&_kernel.ready_q); |
Andy Ross | b11e796 | 2021-09-24 10:57:39 -0700 | [diff] [blame] | 1327 | #endif |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1328 | } |
| 1329 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1330 | int z_impl_k_thread_priority_get(k_tid_t thread) |
Allan Stephens | 399d0ad | 2016-10-07 13:41:34 -0500 | [diff] [blame] | 1331 | { |
Benjamin Walsh | f6ca7de | 2016-11-08 10:36:50 -0500 | [diff] [blame] | 1332 | return thread->base.prio; |
Allan Stephens | 399d0ad | 2016-10-07 13:41:34 -0500 | [diff] [blame] | 1333 | } |
| 1334 | |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1335 | #ifdef CONFIG_USERSPACE |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1336 | static inline int z_vrfy_k_thread_priority_get(k_tid_t thread) |
| 1337 | { |
| 1338 | Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD)); |
| 1339 | return z_impl_k_thread_priority_get(thread); |
| 1340 | } |
| 1341 | #include <syscalls/k_thread_priority_get_mrsh.c> |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1342 | #endif |
| 1343 | |
Anas Nashif | 25c87db | 2021-03-29 10:54:23 -0400 | [diff] [blame] | 1344 | void z_impl_k_thread_priority_set(k_tid_t thread, int prio) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1345 | { |
Benjamin Walsh | 3cc2ba9 | 2016-11-08 15:44:05 -0500 | [diff] [blame] | 1346 | /* |
| 1347 | * Use NULL, since we cannot know what the entry point is (we do not |
| 1348 | * keep track of it) and idle cannot change its priority. |
| 1349 | */ |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1350 | Z_ASSERT_VALID_PRIO(prio, NULL); |
Andrew Boie | 4f77c2a | 2019-11-07 12:43:29 -0800 | [diff] [blame] | 1351 | __ASSERT(!arch_is_in_isr(), ""); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1352 | |
Anas Nashif | 25c87db | 2021-03-29 10:54:23 -0400 | [diff] [blame] | 1353 | struct k_thread *th = (struct k_thread *)thread; |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1354 | |
Anas Nashif | 25c87db | 2021-03-29 10:54:23 -0400 | [diff] [blame] | 1355 | z_thread_priority_set(th, prio); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1356 | } |
| 1357 | |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1358 | #ifdef CONFIG_USERSPACE |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1359 | static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio) |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1360 | { |
Andrew Boie | 8345e5e | 2018-05-04 15:57:57 -0700 | [diff] [blame] | 1361 | Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD)); |
Anas Nashif | 684b8fc | 2023-09-27 10:41:51 +0000 | [diff] [blame] | 1362 | Z_OOPS(K_SYSCALL_VERIFY_MSG(_is_valid_prio(prio, NULL), |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1363 | "invalid thread priority %d", prio)); |
Anas Nashif | 684b8fc | 2023-09-27 10:41:51 +0000 | [diff] [blame] | 1364 | Z_OOPS(K_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio, |
Andrew Boie | 8345e5e | 2018-05-04 15:57:57 -0700 | [diff] [blame] | 1365 | "thread priority may only be downgraded (%d < %d)", |
| 1366 | prio, thread->base.prio)); |
Andrew Boie | 5008fed | 2017-10-08 10:11:24 -0700 | [diff] [blame] | 1367 | |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1368 | z_impl_k_thread_priority_set(thread, prio); |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1369 | } |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1370 | #include <syscalls/k_thread_priority_set_mrsh.c> |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1371 | #endif |
| 1372 | |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1373 | #ifdef CONFIG_SCHED_DEADLINE |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1374 | void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline) |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1375 | { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1376 | struct k_thread *thread = tid; |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1377 | |
Florian Grandel | e256b7d | 2023-07-07 09:12:38 +0200 | [diff] [blame] | 1378 | K_SPINLOCK(&sched_spinlock) { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1379 | thread->base.prio_deadline = k_cycle_get_32() + deadline; |
| 1380 | if (z_is_thread_queued(thread)) { |
Andy Ross | c230fb3 | 2021-09-23 16:41:30 -0700 | [diff] [blame] | 1381 | dequeue_thread(thread); |
| 1382 | queue_thread(thread); |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1383 | } |
| 1384 | } |
| 1385 | } |
| 1386 | |
| 1387 | #ifdef CONFIG_USERSPACE |
Andy Ross | 075c94f | 2019-08-13 11:34:34 -0700 | [diff] [blame] | 1388 | static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline) |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1389 | { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1390 | struct k_thread *thread = tid; |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1391 | |
| 1392 | Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD)); |
Anas Nashif | 684b8fc | 2023-09-27 10:41:51 +0000 | [diff] [blame] | 1393 | Z_OOPS(K_SYSCALL_VERIFY_MSG(deadline > 0, |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1394 | "invalid thread deadline %d", |
| 1395 | (int)deadline)); |
| 1396 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1397 | z_impl_k_thread_deadline_set((k_tid_t)thread, deadline); |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1398 | } |
Andy Ross | 075c94f | 2019-08-13 11:34:34 -0700 | [diff] [blame] | 1399 | #include <syscalls/k_thread_deadline_set_mrsh.c> |
Andy Ross | 4a2e50f | 2018-05-15 11:06:25 -0700 | [diff] [blame] | 1400 | #endif |
| 1401 | #endif |
| 1402 | |
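/* Illustrative sketch (not part of this file): with CONFIG_SCHED_DEADLINE,
 * runnable threads of equal static priority are ordered earliest-deadline-
 * first. The deadline is relative, expressed in hardware cycles from the
 * moment of the call, and must stay well inside the 32-bit cycle counter's
 * wrap-around range:
 *
 *     void start_period(void)
 *     {
 *             int deadline_cyc = (int)k_ms_to_cyc_ceil32(10); // assumed 10 ms budget
 *
 *             k_thread_deadline_set(k_current_get(), deadline_cyc);
 *             do_one_period_of_work();   // hypothetical helper
 *     }
 */
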
Jordan Yates | 1ef647f | 2022-03-26 09:55:23 +1000 | [diff] [blame] | 1403 | bool k_can_yield(void) |
| 1404 | { |
| 1405 | return !(k_is_pre_kernel() || k_is_in_isr() || |
| 1406 | z_is_idle_thread_object(_current)); |
| 1407 | } |
| 1408 | |
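/* Illustrative sketch (not part of this file): k_can_yield() is a guard for
 * code that may run before the kernel is up, from an ISR, or in the idle
 * thread, where yielding is either meaningless or forbidden:
 *
 *     if (k_can_yield()) {
 *             k_yield();
 *     }
 */
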
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1409 | void z_impl_k_yield(void) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1410 | { |
Andrew Boie | 4f77c2a | 2019-11-07 12:43:29 -0800 | [diff] [blame] | 1411 | __ASSERT(!arch_is_in_isr(), ""); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1412 | |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1413 | SYS_PORT_TRACING_FUNC(k_thread, yield); |
| 1414 | |
Andy Ross | 851d14a | 2021-05-13 15:46:43 -0700 | [diff] [blame] | 1415 | k_spinlock_key_t key = k_spin_lock(&sched_spinlock); |
James Harris | 6543e06 | 2021-03-01 10:14:13 -0800 | [diff] [blame] | 1416 | |
Andy Ross | 851d14a | 2021-05-13 15:46:43 -0700 | [diff] [blame] | 1417 | if (!IS_ENABLED(CONFIG_SMP) || |
| 1418 | z_is_thread_queued(_current)) { |
Andy Ross | c230fb3 | 2021-09-23 16:41:30 -0700 | [diff] [blame] | 1419 | dequeue_thread(_current); |
Andy Ross | 1acd8c2 | 2018-05-03 14:51:49 -0700 | [diff] [blame] | 1420 | } |
Andy Ross | c230fb3 | 2021-09-23 16:41:30 -0700 | [diff] [blame] | 1421 | queue_thread(_current); |
Andy Ross | 851d14a | 2021-05-13 15:46:43 -0700 | [diff] [blame] | 1422 | update_cache(1); |
| 1423 | z_swap(&sched_spinlock, key); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1424 | } |
| 1425 | |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1426 | #ifdef CONFIG_USERSPACE |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1427 | static inline void z_vrfy_k_yield(void) |
| 1428 | { |
| 1429 | z_impl_k_yield(); |
| 1430 | } |
| 1431 | #include <syscalls/k_yield_mrsh.c> |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1432 | #endif |
| 1433 | |
Flavio Ceolin | 7a815d5 | 2020-10-19 21:37:22 -0700 | [diff] [blame] | 1434 | static int32_t z_tick_sleep(k_ticks_t ticks) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1435 | { |
Benjamin Walsh | b12a8e0 | 2016-12-14 15:24:12 -0500 | [diff] [blame] | 1436 | #ifdef CONFIG_MULTITHREADING |
Flavio Ceolin | 9a16097 | 2020-11-16 10:40:46 -0800 | [diff] [blame] | 1437 | uint32_t expected_wakeup_ticks; |
Carles Cufi | 9849df8 | 2016-12-02 15:31:08 +0100 | [diff] [blame] | 1438 | |
Andrew Boie | 4f77c2a | 2019-11-07 12:43:29 -0800 | [diff] [blame] | 1439 | __ASSERT(!arch_is_in_isr(), ""); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1440 | |
Gerard Marull-Paretas | 737d799 | 2022-11-23 13:42:04 +0100 | [diff] [blame] | 1441 | LOG_DBG("thread %p for %lu ticks", _current, (unsigned long)ticks); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1442 | |
Benjamin Walsh | 5596f78 | 2016-12-09 19:57:17 -0500 | [diff] [blame] | 1443 | /* wait of 0 ticks is treated as a 'yield' */ |
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1444 | if (ticks == 0) { |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1445 | k_yield(); |
Piotr Zięcik | 7700eb2 | 2018-10-25 17:45:08 +0200 | [diff] [blame] | 1446 | return 0; |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1447 | } |
| 1448 | |
Andy Ross | e956639 | 2020-12-18 11:12:39 -0800 | [diff] [blame] | 1449 | k_timeout_t timeout = Z_TIMEOUT_TICKS(ticks); |
Lauren Murphy | 4c85b46 | 2021-05-25 17:49:28 -0500 | [diff] [blame] | 1450 | if (Z_TICK_ABS(ticks) <= 0) { |
| 1451 | expected_wakeup_ticks = ticks + sys_clock_tick_get_32(); |
| 1452 | } else { |
| 1453 | expected_wakeup_ticks = Z_TICK_ABS(ticks); |
| 1454 | } |
Andy Ross | d27d4e6 | 2019-02-05 15:36:01 -0800 | [diff] [blame] | 1455 | |
Andrew Boie | a8775ab | 2020-09-05 12:53:42 -0700 | [diff] [blame] | 1456 | k_spinlock_key_t key = k_spin_lock(&sched_spinlock); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1457 | |
Andy Ross | dff6b71 | 2019-02-25 21:17:29 -0800 | [diff] [blame] | 1458 | #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC) |
| 1459 | pending_current = _current; |
| 1460 | #endif |
Andrew Boie | a8775ab | 2020-09-05 12:53:42 -0700 | [diff] [blame] | 1461 | unready_thread(_current); |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1462 | z_add_thread_timeout(_current, timeout); |
Andy Ross | 4521e0c | 2019-03-22 10:30:19 -0700 | [diff] [blame] | 1463 | z_mark_thread_as_suspended(_current); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1464 | |
Andrew Boie | a8775ab | 2020-09-05 12:53:42 -0700 | [diff] [blame] | 1465 | (void)z_swap(&sched_spinlock, key); |
Piotr Zięcik | 7700eb2 | 2018-10-25 17:45:08 +0200 | [diff] [blame] | 1466 | |
Andy Ross | 4521e0c | 2019-03-22 10:30:19 -0700 | [diff] [blame] | 1467 | __ASSERT(!z_is_thread_state_set(_current, _THREAD_SUSPENDED), ""); |
| 1468 | |
Anas Nashif | 5c90ceb | 2021-03-13 08:19:53 -0500 | [diff] [blame] | 1469 | ticks = (k_ticks_t)expected_wakeup_ticks - sys_clock_tick_get_32(); |
Piotr Zięcik | 7700eb2 | 2018-10-25 17:45:08 +0200 | [diff] [blame] | 1470 | if (ticks > 0) { |
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1471 | return ticks; |
Piotr Zięcik | 7700eb2 | 2018-10-25 17:45:08 +0200 | [diff] [blame] | 1472 | } |
Benjamin Walsh | b12a8e0 | 2016-12-14 15:24:12 -0500 | [diff] [blame] | 1473 | #endif |
Piotr Zięcik | 7700eb2 | 2018-10-25 17:45:08 +0200 | [diff] [blame] | 1474 | |
| 1475 | return 0; |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1476 | } |
| 1477 | |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1478 | int32_t z_impl_k_sleep(k_timeout_t timeout) |
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1479 | { |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1480 | k_ticks_t ticks; |
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1481 | |
Peter Bigot | 8162e58 | 2019-12-12 16:07:07 -0600 | [diff] [blame] | 1482 | __ASSERT(!arch_is_in_isr(), ""); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1483 | |
| 1484 | SYS_PORT_TRACING_FUNC_ENTER(k_thread, sleep, timeout); |
Peter Bigot | 8162e58 | 2019-12-12 16:07:07 -0600 | [diff] [blame] | 1485 | |
Anas Nashif | d2c7179 | 2020-10-17 07:52:17 -0400 | [diff] [blame] | 1486 | /* in case of K_FOREVER, we suspend */ |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1487 | if (K_TIMEOUT_EQ(timeout, K_FOREVER)) { |
Andrew Boie | d2b8922 | 2019-11-08 10:44:22 -0800 | [diff] [blame] | 1488 | k_thread_suspend(_current); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1489 | |
| 1490 | SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, (int32_t) K_TICKS_FOREVER); |
| 1491 | |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1492 | return (int32_t) K_TICKS_FOREVER; |
Andrew Boie | d2b8922 | 2019-11-08 10:44:22 -0800 | [diff] [blame] | 1493 | } |
| 1494 | |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1495 | ticks = timeout.ticks; |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1496 | |
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1497 | ticks = z_tick_sleep(ticks); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1498 | |
| 1499 | int32_t ret = k_ticks_to_ms_floor64(ticks); |
| 1500 | |
| 1501 | SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, ret); |
| 1502 | |
| 1503 | return ret; |
Charles E. Youse | b186303 | 2019-05-08 13:22:46 -0700 | [diff] [blame] | 1504 | } |
| 1505 | |
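/* Illustrative sketch (not part of this file): k_sleep() returns zero when
 * the full timeout elapsed, and the number of milliseconds left when the
 * sleep was cut short by k_wakeup():
 *
 *     int32_t left = k_sleep(K_MSEC(500));
 *
 *     if (left > 0) {
 *             handle_early_wakeup(left);   // hypothetical reaction
 *     }
 */
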
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1506 | #ifdef CONFIG_USERSPACE |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1507 | static inline int32_t z_vrfy_k_sleep(k_timeout_t timeout) |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1508 | { |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1509 | return z_impl_k_sleep(timeout); |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1510 | } |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1511 | #include <syscalls/k_sleep_mrsh.c> |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1512 | #endif |
| 1513 | |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1514 | int32_t z_impl_k_usleep(int us) |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1515 | { |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1516 | int32_t ticks; |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1517 | |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1518 | SYS_PORT_TRACING_FUNC_ENTER(k_thread, usleep, us); |
| 1519 | |
Andy Ross | 8892406 | 2019-10-03 11:43:10 -0700 | [diff] [blame] | 1520 | ticks = k_us_to_ticks_ceil64(us); |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1521 | ticks = z_tick_sleep(ticks); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1522 | |
| 1523 | SYS_PORT_TRACING_FUNC_EXIT(k_thread, usleep, us, k_ticks_to_us_floor64(ticks)); |
| 1524 | |
Andy Ross | 8892406 | 2019-10-03 11:43:10 -0700 | [diff] [blame] | 1525 | return k_ticks_to_us_floor64(ticks); |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1526 | } |
| 1527 | |
| 1528 | #ifdef CONFIG_USERSPACE |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1529 | static inline int32_t z_vrfy_k_usleep(int us) |
Charles E. Youse | a567831 | 2019-05-09 16:46:46 -0700 | [diff] [blame] | 1530 | { |
| 1531 | return z_impl_k_usleep(us); |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1532 | } |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1533 | #include <syscalls/k_usleep_mrsh.c> |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1534 | #endif |
| 1535 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1536 | void z_impl_k_wakeup(k_tid_t thread) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1537 | { |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1538 | SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread); |
| 1539 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1540 | if (z_is_thread_pending(thread)) { |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1541 | return; |
| 1542 | } |
| 1543 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1544 | if (z_abort_thread_timeout(thread) < 0) { |
Andrew Boie | d2b8922 | 2019-11-08 10:44:22 -0800 | [diff] [blame] | 1545 | /* Might have just been sleeping forever */ |
| 1546 | if (thread->base.thread_state != _THREAD_SUSPENDED) { |
| 1547 | return; |
| 1548 | } |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1549 | } |
| 1550 | |
Andy Ross | 4521e0c | 2019-03-22 10:30:19 -0700 | [diff] [blame] | 1551 | z_mark_thread_as_not_suspended(thread); |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1552 | z_ready_thread(thread); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1553 | |
Andy Ross | 3267cd3 | 2022-04-06 09:58:20 -0700 | [diff] [blame] | 1554 | flag_ipi(); |
Andy Ross | 5737b5c | 2020-02-04 13:52:09 -0800 | [diff] [blame] | 1555 | |
Andrew Boie | 4f77c2a | 2019-11-07 12:43:29 -0800 | [diff] [blame] | 1556 | if (!arch_is_in_isr()) { |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1557 | z_reschedule_unlocked(); |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1558 | } |
| 1559 | } |
| 1560 | |
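/* Illustrative sketch (not part of this file): the usual pairing is one
 * thread parking itself with k_sleep(K_FOREVER), which suspends (see
 * z_impl_k_sleep() above), and another context releasing it later;
 * "worker_tid" is a hypothetical thread id:
 *
 *     // Sleeper side:
 *     k_sleep(K_FOREVER);
 *
 *     // Waker side (another thread, or an ISR):
 *     k_wakeup(worker_tid);
 */
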
Enjia Mai | 7ac40aa | 2020-05-28 11:29:50 +0800 | [diff] [blame] | 1561 | #ifdef CONFIG_TRACE_SCHED_IPI |
| 1562 | extern void z_trace_sched_ipi(void); |
| 1563 | #endif |
| 1564 | |
Andy Ross | 42ed12a | 2019-02-19 16:03:39 -0800 | [diff] [blame] | 1565 | #ifdef CONFIG_SMP |
Andy Ross | 42ed12a | 2019-02-19 16:03:39 -0800 | [diff] [blame] | 1566 | void z_sched_ipi(void) |
| 1567 | { |
Daniel Leung | adac4cb | 2020-01-09 18:55:07 -0800 | [diff] [blame] | 1568 | /* NOTE: When adding code to this, make sure this is called |
| 1569 | * at an appropriate location when !CONFIG_SCHED_IPI_SUPPORTED. |
| 1570 | */ |
Enjia Mai | 7ac40aa | 2020-05-28 11:29:50 +0800 | [diff] [blame] | 1571 | #ifdef CONFIG_TRACE_SCHED_IPI |
| 1572 | z_trace_sched_ipi(); |
| 1573 | #endif |
Andy Ross | c5c3ad9 | 2023-03-07 08:29:31 -0800 | [diff] [blame] | 1574 | |
| 1575 | #ifdef CONFIG_TIMESLICING |
Nicolas Pitre | 5879d2d | 2023-03-09 22:45:18 -0500 | [diff] [blame] | 1576 | if (sliceable(_current)) { |
Andy Ross | c5c3ad9 | 2023-03-07 08:29:31 -0800 | [diff] [blame] | 1577 | z_time_slice(); |
| 1578 | } |
| 1579 | #endif |
Andy Ross | 42ed12a | 2019-02-19 16:03:39 -0800 | [diff] [blame] | 1580 | } |
Andy Ross | 42ed12a | 2019-02-19 16:03:39 -0800 | [diff] [blame] | 1581 | #endif |
| 1582 | |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1583 | #ifdef CONFIG_USERSPACE |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1584 | static inline void z_vrfy_k_wakeup(k_tid_t thread) |
| 1585 | { |
| 1586 | Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD)); |
| 1587 | z_impl_k_wakeup(thread); |
| 1588 | } |
| 1589 | #include <syscalls/k_wakeup_mrsh.c> |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1590 | #endif |
| 1591 | |
Daniel Leung | 0a50ff3 | 2023-09-25 11:56:10 -0700 | [diff] [blame] | 1592 | k_tid_t z_impl_k_sched_current_thread_query(void) |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1593 | { |
Andy Ross | eefd3da | 2020-02-06 13:39:52 -0800 | [diff] [blame] | 1594 | #ifdef CONFIG_SMP |
| 1595 | /* In SMP, _current is a field read from _current_cpu, which |
| 1596 | * can race with preemption before it is read. We must lock |
| 1597 | * local interrupts when reading it. |
| 1598 | */ |
| 1599 | unsigned int k = arch_irq_lock(); |
| 1600 | #endif |
| 1601 | |
| 1602 | k_tid_t ret = _current_cpu->current; |
| 1603 | |
| 1604 | #ifdef CONFIG_SMP |
| 1605 | arch_irq_unlock(k); |
| 1606 | #endif |
| 1607 | return ret; |
Benjamin Walsh | 456c6da | 2016-09-02 18:55:39 -0400 | [diff] [blame] | 1608 | } |
| 1609 | |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1610 | #ifdef CONFIG_USERSPACE |
Daniel Leung | 0a50ff3 | 2023-09-25 11:56:10 -0700 | [diff] [blame] | 1611 | static inline k_tid_t z_vrfy_k_sched_current_thread_query(void) |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1612 | { |
Daniel Leung | 0a50ff3 | 2023-09-25 11:56:10 -0700 | [diff] [blame] | 1613 | return z_impl_k_sched_current_thread_query(); |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1614 | } |
Daniel Leung | 0a50ff3 | 2023-09-25 11:56:10 -0700 | [diff] [blame] | 1615 | #include <syscalls/k_sched_current_thread_query_mrsh.c> |
Andrew Boie | 76c04a2 | 2017-09-27 14:45:10 -0700 | [diff] [blame] | 1616 | #endif |
| 1617 | |
Patrik Flykt | 4344e27 | 2019-03-08 14:19:05 -0700 | [diff] [blame] | 1618 | int z_impl_k_is_preempt_thread(void) |
Benjamin Walsh | 445830d | 2016-11-10 15:54:27 -0500 | [diff] [blame] | 1619 | { |
Andrew Boie | 4f77c2a | 2019-11-07 12:43:29 -0800 | [diff] [blame] | 1620 | return !arch_is_in_isr() && is_preempt(_current); |
Benjamin Walsh | 445830d | 2016-11-10 15:54:27 -0500 | [diff] [blame] | 1621 | } |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1622 | |
| 1623 | #ifdef CONFIG_USERSPACE |
Andy Ross | 6564974 | 2019-08-06 13:34:31 -0700 | [diff] [blame] | 1624 | static inline int z_vrfy_k_is_preempt_thread(void) |
| 1625 | { |
| 1626 | return z_impl_k_is_preempt_thread(); |
| 1627 | } |
| 1628 | #include <syscalls/k_is_preempt_thread_mrsh.c> |
Andrew Boie | 468190a | 2017-09-29 14:00:48 -0700 | [diff] [blame] | 1629 | #endif |
Andy Ross | ab46b1b | 2019-01-30 15:00:42 -0800 | [diff] [blame] | 1630 | |
| 1631 | #ifdef CONFIG_SCHED_CPU_MASK |
| 1632 | # ifdef CONFIG_SMP |
Evgeniy Paltsev | 54e0731 | 2023-09-19 22:20:36 +0100 | [diff] [blame] | 1633 | /* Right now we use two bytes for this mask */ |
| 1634 | BUILD_ASSERT(CONFIG_MP_MAX_NUM_CPUS <= 16, "Too many CPUs for mask word"); |
Andy Ross | ab46b1b | 2019-01-30 15:00:42 -0800 | [diff] [blame] | 1635 | # endif |
| 1636 | |
| 1637 | |
Kumar Gala | a1b77fd | 2020-05-27 11:26:57 -0500 | [diff] [blame] | 1638 | static int cpu_mask_mod(k_tid_t thread, uint32_t enable_mask, uint32_t disable_mask) |
Andy Ross | ab46b1b | 2019-01-30 15:00:42 -0800 | [diff] [blame] | 1639 | { |
| 1640 | int ret = 0; |
| 1641 | |
Flavio Ceolin | 551038e | 2022-05-02 14:31:04 -0700 | [diff] [blame] | 1642 | #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY |
Kai Vehmanen | e81ccef | 2022-09-09 12:15:41 +0300 | [diff] [blame] | 1643 | __ASSERT(z_is_thread_prevented_from_running(thread), |
| 1644 | "Running threads cannot change CPU pin"); |
Flavio Ceolin | 551038e | 2022-05-02 14:31:04 -0700 | [diff] [blame] | 1645 | #endif |
| 1646 | |
Florian Grandel | e256b7d | 2023-07-07 09:12:38 +0200 | [diff] [blame] | 1647 | K_SPINLOCK(&sched_spinlock) { |
Anas Nashif | 9e3e7f6 | 2019-12-19 08:19:45 -0500 | [diff] [blame] | 1648 | if (z_is_thread_prevented_from_running(thread)) { |
| 1649 | thread->base.cpu_mask |= enable_mask; |
| 1650 | thread->base.cpu_mask &= ~disable_mask; |
Andy Ross | ab46b1b | 2019-01-30 15:00:42 -0800 | [diff] [blame] | 1651 | } else { |
| 1652 | ret = -EINVAL; |
| 1653 | } |
| 1654 | } |
Andy Ross | b11e796 | 2021-09-24 10:57:39 -0700 | [diff] [blame] | 1655 | |
| 1656 | #if defined(CONFIG_ASSERT) && defined(CONFIG_SCHED_CPU_MASK_PIN_ONLY) |
| 1657 | int m = thread->base.cpu_mask; |
| 1658 | |
| 1659 | __ASSERT((m == 0) || ((m & (m - 1)) == 0), |
| 1660 | "Only one CPU allowed in mask when PIN_ONLY"); |
| 1661 | #endif |
| 1662 | |
Andy Ross | ab46b1b | 2019-01-30 15:00:42 -0800 | [diff] [blame] | 1663 | return ret; |
| 1664 | } |
| 1665 | |
| 1666 | int k_thread_cpu_mask_clear(k_tid_t thread) |
| 1667 | { |
| 1668 | return cpu_mask_mod(thread, 0, 0xffffffff); |
| 1669 | } |
| 1670 | |
| 1671 | int k_thread_cpu_mask_enable_all(k_tid_t thread) |
| 1672 | { |
| 1673 | return cpu_mask_mod(thread, 0xffffffff, 0); |
| 1674 | } |
| 1675 | |
| 1676 | int k_thread_cpu_mask_enable(k_tid_t thread, int cpu) |
| 1677 | { |
| 1678 | return cpu_mask_mod(thread, BIT(cpu), 0); |
| 1679 | } |
| 1680 | |
| 1681 | int k_thread_cpu_mask_disable(k_tid_t thread, int cpu) |
| 1682 | { |
| 1683 | return cpu_mask_mod(thread, 0, BIT(cpu)); |
| 1684 | } |
| 1685 | |
Anas Nashif | c9d0248 | 2022-04-15 08:27:15 -0400 | [diff] [blame] | 1686 | int k_thread_cpu_pin(k_tid_t thread, int cpu) |
| 1687 | { |
| 1688 | int ret; |
| 1689 | |
| 1690 | ret = k_thread_cpu_mask_clear(thread); |
| 1691 | if (ret == 0) { |
| 1692 | return k_thread_cpu_mask_enable(thread, cpu); |
| 1693 | } |
| 1694 | return ret; |
| 1695 | } |
| 1696 | |
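/* Illustrative sketch (not part of this file): the mask can only be changed
 * while the target thread is not runnable, so the usual pattern is to create
 * the thread with a K_FOREVER start delay, pin it, then start it. The thread
 * object, stack, entry point and priority below are hypothetical:
 *
 *     k_tid_t tid = k_thread_create(&worker_thread, worker_stack,
 *                                   K_THREAD_STACK_SIZEOF(worker_stack),
 *                                   worker_entry, NULL, NULL, NULL,
 *                                   5, 0, K_FOREVER);
 *
 *     if (k_thread_cpu_pin(tid, 1) == 0) {   // run only on CPU 1
 *             k_thread_start(tid);
 *     }
 */
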
Andy Ross | ab46b1b | 2019-01-30 15:00:42 -0800 | [diff] [blame] | 1697 | #endif /* CONFIG_SCHED_CPU_MASK */ |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1698 | |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1699 | static inline void unpend_all(_wait_q_t *wait_q) |
| 1700 | { |
| 1701 | struct k_thread *thread; |
| 1702 | |
| 1703 | while ((thread = z_waitq_head(wait_q)) != NULL) { |
| 1704 | unpend_thread_no_timeout(thread); |
| 1705 | (void)z_abort_thread_timeout(thread); |
| 1706 | arch_thread_return_value_set(thread, 0); |
| 1707 | ready_thread(thread); |
| 1708 | } |
| 1709 | } |
| 1710 | |
Chen Peng1 | 0f63d11 | 2021-09-06 13:59:40 +0800 | [diff] [blame] | 1711 | #ifdef CONFIG_CMSIS_RTOS_V1 |
| 1712 | extern void z_thread_cmsis_status_mask_clear(struct k_thread *thread); |
| 1713 | #endif |
| 1714 | |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1715 | static void end_thread(struct k_thread *thread) |
| 1716 | { |
| 1717 | /* We hold the lock, and the thread is known not to be running |
| 1718 | * anywhere. |
| 1719 | */ |
Anas Nashif | bbbc38b | 2021-03-29 10:03:49 -0400 | [diff] [blame] | 1720 | if ((thread->base.thread_state & _THREAD_DEAD) == 0U) { |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1721 | thread->base.thread_state |= _THREAD_DEAD; |
| 1722 | thread->base.thread_state &= ~_THREAD_ABORTING; |
| 1723 | if (z_is_thread_queued(thread)) { |
Andy Ross | c230fb3 | 2021-09-23 16:41:30 -0700 | [diff] [blame] | 1724 | dequeue_thread(thread); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1725 | } |
| 1726 | if (thread->base.pended_on != NULL) { |
| 1727 | unpend_thread_no_timeout(thread); |
| 1728 | } |
| 1729 | (void)z_abort_thread_timeout(thread); |
| 1730 | unpend_all(&thread->join_queue); |
| 1731 | update_cache(1); |
| 1732 | |
Grant Ramsay | 45701e6 | 2023-08-14 09:41:52 +1200 | [diff] [blame] | 1733 | #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) |
| 1734 | arch_float_disable(thread); |
| 1735 | #endif |
| 1736 | |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1737 | SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread); |
| 1738 | |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1739 | z_thread_monitor_exit(thread); |
| 1740 | |
Chen Peng1 | 0f63d11 | 2021-09-06 13:59:40 +0800 | [diff] [blame] | 1741 | #ifdef CONFIG_CMSIS_RTOS_V1 |
| 1742 | z_thread_cmsis_status_mask_clear(thread); |
| 1743 | #endif |
| 1744 | |
Peter Mitsis | 6df8efe | 2023-05-11 14:06:46 -0400 | [diff] [blame] | 1745 | #ifdef CONFIG_OBJ_CORE_THREAD |
Peter Mitsis | e6f1090 | 2023-06-01 12:16:40 -0400 | [diff] [blame] | 1746 | #ifdef CONFIG_OBJ_CORE_STATS_THREAD |
| 1747 | k_obj_core_stats_deregister(K_OBJ_CORE(thread)); |
| 1748 | #endif |
Peter Mitsis | 6df8efe | 2023-05-11 14:06:46 -0400 | [diff] [blame] | 1749 | k_obj_core_unlink(K_OBJ_CORE(thread)); |
| 1750 | #endif |
| 1751 | |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1752 | #ifdef CONFIG_USERSPACE |
| 1753 | z_mem_domain_exit_thread(thread); |
Anas Nashif | 70cf96b | 2023-09-27 10:45:48 +0000 | [diff] [blame] | 1754 | k_thread_perms_all_clear(thread); |
Anas Nashif | 7a18c2b | 2023-09-27 10:45:18 +0000 | [diff] [blame] | 1755 | k_object_uninit(thread->stack_obj); |
| 1756 | k_object_uninit(thread); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1757 | #endif |
| 1758 | } |
| 1759 | } |
| 1760 | |
| 1761 | void z_thread_abort(struct k_thread *thread) |
| 1762 | { |
| 1763 | k_spinlock_key_t key = k_spin_lock(&sched_spinlock); |
| 1764 | |
Andy Ross | fb61359 | 2022-05-19 12:55:28 -0700 | [diff] [blame] | 1765 | if ((thread->base.user_options & K_ESSENTIAL) != 0) { |
| 1766 | k_spin_unlock(&sched_spinlock, key); |
| 1767 | __ASSERT(false, "aborting essential thread %p", thread); |
| 1768 | k_panic(); |
| 1769 | return; |
| 1770 | } |
| 1771 | |
Anas Nashif | 3f4f3f6 | 2021-03-29 17:13:47 -0400 | [diff] [blame] | 1772 | if ((thread->base.thread_state & _THREAD_DEAD) != 0U) { |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1773 | k_spin_unlock(&sched_spinlock, key); |
| 1774 | return; |
| 1775 | } |
| 1776 | |
| 1777 | #ifdef CONFIG_SMP |
| 1778 | if (is_aborting(thread) && thread == _current && arch_is_in_isr()) { |
| 1779 | /* Another CPU is spinning for us, don't deadlock */ |
| 1780 | end_thread(thread); |
| 1781 | } |
| 1782 | |
| 1783 | bool active = thread_active_elsewhere(thread); |
| 1784 | |
| 1785 | if (active) { |
| 1786 | /* It's running somewhere else, flag and poke */ |
| 1787 | thread->base.thread_state |= _THREAD_ABORTING; |
Lauren Murphy | d88ce65 | 2021-03-09 16:41:43 -0600 | [diff] [blame] | 1788 | |
Andy Ross | b4e9ef0 | 2022-04-06 10:10:17 -0700 | [diff] [blame] | 1789 | /* We're going to spin, so we need a true synchronous IPI
| 1790 | * here, not deferred! |
| 1791 | */ |
Lauren Murphy | d88ce65 | 2021-03-09 16:41:43 -0600 | [diff] [blame] | 1792 | #ifdef CONFIG_SCHED_IPI_SUPPORTED |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1793 | arch_sched_ipi(); |
Lauren Murphy | d88ce65 | 2021-03-09 16:41:43 -0600 | [diff] [blame] | 1794 | #endif |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1795 | } |
| 1796 | |
| 1797 | if (is_aborting(thread) && thread != _current) { |
| 1798 | if (arch_is_in_isr()) { |
| 1799 | /* ISRs can only spin waiting for another CPU */
| 1800 | k_spin_unlock(&sched_spinlock, key); |
| 1801 | while (is_aborting(thread)) { |
| 1802 | } |
Andy Ross | a08e23f | 2023-05-26 09:39:16 -0700 | [diff] [blame] | 1803 | |
| 1804 | /* Now we know it's dying, but not necessarily |
| 1805 | * dead. Wait for the switch to happen! |
| 1806 | */ |
| 1807 | key = k_spin_lock(&sched_spinlock); |
| 1808 | z_sched_switch_spin(thread); |
| 1809 | k_spin_unlock(&sched_spinlock, key); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1810 | } else if (active) { |
| 1811 | /* Threads can join */ |
| 1812 | add_to_waitq_locked(_current, &thread->join_queue); |
| 1813 | z_swap(&sched_spinlock, key); |
| 1814 | } |
| 1815 | return; /* lock has been released */ |
| 1816 | } |
| 1817 | #endif |
| 1818 | end_thread(thread); |
| 1819 | if (thread == _current && !arch_is_in_isr()) { |
| 1820 | z_swap(&sched_spinlock, key); |
| 1821 | __ASSERT(false, "aborted _current back from dead"); |
| 1822 | } |
| 1823 | k_spin_unlock(&sched_spinlock, key); |
| 1824 | } |
| 1825 | |
| 1826 | #if !defined(CONFIG_ARCH_HAS_THREAD_ABORT) |
| 1827 | void z_impl_k_thread_abort(struct k_thread *thread) |
| 1828 | { |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1829 | SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread); |
| 1830 | |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1831 | z_thread_abort(thread); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1832 | |
| 1833 | SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1834 | } |
| 1835 | #endif |
| 1836 | |
| 1837 | int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout) |
| 1838 | { |
| 1839 | k_spinlock_key_t key = k_spin_lock(&sched_spinlock); |
| 1840 | int ret = 0; |
| 1841 | |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1842 | SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, join, thread, timeout); |
| 1843 | |
Anas Nashif | 3f4f3f6 | 2021-03-29 17:13:47 -0400 | [diff] [blame] | 1844 | if ((thread->base.thread_state & _THREAD_DEAD) != 0U) { |
Andy Ross | a08e23f | 2023-05-26 09:39:16 -0700 | [diff] [blame] | 1845 | z_sched_switch_spin(thread); |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1846 | ret = 0; |
| 1847 | } else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { |
| 1848 | ret = -EBUSY; |
Anas Nashif | 3f4f3f6 | 2021-03-29 17:13:47 -0400 | [diff] [blame] | 1849 | } else if ((thread == _current) || |
| 1850 | (thread->base.pended_on == &_current->join_queue)) { |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1851 | ret = -EDEADLK; |
| 1852 | } else { |
| 1853 | __ASSERT(!arch_is_in_isr(), "cannot join in ISR"); |
| 1854 | add_to_waitq_locked(_current, &thread->join_queue); |
| 1855 | add_thread_timeout(_current, timeout); |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1856 | |
| 1857 | SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout); |
| 1858 | ret = z_swap(&sched_spinlock, key); |
| 1859 | SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret); |
| 1860 | |
| 1861 | return ret; |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1862 | } |
| 1863 | |
Torbjörn Leksell | f171443 | 2021-03-26 10:59:08 +0100 | [diff] [blame] | 1864 | SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret); |
| 1865 | |
Andy Ross | 6fb6d3c | 2021-02-19 15:32:19 -0800 | [diff] [blame] | 1866 | k_spin_unlock(&sched_spinlock, key); |
| 1867 | return ret; |
| 1868 | } |
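
/* Example (illustrative sketch): a caller-side view of the join paths
 * above.  A dead target returns 0 immediately, K_NO_WAIT on a live
 * target returns -EBUSY, self-join (or a join cycle) returns -EDEADLK,
 * and an expired timeout is reported back as -EAGAIN.  The worker tid
 * below is assumed to come from a successful k_thread_create().
 *
 *   static int reap_worker(k_tid_t worker)
 *   {
 *       int ret = k_thread_join(worker, K_MSEC(100));
 *
 *       if (ret == -EAGAIN) {
 *           LOG_WRN("worker still running after 100 ms");
 *       }
 *       return ret;
 *   }
 */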
| 1869 | |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1870 | #ifdef CONFIG_USERSPACE |
| 1871 | /* Special case: don't oops if the thread is uninitialized. This is because |
| 1872 | * the initialization bit does double-duty for thread objects; if it is
| 1873 | * false, either the thread object is truly uninitialized or the thread
| 1874 | * ran and exited for some reason.
| 1875 | *
| 1876 | * Return true in this case, indicating we should just do nothing and return
| 1877 | * success to the caller. |
| 1878 | */ |
| 1879 | static bool thread_obj_validate(struct k_thread *thread) |
| 1880 | { |
Anas Nashif | c25d080 | 2023-09-27 10:49:28 +0000 | [diff] [blame] | 1881 | struct k_object *ko = k_object_find(thread); |
Anas Nashif | 21254b2 | 2023-09-27 10:50:26 +0000 | [diff] [blame^] | 1882 | int ret = k_object_validate(ko, K_OBJ_THREAD, _OBJ_INIT_TRUE); |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1883 | |
| 1884 | switch (ret) { |
| 1885 | case 0: |
| 1886 | return false; |
| 1887 | case -EINVAL: |
| 1888 | return true; |
| 1889 | default: |
| 1890 | #ifdef CONFIG_LOG |
| 1891 | z_dump_object_error(ret, thread, ko, K_OBJ_THREAD); |
| 1892 | #endif |
Anas Nashif | 684b8fc | 2023-09-27 10:41:51 +0000 | [diff] [blame] | 1893 | Z_OOPS(K_SYSCALL_VERIFY_MSG(ret, "access denied")); |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1894 | } |
Enjia Mai | 53ca709 | 2021-01-15 17:09:58 +0800 | [diff] [blame] | 1895 | CODE_UNREACHABLE; /* LCOV_EXCL_LINE */ |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1896 | } |
| 1897 | |
Andy Ross | 7832738 | 2020-03-05 15:18:14 -0800 | [diff] [blame] | 1898 | static inline int z_vrfy_k_thread_join(struct k_thread *thread, |
| 1899 | k_timeout_t timeout) |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1900 | { |
| 1901 | if (thread_obj_validate(thread)) { |
| 1902 | return 0; |
| 1903 | } |
| 1904 | |
| 1905 | return z_impl_k_thread_join(thread, timeout); |
| 1906 | } |
| 1907 | #include <syscalls/k_thread_join_mrsh.c> |
Andrew Boie | a4c9190 | 2020-03-24 16:09:24 -0700 | [diff] [blame] | 1908 | |
| 1909 | static inline void z_vrfy_k_thread_abort(k_tid_t thread) |
| 1910 | { |
| 1911 | if (thread_obj_validate(thread)) { |
| 1912 | return; |
| 1913 | } |
| 1914 | |
Anas Nashif | 684b8fc | 2023-09-27 10:41:51 +0000 | [diff] [blame] | 1915 | Z_OOPS(K_SYSCALL_VERIFY_MSG(!(thread->base.user_options & K_ESSENTIAL), |
Andrew Boie | a4c9190 | 2020-03-24 16:09:24 -0700 | [diff] [blame] | 1916 | "aborting essential thread %p", thread)); |
| 1917 | |
| 1918 | z_impl_k_thread_abort((struct k_thread *)thread); |
| 1919 | } |
| 1920 | #include <syscalls/k_thread_abort_mrsh.c> |
Andrew Boie | 322816e | 2020-02-20 16:33:06 -0800 | [diff] [blame] | 1921 | #endif /* CONFIG_USERSPACE */ |
Peter Bigot | 0259c86 | 2021-01-12 13:45:32 -0600 | [diff] [blame] | 1922 | |
| 1923 | /* |
| 1924 | * future scheduler.h API implementations |
| 1925 | */ |
| 1926 | bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data) |
| 1927 | { |
| 1928 | struct k_thread *thread; |
| 1929 | bool ret = false; |
| 1930 | |
Florian Grandel | e256b7d | 2023-07-07 09:12:38 +0200 | [diff] [blame] | 1931 | K_SPINLOCK(&sched_spinlock) { |
Peter Bigot | 0259c86 | 2021-01-12 13:45:32 -0600 | [diff] [blame] | 1932 | thread = _priq_wait_best(&wait_q->waitq); |
| 1933 | |
| 1934 | if (thread != NULL) { |
| 1935 | z_thread_return_value_set_with_data(thread, |
| 1936 | swap_retval, |
| 1937 | swap_data); |
| 1938 | unpend_thread_no_timeout(thread); |
| 1939 | (void)z_abort_thread_timeout(thread); |
| 1940 | ready_thread(thread); |
| 1941 | ret = true; |
| 1942 | } |
| 1943 | } |
| 1944 | |
| 1945 | return ret; |
| 1946 | } |
| 1947 | |
| 1948 | int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key, |
| 1949 | _wait_q_t *wait_q, k_timeout_t timeout, void **data) |
| 1950 | { |
| 1951 | int ret = z_pend_curr(lock, key, wait_q, timeout); |
| 1952 | |
| 1953 | if (data != NULL) { |
| 1954 | *data = _current->base.swap_data; |
| 1955 | } |
| 1956 | return ret; |
| 1957 | } |
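
/* Example (illustrative sketch): how a minimal kernel-side notification
 * object could be layered on the two helpers above.  The receiver pends
 * on a wait queue through z_sched_wait(), which releases the caller's
 * spinlock; the sender hands over a payload and a zero swap return value
 * through z_sched_wake().  The struct and function names are
 * hypothetical.
 *
 *   struct simple_notify {
 *       struct k_spinlock lock;
 *       _wait_q_t waitq;
 *   };
 *
 *   static int simple_notify_recv(struct simple_notify *n, void **payload,
 *                                 k_timeout_t timeout)
 *   {
 *       k_spinlock_key_t key = k_spin_lock(&n->lock);
 *
 *       return z_sched_wait(&n->lock, key, &n->waitq, timeout, payload);
 *   }
 *
 *   static bool simple_notify_send(struct simple_notify *n, void *payload)
 *   {
 *       return z_sched_wake(&n->waitq, 0, payload);
 *   }
 */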
Peter Mitsis | ca58339 | 2023-01-05 11:50:21 -0500 | [diff] [blame] | 1958 | |
| 1959 | int z_sched_waitq_walk(_wait_q_t *wait_q, |
| 1960 | int (*func)(struct k_thread *, void *), void *data) |
| 1961 | { |
| 1962 | struct k_thread *thread; |
| 1963 | int status = 0; |
| 1964 | |
Florian Grandel | e256b7d | 2023-07-07 09:12:38 +0200 | [diff] [blame] | 1965 | K_SPINLOCK(&sched_spinlock) { |
Peter Mitsis | ca58339 | 2023-01-05 11:50:21 -0500 | [diff] [blame] | 1966 | _WAIT_Q_FOR_EACH(wait_q, thread) { |
| 1967 | |
| 1968 | /* |
| 1969 | * Invoke the callback function on each waiting thread,
| 1970 | * continuing for as long as there are waiting threads AND
| 1971 | * the callback returns 0.
| 1972 | */ |
| 1973 | |
| 1974 | status = func(thread, data); |
| 1975 | if (status != 0) { |
| 1976 | break; |
| 1977 | } |
| 1978 | } |
| 1979 | } |
| 1980 | |
| 1981 | return status; |
| 1982 | } |
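
/* Example (illustrative sketch): a walk callback that counts the threads
 * currently pended on a wait queue.  Returning 0 from the callback keeps
 * the walk going; any non-zero value stops it and is passed back to the
 * caller.  Names are hypothetical.
 *
 *   static int count_one_waiter(struct k_thread *thread, void *data)
 *   {
 *       ARG_UNUSED(thread);
 *
 *       (*(int *)data)++;
 *       return 0;
 *   }
 *
 *   static int count_waiters(_wait_q_t *wait_q)
 *   {
 *       int count = 0;
 *
 *       (void)z_sched_waitq_walk(wait_q, count_one_waiter, &count);
 *       return count;
 *   }
 */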