/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/logging/log.h>
#include <zephyr/sys/p4wq.h>
#include <zephyr/kernel.h>
#include <zephyr/init.h>
#include <zephyr/sys/iterable_sections.h>
/* private kernel APIs */
#include <ksched.h>
#include <wait_q.h>

LOG_MODULE_REGISTER(p4wq, CONFIG_LOG_DEFAULT_LEVEL);

struct device;

static void set_prio(struct k_thread *th, struct k_p4wq_work *item)
{
	__ASSERT_NO_MSG(!IS_ENABLED(CONFIG_SMP) || !z_is_thread_queued(th));
	th->base.prio = item->priority;
	th->base.prio_deadline = item->deadline;
}

static bool rb_lessthan(struct rbnode *a, struct rbnode *b)
{
	struct k_p4wq_work *aw = CONTAINER_OF(a, struct k_p4wq_work, rbnode);
	struct k_p4wq_work *bw = CONTAINER_OF(b, struct k_p4wq_work, rbnode);

	if (aw->priority != bw->priority) {
		return aw->priority > bw->priority;
	}

	if (aw->deadline != bw->deadline) {
		return aw->deadline - bw->deadline > 0;
	}

	return (uintptr_t)a < (uintptr_t)b;
}

static void thread_set_requeued(struct k_thread *th)
{
	th->base.user_options |= K_CALLBACK_STATE;
}

static void thread_clear_requeued(struct k_thread *th)
{
	th->base.user_options &= ~K_CALLBACK_STATE;
}

static bool thread_was_requeued(struct k_thread *th)
{
	return !!(th->base.user_options & K_CALLBACK_STATE);
}

/* Slightly different semantics: rb_lessthan must be perfectly
 * symmetric (to produce a single tree structure) and uses the
 * pointer value to break ties where priorities are equal; here we
 * tolerate equality as meaning "not lessthan".
 */
static inline bool item_lessthan(struct k_p4wq_work *a, struct k_p4wq_work *b)
{
	if (a->priority > b->priority) {
		return true;
	} else if ((a->priority == b->priority) &&
		   (a->deadline != b->deadline)) {
		return a->deadline - b->deadline > 0;
	} else {
		;
	}
	return false;
}

static FUNC_NORETURN void p4wq_loop(void *p0, void *p1, void *p2)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	struct k_p4wq *queue = p0;
	k_spinlock_key_t k = k_spin_lock(&queue->lock);

	while (true) {
		struct rbnode *r = rb_get_max(&queue->queue);

		if (r) {
			struct k_p4wq_work *w
				= CONTAINER_OF(r, struct k_p4wq_work, rbnode);

			rb_remove(&queue->queue, r);
			w->thread = _current;
			sys_dlist_append(&queue->active, &w->dlnode);
			set_prio(_current, w);
			thread_clear_requeued(_current);

			k_spin_unlock(&queue->lock, k);

			w->handler(w);

			k = k_spin_lock(&queue->lock);

			/* Remove from the active list only if it
			 * wasn't resubmitted already
			 */
			if (!thread_was_requeued(_current)) {
				sys_dlist_remove(&w->dlnode);
				w->thread = NULL;
				k_sem_give(&w->done_sem);
			}
		} else {
			z_pend_curr(&queue->lock, k, &queue->waitq, K_FOREVER);
			k = k_spin_lock(&queue->lock);
		}
	}
}

/* Must be called to regain ownership of the work item */
int k_p4wq_wait(struct k_p4wq_work *work, k_timeout_t timeout)
{
	if (work->sync) {
		return k_sem_take(&work->done_sem, timeout);
	}

	return k_sem_count_get(&work->done_sem) ? 0 : -EBUSY;
}
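
/*
 * Illustrative usage sketch (not part of the implementation): a
 * submitter that sets work->sync before k_p4wq_submit() can block in
 * k_p4wq_wait() until the handler has finished; with sync left false
 * the call is a non-blocking poll of done_sem.  The queue and handler
 * names below are hypothetical.
 *
 *	static struct k_p4wq_work work = {
 *		.priority = 5,		// thread priority for the handler
 *		.deadline = 0,		// EDF deadline delta in cycles from now
 *		.handler = my_handler,
 *		.sync = true,		// we intend to wait for completion
 *	};
 *
 *	k_p4wq_submit(&my_pool, &work);
 *	if (k_p4wq_wait(&work, K_FOREVER) == 0) {
 *		// ownership of &work regained; safe to reuse it
 *	}
 */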

void k_p4wq_init(struct k_p4wq *queue)
{
	memset(queue, 0, sizeof(*queue));
	z_waitq_init(&queue->waitq);
	queue->queue.lessthan_fn = rb_lessthan;
	sys_dlist_init(&queue->active);
}

void k_p4wq_add_thread(struct k_p4wq *queue, struct k_thread *thread,
		       k_thread_stack_t *stack,
		       size_t stack_size)
{
	k_thread_create(thread, stack, stack_size,
			p4wq_loop, queue, NULL, NULL,
			K_HIGHEST_THREAD_PRIO, 0,
			queue->flags & K_P4WQ_DELAYED_START ? K_FOREVER : K_NO_WAIT);
}
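
/*
 * Illustrative usage sketch (not part of the implementation): building
 * a pool at runtime instead of via the static initializer below.  The
 * names my_q, my_thread and my_stack are hypothetical.
 *
 *	K_THREAD_STACK_DEFINE(my_stack, 1024);
 *	static struct k_p4wq my_q;
 *	static struct k_thread my_thread;
 *
 *	void start_pool(void)
 *	{
 *		k_p4wq_init(&my_q);
 *		k_p4wq_add_thread(&my_q, &my_thread, my_stack,
 *				  K_THREAD_STACK_SIZEOF(my_stack));
 *	}
 */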

static int static_init(void)
{
	STRUCT_SECTION_FOREACH(k_p4wq_initparam, pp) {
		for (int i = 0; i < pp->num; i++) {
			uintptr_t ssz = K_THREAD_STACK_LEN(pp->stack_size);
			struct k_p4wq *q = pp->flags & K_P4WQ_QUEUE_PER_THREAD ?
				pp->queue + i : pp->queue;

			if (!i || (pp->flags & K_P4WQ_QUEUE_PER_THREAD)) {
				k_p4wq_init(q);
			}

			q->flags = pp->flags;

			/*
			 * If the user wants to specify CPU affinity, we have to
			 * delay starting threads until that has been done
			 */
			if (q->flags & K_P4WQ_USER_CPU_MASK) {
				q->flags |= K_P4WQ_DELAYED_START;
			}

			k_p4wq_add_thread(q, &pp->threads[i],
					  &pp->stacks[ssz * i],
					  pp->stack_size);

			if (pp->flags & K_P4WQ_DELAYED_START) {
				z_mark_thread_as_suspended(&pp->threads[i]);
			}

#ifdef CONFIG_SCHED_CPU_MASK
			if (pp->flags & K_P4WQ_USER_CPU_MASK) {
				int ret = k_thread_cpu_mask_clear(&pp->threads[i]);

				if (ret < 0) {
					LOG_ERR("Couldn't clear CPU mask: %d", ret);
				}
			}
#endif
		}
	}

	return 0;
}

void k_p4wq_enable_static_thread(struct k_p4wq *queue, struct k_thread *thread,
				 uint32_t cpu_mask)
{
#ifdef CONFIG_SCHED_CPU_MASK
	if (queue->flags & K_P4WQ_USER_CPU_MASK) {
		unsigned int i;

		while ((i = find_lsb_set(cpu_mask))) {
			int ret = k_thread_cpu_mask_enable(thread, i - 1);

			if (ret < 0) {
				LOG_ERR("Couldn't set CPU mask for %u: %d", i, ret);
			}
			cpu_mask &= ~BIT(i - 1);
		}
	}
#endif

	if (queue->flags & K_P4WQ_DELAYED_START) {
		z_mark_thread_as_not_suspended(thread);
		k_thread_start(thread);
	}
}
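
/*
 * Illustrative usage sketch (not part of the implementation): when a
 * statically defined queue carries the K_P4WQ_USER_CPU_MASK flag its
 * workers start suspended, and the application pins and releases each
 * one once the desired CPU assignment is known.  my_q, my_threads and
 * N_THREADS are hypothetical names.
 *
 *	for (int i = 0; i < N_THREADS; i++) {
 *		k_p4wq_enable_static_thread(&my_q, &my_threads[i], BIT(i));
 *	}
 */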

/* We spawn a bunch of high priority threads.  This runs at the very
 * end of system initialization (APPLICATION level, priority 99) so
 * the rest of the system is up before the workers are created.
 */
SYS_INIT(static_init, APPLICATION, 99);
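
/*
 * Illustrative usage sketch (not part of the implementation): most
 * users declare their pool with the K_P4WQ_DEFINE() helper from
 * <zephyr/sys/p4wq.h>, which generates the k_p4wq_initparam record
 * consumed by static_init() above.  Pool name, thread count and stack
 * size below are hypothetical.
 *
 *	K_P4WQ_DEFINE(my_pool, 4, 2048);
 *
 * After boot, work items can be submitted with
 * k_p4wq_submit(&my_pool, &work).
 */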

void k_p4wq_submit(struct k_p4wq *queue, struct k_p4wq_work *item)
{
	k_spinlock_key_t k = k_spin_lock(&queue->lock);

	/* Input is a delta time from now (to match
	 * k_thread_deadline_set()), but we store and use the absolute
	 * cycle count.
	 */
	item->deadline += k_cycle_get_32();

	/* Resubmission from within handler? Remove from active list */
	if (item->thread == _current) {
		sys_dlist_remove(&item->dlnode);
		thread_set_requeued(_current);
		item->thread = NULL;
	} else {
		k_sem_init(&item->done_sem, 0, 1);
	}
	__ASSERT_NO_MSG(item->thread == NULL);

	rb_insert(&queue->queue, &item->rbnode);
	item->queue = queue;

	/* If there were other items already ahead of it in the queue,
	 * then we don't need to revisit active thread state and can
	 * return.
	 */
	if (rb_get_max(&queue->queue) != &item->rbnode) {
		goto out;
	}

	/* Check the list of active (running or preempted) items; if
	 * there are at least an "active target" of those that are
	 * higher priority than the new item, then no one needs to be
	 * preempted and we can return.
	 */
	struct k_p4wq_work *wi;
	uint32_t n_beaten_by = 0, active_target = arch_num_cpus();

	SYS_DLIST_FOR_EACH_CONTAINER(&queue->active, wi, dlnode) {
		/*
		 * item_lessthan(a, b) == true means a has lower priority than b
		 * !item_lessthan(a, b) counts all work items with higher or
		 * equal priority
		 */
		if (!item_lessthan(wi, item)) {
			n_beaten_by++;
		}
	}

	if (n_beaten_by >= active_target) {
		/* Too many already have higher priority, not preempting */
		goto out;
	}

	/* Grab a thread, set its priority and queue it. If there are
	 * no threads available to unpend, this is a soft runtime
	 * error: we are breaking our promise about run order.
	 * Complain.
	 */
	struct k_thread *th = z_unpend_first_thread(&queue->waitq);

	if (th == NULL) {
		LOG_WRN("Out of worker threads, priority guarantee violated");
		goto out;
	}

	set_prio(th, item);
	z_ready_thread(th);
	z_reschedule(&queue->lock, k);

	return;

out:
	k_spin_unlock(&queue->lock, k);
}
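
/*
 * Illustrative usage sketch (not part of the implementation): the
 * deadline passed in is a delta in hardware cycles from "now" (to
 * match k_thread_deadline_set()), and a handler may resubmit its own
 * item, which takes the requeue path above instead of signaling
 * done_sem.  The helper names are hypothetical.
 *
 *	static void tick_handler(struct k_p4wq_work *work)
 *	{
 *		do_periodic_work();
 *
 *		// run again roughly 1 ms from now
 *		work->deadline = k_us_to_cyc_ceil32(1000);
 *		k_p4wq_submit(work->queue, work);
 *	}
 */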

bool k_p4wq_cancel(struct k_p4wq *queue, struct k_p4wq_work *item)
{
	k_spinlock_key_t k = k_spin_lock(&queue->lock);
	bool ret = rb_contains(&queue->queue, &item->rbnode);

	if (ret) {
		rb_remove(&queue->queue, &item->rbnode);
		k_sem_give(&item->done_sem);
	}

	k_spin_unlock(&queue->lock, k);
	return ret;
}
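
/*
 * Illustrative usage sketch (not part of the implementation):
 * cancellation only succeeds while the item is still queued; once a
 * worker has picked it up, a caller that submitted with sync set can
 * fall back to k_p4wq_wait() to let the handler finish.  my_pool is a
 * hypothetical queue.
 *
 *	if (!k_p4wq_cancel(&my_pool, &work)) {
 *		// already running (or done): wait for the handler
 *		(void)k_p4wq_wait(&work, K_FOREVER);
 *	}
 */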