/*
 * Copyright (c) 2020 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * Second generation work queue implementation
 */

#include <kernel.h>
#include <kernel_structs.h>
#include <wait_q.h>
#include <spinlock.h>
#include <errno.h>
#include <ksched.h>
#include <sys/printk.h>

static inline void flag_clear(uint32_t *flagp,
			      uint32_t bit)
{
	*flagp &= ~BIT(bit);
}

static inline void flag_set(uint32_t *flagp,
			    uint32_t bit)
{
	*flagp |= BIT(bit);
}

static inline bool flag_test(const uint32_t *flagp,
			     uint32_t bit)
{
	return (*flagp & BIT(bit)) != 0U;
}

static inline bool flag_test_and_clear(uint32_t *flagp,
				       int bit)
{
	bool ret = flag_test(flagp, bit);

	flag_clear(flagp, bit);

	return ret;
}

static inline void flags_set(uint32_t *flagp,
			     uint32_t flags)
{
	*flagp = flags;
}

static inline uint32_t flags_get(const uint32_t *flagp)
{
	return *flagp;
}

/* Lock to protect the internal state of all work items, work queues,
 * and pending_cancels.
 */
static struct k_spinlock lock;

/* Invoked by work thread */
static void handle_flush(struct k_work *work)
{
	struct z_work_flusher *flusher
		= CONTAINER_OF(work, struct z_work_flusher, work);

	k_sem_give(&flusher->sem);
}

static inline void init_flusher(struct z_work_flusher *flusher)
{
	k_sem_init(&flusher->sem, 0, 1);
	k_work_init(&flusher->work, handle_flush);
}

/* List of pending cancellations. */
static sys_slist_t pending_cancels;

/* Initialize a canceler record and add it to the list of pending
 * cancels.
 *
 * Invoked with work lock held.
 *
 * @param canceler the structure used to notify a waiting process.
 * @param work the work structure that is to be canceled
 */
static inline void init_work_cancel(struct z_work_canceller *canceler,
				    struct k_work *work)
{
	k_sem_init(&canceler->sem, 0, 1);
	canceler->work = work;
	sys_slist_append(&pending_cancels, &canceler->node);
}

/* Complete cancellation of a work item.
 *
 * Invoked with work lock held.
 *
 * Invoked from a work queue thread.
 *
 * Reschedules.
 *
 * @param work the work structure that has completed cancellation
 */
static void finalize_cancel_locked(struct k_work *work)
{
	struct z_work_canceller *wc, *tmp;
	sys_snode_t *prev = NULL;

	/* Clear this first, so released high-priority threads don't
	 * see it when doing things.
	 */
	flag_clear(&work->flags, K_WORK_CANCELING_BIT);

	/* Search for and remove the matching container, and release
	 * what's waiting for the completion.  The same work item can
	 * appear multiple times in the list if multiple threads
	 * attempt to cancel it.
	 */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&pending_cancels, wc, tmp, node) {
		if (wc->work == work) {
			sys_slist_remove(&pending_cancels, prev, &wc->node);
			k_sem_give(&wc->sem);
		} else {
			prev = &wc->node;
		}
	}
}

void k_work_init(struct k_work *work,
		 k_work_handler_t handler)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(handler != NULL);

	*work = (struct k_work)Z_WORK_INITIALIZER(handler);

	SYS_PORT_TRACING_OBJ_INIT(k_work, work);
}

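/*
 * Usage sketch (illustrative only, not part of this file's logic):
 * initializing a work item for the API implemented here.  The handler
 * and item names are hypothetical; K_WORK_DEFINE is the public
 * header's static-definition helper.
 *
 *	static void my_handler(struct k_work *work)
 *	{
 *		printk("work item %p ran\n", work);
 *	}
 *
 *	// Runtime initialization:
 *	struct k_work my_work;
 *	k_work_init(&my_work, my_handler);
 *
 *	// Equivalent static definition:
 *	static K_WORK_DEFINE(my_static_work, my_handler);
 */
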
static inline int work_busy_get_locked(const struct k_work *work)
{
	return flags_get(&work->flags) & K_WORK_MASK;
}

int k_work_busy_get(const struct k_work *work)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = work_busy_get_locked(work);

	k_spin_unlock(&lock, key);

	return ret;
}

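/*
 * Usage sketch (illustrative only): interpreting the value returned by
 * k_work_busy_get().  The result is a mask of K_WORK_* state bits;
 * zero means the item is idle.  The item name is hypothetical.
 *
 *	int busy = k_work_busy_get(&my_work);
 *
 *	if (busy == 0) {
 *		// idle: not queued, running, delayed, or canceling
 *	} else if ((busy & (K_WORK_QUEUED | K_WORK_RUNNING)) != 0) {
 *		// pending on a queue or currently executing
 *	}
 */
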
/* Add a flusher work item to the queue.
 *
 * Invoked with work lock held.
 *
 * Caller must notify queue of pending work.
 *
 * @param queue queue on which a work item may appear.
 * @param work the work item that is either queued or running on @p
 * queue
 * @param flusher an uninitialized/unused flusher object
 */
static void queue_flusher_locked(struct k_work_q *queue,
				 struct k_work *work,
				 struct z_work_flusher *flusher)
{
	bool in_list = false;
	struct k_work *wn;

	/* Determine whether the work item is still queued. */
	SYS_SLIST_FOR_EACH_CONTAINER(&queue->pending, wn, node) {
		if (wn == work) {
			in_list = true;
			break;
		}
	}

	init_flusher(flusher);
	if (in_list) {
		sys_slist_insert(&queue->pending, &work->node,
				 &flusher->work.node);
	} else {
		sys_slist_prepend(&queue->pending, &flusher->work.node);
	}
}

/* Try to remove a work item from the given queue.
 *
 * Invoked with work lock held.
 *
 * @param queue the queue from which the work should be removed
 * @param work work that may be on the queue
 */
static inline void queue_remove_locked(struct k_work_q *queue,
				       struct k_work *work)
{
	if (flag_test_and_clear(&work->flags, K_WORK_QUEUED_BIT)) {
		(void)sys_slist_find_and_remove(&queue->pending, &work->node);
	}
}

/* Potentially notify a queue that it needs to look for pending work.
 *
 * This may make the work queue thread ready, but as the lock is held it
 * will not be a reschedule point.  Callers should yield after the lock is
 * released where appropriate (generally if this returns true).
 *
 * @param queue to be notified.  If this is null no notification is required.
 *
 * @return true if and only if the queue was notified and woken, i.e. a
 * reschedule is pending.
 */
static inline bool notify_queue_locked(struct k_work_q *queue)
{
	bool rv = false;

	if (queue != NULL) {
		rv = z_sched_wake(&queue->notifyq, 0, NULL);
	}

	return rv;
}

/* Submit a work item to a queue if queue state allows new work.
 *
 * Submission is rejected if no queue is provided, or if the queue is
 * draining and the work isn't being submitted from the queue's
 * thread (chained submission).
 *
 * Invoked with work lock held.
 * Conditionally notifies queue.
 *
 * @param queue the queue to which work should be submitted.  This may
 * be null, in which case the submission will fail.
 *
 * @param work to be submitted
 *
 * @retval 1 if successfully queued
 * @retval -EINVAL if no queue is provided
 * @retval -ENODEV if the queue is not started
 * @retval -EBUSY if the submission was rejected (draining, plugged)
 */
static inline int queue_submit_locked(struct k_work_q *queue,
				      struct k_work *work)
{
	if (queue == NULL) {
		return -EINVAL;
	}

	int ret = -EBUSY;
	bool chained = (_current == &queue->thread) && !k_is_in_isr();
	bool draining = flag_test(&queue->flags, K_WORK_QUEUE_DRAIN_BIT);
	bool plugged = flag_test(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT);

	/* Test for acceptability, in priority order:
	 *
	 * * -ENODEV if the queue isn't running.
	 * * -EBUSY if draining and not chained
	 * * -EBUSY if plugged and not draining
	 * * otherwise OK
	 */
	if (!flag_test(&queue->flags, K_WORK_QUEUE_STARTED_BIT)) {
		ret = -ENODEV;
	} else if (draining && !chained) {
		ret = -EBUSY;
	} else if (plugged && !draining) {
		ret = -EBUSY;
	} else {
		sys_slist_append(&queue->pending, &work->node);
		ret = 1;
		(void)notify_queue_locked(queue);
	}

	return ret;
}

/* Attempt to submit work to a queue.
 *
 * The submission can fail if:
 * * the work is canceling,
 * * no candidate queue can be identified, or
 * * the candidate queue rejects the submission.
 *
 * Invoked with work lock held.
 * Conditionally notifies queue.
 *
 * @param work the work structure to be submitted
 *
 * @param queuep pointer to a queue reference.  On input this should
 * dereference to the proposed queue (which may be null); after completion it
 * will be null if the work was not submitted or if submitted will reference
 * the queue it was submitted to.  That may or may not be the queue provided
 * on input.
 *
 * @retval 0 if work was already submitted to a queue
 * @retval 1 if work was not submitted and has been queued to @p queue
 * @retval 2 if work was running and has been queued to the queue that was
 * running it
 * @retval -EBUSY if canceling or submission was rejected by queue
 * @retval -EINVAL if no queue is provided
 * @retval -ENODEV if the queue is not started
 */
static int submit_to_queue_locked(struct k_work *work,
				  struct k_work_q **queuep)
{
	int ret = 0;

	if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
		/* Disallowed */
		ret = -EBUSY;
	} else if (!flag_test(&work->flags, K_WORK_QUEUED_BIT)) {
		/* Not currently queued */
		ret = 1;

		/* If no queue was specified, resubmit to the last queue. */
		if (*queuep == NULL) {
			*queuep = work->queue;
		}

		/* If the work is currently running we have to use the
		 * queue it's running on to prevent handler
		 * re-entrancy.
		 */
		if (flag_test(&work->flags, K_WORK_RUNNING_BIT)) {
			__ASSERT_NO_MSG(work->queue != NULL);
			*queuep = work->queue;
			ret = 2;
		}

		int rc = queue_submit_locked(*queuep, work);

		if (rc < 0) {
			ret = rc;
		} else {
			flag_set(&work->flags, K_WORK_QUEUED_BIT);
			work->queue = *queuep;
		}
	} else {
		/* Already queued, do nothing. */
	}

	if (ret <= 0) {
		*queuep = NULL;
	}

	return ret;
}

int k_work_submit_to_queue(struct k_work_q *queue,
			   struct k_work *work)
{
	__ASSERT_NO_MSG(work != NULL);

	k_spinlock_key_t key = k_spin_lock(&lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit_to_queue, queue, work);

	int ret = submit_to_queue_locked(work, &queue);

	k_spin_unlock(&lock, key);

	/* If we changed the queue contents (as indicated by a positive ret)
	 * the queue thread may now be ready, but we missed the reschedule
	 * point because the lock was held.  If this is being invoked by a
	 * preemptible thread then yield.
	 */
	if ((ret > 0) && (k_is_preempt_thread() != 0)) {
		k_yield();
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit_to_queue, queue, work, ret);

	return ret;
}

int k_work_submit(struct k_work *work)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, submit, work);

	int ret = k_work_submit_to_queue(&k_sys_work_q, work);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, submit, work, ret);

	return ret;
}

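/*
 * Usage sketch (illustrative only): submitting a work item, e.g. from
 * an ISR.  The names are hypothetical; the return values match the
 * @retval documentation of submit_to_queue_locked() above.
 *
 *	void my_isr(const void *arg)
 *	{
 *		int ret = k_work_submit(&my_work);
 *
 *		if (ret < 0) {
 *			// rejected: canceling, or queue not started,
 *			// draining, or plugged
 *		}
 *		// 0: already queued; 1: newly queued;
 *		// 2: running, so queued again on the same queue
 *	}
 */
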
/* Flush the work item if necessary.
 *
 * Flushing is necessary only if the work is either queued or running.
 *
 * Invoked with work lock held by key.
 * Sleeps.
 *
 * @param work the work item that is to be flushed
 * @param flusher state used to synchronize the flush
 *
 * @retval true if work is queued or running.  If this happens the
 * caller must take the flusher semaphore after releasing the lock.
 *
 * @retval false otherwise.  No wait required.
 */
static bool work_flush_locked(struct k_work *work,
			      struct z_work_flusher *flusher)
{
	bool need_flush = (flags_get(&work->flags)
			   & (K_WORK_QUEUED | K_WORK_RUNNING)) != 0U;

	if (need_flush) {
		struct k_work_q *queue = work->queue;

		__ASSERT_NO_MSG(queue != NULL);

		queue_flusher_locked(queue, work, flusher);
		notify_queue_locked(queue);
	}

	return need_flush;
}

bool k_work_flush(struct k_work *work,
		  struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));
	__ASSERT_NO_MSG(!k_is_in_isr());
	__ASSERT_NO_MSG(sync != NULL);
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush, work);

	struct z_work_flusher *flusher = &sync->flusher;
	k_spinlock_key_t key = k_spin_lock(&lock);

	bool need_flush = work_flush_locked(work, flusher);

	k_spin_unlock(&lock, key);

	/* If necessary wait until the flusher item completes */
	if (need_flush) {
		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_work, flush, work, K_FOREVER);

		k_sem_take(&flusher->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush, work, need_flush);

	return need_flush;
}

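/*
 * Usage sketch (illustrative only): blocking until a work item's
 * handler has finished before releasing resources it uses.  The
 * k_work_sync object must remain valid until the call returns; the
 * names are hypothetical.
 *
 *	void my_teardown(void)
 *	{
 *		struct k_work_sync sync;
 *
 *		(void)k_work_flush(&my_work, &sync);
 *		// any invocation of my_work that was queued or running
 *		// at the time of the call has completed here
 *	}
 */
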
/* Execute the non-waiting steps necessary to cancel a work item.
 *
 * Invoked with work lock held.
 *
 * @param work the work item to be canceled.
 *
 * @return the k_work_busy_get() state captured under lock.  A nonzero
 * value indicates the item was still busy, so the caller must wait for
 * cancellation to complete.
 */
static int cancel_async_locked(struct k_work *work)
{
	/* If we haven't already started canceling, do it now. */
	if (!flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
		/* Remove it from the queue, if it's queued. */
		queue_remove_locked(work->queue, work);
	}

	/* If it's still busy after it's been dequeued, then flag it
	 * as canceling.
	 */
	int ret = work_busy_get_locked(work);

	if (ret != 0) {
		flag_set(&work->flags, K_WORK_CANCELING_BIT);
		ret = work_busy_get_locked(work);
	}

	return ret;
}

/* Complete cancellation where necessary: if the work item is still
 * being canceled, register a canceller so the caller can wait for
 * completion after releasing the work lock.
 *
 * Invoked with work lock held.
 *
 * @param work work that is being canceled
 * @param canceller state used to synchronize the cancellation
 *
 * @retval true if and only if the work was still active on entry.  The caller
 * must wait on the canceller semaphore after releasing the lock.
 *
 * @retval false if work was idle on entry.  The caller need not wait.
 */
static bool cancel_sync_locked(struct k_work *work,
			       struct z_work_canceller *canceller)
{
	bool ret = flag_test(&work->flags, K_WORK_CANCELING_BIT);

	/* If something's still running then we have to wait for
	 * completion, which is indicated when finalize_cancel_locked()
	 * gets invoked.
	 */
	if (ret) {
		init_work_cancel(canceller, work);
	}

	return ret;
}

int k_work_cancel(struct k_work *work)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel, work);

	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = cancel_async_locked(work);

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel, work, ret);

	return ret;
}

bool k_work_cancel_sync(struct k_work *work,
			struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(work != NULL);
	__ASSERT_NO_MSG(sync != NULL);
	__ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT));
	__ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_sync, work, sync);

	struct z_work_canceller *canceller = &sync->canceller;
	k_spinlock_key_t key = k_spin_lock(&lock);
	bool pending = (work_busy_get_locked(work) != 0U);
	bool need_wait = false;

	if (pending) {
		(void)cancel_async_locked(work);
		need_wait = cancel_sync_locked(work, canceller);
	}

	k_spin_unlock(&lock, key);

	if (need_wait) {
		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_work, cancel_sync, work, sync);

		k_sem_take(&canceller->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_sync, work, sync, pending);
	return pending;
}

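/*
 * Usage sketch (illustrative only): canceling a work item and waiting
 * until it is guaranteed not to be running.  The names are
 * hypothetical.
 *
 *	void my_shutdown(void)
 *	{
 *		struct k_work_sync sync;
 *		bool was_pending = k_work_cancel_sync(&my_work, &sync);
 *
 *		if (was_pending) {
 *			// the item was queued, running, or canceling;
 *			// it is idle once this call returns
 *		}
 *	}
 */
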
/* Loop executed by a work queue thread.
 *
 * @param workq_ptr pointer to the work queue structure
 */
static void work_queue_main(void *workq_ptr, void *p2, void *p3)
{
	struct k_work_q *queue = (struct k_work_q *)workq_ptr;

	while (true) {
		sys_snode_t *node;
		struct k_work *work = NULL;
		k_work_handler_t handler = NULL;
		k_spinlock_key_t key = k_spin_lock(&lock);

		/* Check for and prepare any new work. */
		node = sys_slist_get(&queue->pending);
		if (node != NULL) {
			/* Mark that there's some work active that's
			 * not on the pending list.
			 */
			flag_set(&queue->flags, K_WORK_QUEUE_BUSY_BIT);
			work = CONTAINER_OF(node, struct k_work, node);
			flag_set(&work->flags, K_WORK_RUNNING_BIT);
			flag_clear(&work->flags, K_WORK_QUEUED_BIT);
			handler = work->handler;
		} else if (flag_test_and_clear(&queue->flags,
					       K_WORK_QUEUE_DRAIN_BIT)) {
			/* Not busy and draining: move threads waiting for
			 * drain to ready state.  The held spinlock inhibits
			 * immediate reschedule; released threads get their
			 * chance when this invokes z_sched_wait() below.
			 *
			 * We don't touch K_WORK_QUEUE_PLUGGED, so getting
			 * here doesn't mean that the queue will allow new
			 * submissions.
			 */
			(void)z_sched_wake_all(&queue->drainq, 1, NULL);
		} else {
			/* No work is available and no queue state requires
			 * special handling.
			 */
			;
		}

		if (work == NULL) {
			/* Nothing's had a chance to add work since we took
			 * the lock, and we didn't find work nor got asked to
			 * stop.  Just go to sleep: when something happens the
			 * work thread will be woken and we can check again.
			 */

			(void)z_sched_wait(&lock, key, &queue->notifyq,
					   K_FOREVER, NULL);
			continue;
		}

		k_spin_unlock(&lock, key);

		if (work != NULL) {
			bool yield;

			__ASSERT_NO_MSG(handler != NULL);
			handler(work);

			/* Mark the work item as no longer running and deal
			 * with any cancellation issued while it was running.
			 * Clear the BUSY flag and optionally yield to prevent
			 * starving other threads.
			 */
			key = k_spin_lock(&lock);

			flag_clear(&work->flags, K_WORK_RUNNING_BIT);
			if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) {
				finalize_cancel_locked(work);
			}

			flag_clear(&queue->flags, K_WORK_QUEUE_BUSY_BIT);
			yield = !flag_test(&queue->flags, K_WORK_QUEUE_NO_YIELD_BIT);
			k_spin_unlock(&lock, key);

			/* Optionally yield to prevent the work queue from
			 * starving other threads.
			 */
			if (yield) {
				k_yield();
			}
		}
	}
}

void k_work_queue_start(struct k_work_q *queue,
			k_thread_stack_t *stack,
			size_t stack_size,
			int prio,
			const struct k_work_queue_config *cfg)
{
	__ASSERT_NO_MSG(queue);
	__ASSERT_NO_MSG(stack);
	__ASSERT_NO_MSG(!flag_test(&queue->flags, K_WORK_QUEUE_STARTED_BIT));
	uint32_t flags = K_WORK_QUEUE_STARTED;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, start, queue);

	sys_slist_init(&queue->pending);
	z_waitq_init(&queue->notifyq);
	z_waitq_init(&queue->drainq);

	if ((cfg != NULL) && cfg->no_yield) {
		flags |= K_WORK_QUEUE_NO_YIELD;
	}

	/* It hasn't actually been started yet, but all the state is in place
	 * so we can submit things and once the thread gets control it's ready
	 * to roll.
	 */
	flags_set(&queue->flags, flags);

	(void)k_thread_create(&queue->thread, stack, stack_size,
			      work_queue_main, queue, NULL, NULL,
			      prio, 0, K_FOREVER);

	if ((cfg != NULL) && (cfg->name != NULL)) {
		k_thread_name_set(&queue->thread, cfg->name);
	}

	k_thread_start(&queue->thread);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, start, queue);
}

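/*
 * Usage sketch (illustrative only): creating a dedicated work queue.
 * The stack object, queue, priority, and name are hypothetical;
 * K_THREAD_STACK_DEFINE/K_THREAD_STACK_SIZEOF are the public stack
 * helpers.
 *
 *	K_THREAD_STACK_DEFINE(my_wq_stack, 1024);
 *	static struct k_work_q my_wq;
 *
 *	void my_init(void)
 *	{
 *		struct k_work_queue_config cfg = {
 *			.name = "my_wq",
 *			.no_yield = false,
 *		};
 *
 *		k_work_queue_start(&my_wq, my_wq_stack,
 *				   K_THREAD_STACK_SIZEOF(my_wq_stack),
 *				   5, &cfg);
 *		(void)k_work_submit_to_queue(&my_wq, &my_work);
 *	}
 */
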
int k_work_queue_drain(struct k_work_q *queue,
		       bool plug)
{
	__ASSERT_NO_MSG(queue);
	__ASSERT_NO_MSG(!k_is_in_isr());

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, drain, queue);

	int ret = 0;
	k_spinlock_key_t key = k_spin_lock(&lock);

	if (((flags_get(&queue->flags)
	      & (K_WORK_QUEUE_BUSY | K_WORK_QUEUE_DRAIN)) != 0U)
	    || plug
	    || !sys_slist_is_empty(&queue->pending)) {
		flag_set(&queue->flags, K_WORK_QUEUE_DRAIN_BIT);
		if (plug) {
			flag_set(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT);
		}

		notify_queue_locked(queue);
		ret = z_sched_wait(&lock, key, &queue->drainq,
				   K_FOREVER, NULL);
	} else {
		k_spin_unlock(&lock, key);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, drain, queue, ret);

	return ret;
}

int k_work_queue_unplug(struct k_work_q *queue)
{
	__ASSERT_NO_MSG(queue);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_queue, unplug, queue);

	int ret = -EALREADY;
	k_spinlock_key_t key = k_spin_lock(&lock);

	if (flag_test_and_clear(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT)) {
		ret = 0;
	}

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_queue, unplug, queue, ret);

	return ret;
}

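/*
 * Usage sketch (illustrative only): draining a queue around a critical
 * section.  Passing plug=true keeps the queue closed to new
 * submissions until k_work_queue_unplug() is called; the names are
 * hypothetical.
 *
 *	void my_quiesce(void)
 *	{
 *		// Wait until all pending work has run, then refuse new
 *		// submissions.
 *		(void)k_work_queue_drain(&my_wq, true);
 *
 *		// ... operate while the queue is idle and plugged ...
 *
 *		(void)k_work_queue_unplug(&my_wq);
 *	}
 */
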
#ifdef CONFIG_SYS_CLOCK_EXISTS

/* Timeout handler for delayable work.
 *
 * Invoked by timeout infrastructure.
 * Takes and releases work lock.
 * Conditionally reschedules.
 */
static void work_timeout(struct _timeout *to)
{
	struct k_work_delayable *dw
		= CONTAINER_OF(to, struct k_work_delayable, timeout);
	struct k_work *wp = &dw->work;
	k_spinlock_key_t key = k_spin_lock(&lock);
	struct k_work_q *queue = NULL;

	/* If the work is still marked delayed (should be) then clear that
	 * state and submit it to the queue.  If successful the queue will be
	 * notified of new work at the next reschedule point.
	 *
	 * If not successful there is no notification that the work has been
	 * abandoned.  Sorry.
	 */
	if (flag_test_and_clear(&wp->flags, K_WORK_DELAYED_BIT)) {
		queue = dw->queue;
		(void)submit_to_queue_locked(wp, &queue);
	}

	k_spin_unlock(&lock, key);
}

void k_work_init_delayable(struct k_work_delayable *dwork,
			   k_work_handler_t handler)
{
	__ASSERT_NO_MSG(dwork != NULL);
	__ASSERT_NO_MSG(handler != NULL);

	*dwork = (struct k_work_delayable){
		.work = {
			.handler = handler,
			.flags = K_WORK_DELAYABLE,
		},
	};
	z_init_timeout(&dwork->timeout);

	SYS_PORT_TRACING_OBJ_INIT(k_work_delayable, dwork);
}

static inline int work_delayable_busy_get_locked(const struct k_work_delayable *dwork)
{
	return atomic_get(&dwork->work.flags) & K_WORK_MASK;
}

int k_work_delayable_busy_get(const struct k_work_delayable *dwork)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = work_delayable_busy_get_locked(dwork);

	k_spin_unlock(&lock, key);
	return ret;
}

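/*
 * Usage sketch (illustrative only): defining a delayable work item.
 * The names are hypothetical; K_WORK_DELAYABLE_DEFINE and
 * k_work_delayable_from_work() come from the public header.
 *
 *	static void my_timeout_handler(struct k_work *work)
 *	{
 *		struct k_work_delayable *dwork =
 *			k_work_delayable_from_work(work);
 *
 *		// ... deferred or periodic processing ...
 *	}
 *
 *	static K_WORK_DELAYABLE_DEFINE(my_dwork, my_timeout_handler);
 *
 *	// Runtime equivalent:
 *	struct k_work_delayable my_other_dwork;
 *	k_work_init_delayable(&my_other_dwork, my_timeout_handler);
 */
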
/* Attempt to schedule a work item for future (maybe immediate)
 * submission.
 *
 * Invoked with work lock held.
 *
 * See also submit_to_queue_locked(), which implements this for a no-wait
 * delay.
 *
 * @param queuep pointer to a pointer to a queue.  On input this
 * should dereference to the proposed queue (which may be null); after
 * completion it will be null if the work was not submitted or if
 * submitted will reference the queue it was submitted to.  That may
 * or may not be the queue provided on input.
 *
 * @param dwork the delayed work structure
 *
 * @param delay the delay to use before scheduling.
 *
 * @retval from submit_to_queue_locked() if delay is K_NO_WAIT; otherwise
 * @retval 1 to indicate successfully scheduled.
 */
static int schedule_for_queue_locked(struct k_work_q **queuep,
				     struct k_work_delayable *dwork,
				     k_timeout_t delay)
{
	int ret = 1;
	struct k_work *work = &dwork->work;

	if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
		return submit_to_queue_locked(work, queuep);
	}

	flag_set(&work->flags, K_WORK_DELAYED_BIT);
	dwork->queue = *queuep;

	/* Add timeout */
	z_add_timeout(&dwork->timeout, work_timeout, delay);

	return ret;
}

/* Unschedule delayable work.
 *
 * If the work is delayed, cancel the timeout and clear the delayed
 * flag.
 *
 * Invoked with work lock held.
 *
 * @param dwork pointer to delayable work structure.
 *
 * @return true if and only if work had been delayed so the timeout
 * was cancelled.
 */
static inline bool unschedule_locked(struct k_work_delayable *dwork)
{
	bool ret = false;
	struct k_work *work = &dwork->work;

	/* If scheduled, try to cancel. */
	if (flag_test_and_clear(&work->flags, K_WORK_DELAYED_BIT)) {
		z_abort_timeout(&dwork->timeout);
		ret = true;
	}

	return ret;
}

/* Full cancellation of a delayable work item.
 *
 * Unschedules the delayed part then delegates to standard work
 * cancellation.
 *
 * Invoked with work lock held.
 *
 * @param dwork delayable work item
 *
 * @return k_work_busy_get() flags
 */
static int cancel_delayable_async_locked(struct k_work_delayable *dwork)
{
	(void)unschedule_locked(dwork);

	return cancel_async_locked(&dwork->work);
}

int k_work_schedule_for_queue(struct k_work_q *queue,
			      struct k_work_delayable *dwork,
			      k_timeout_t delay)
{
	__ASSERT_NO_MSG(dwork != NULL);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, schedule_for_queue, queue, dwork, delay);

	struct k_work *work = &dwork->work;
	int ret = 0;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* Schedule the work item if it's idle or running. */
	if ((work_busy_get_locked(work) & ~K_WORK_RUNNING) == 0U) {
		ret = schedule_for_queue_locked(&queue, dwork, delay);
	}

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, schedule_for_queue, queue, dwork, delay, ret);

	return ret;
}

int k_work_schedule(struct k_work_delayable *dwork,
		    k_timeout_t delay)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, schedule, dwork, delay);

	int ret = k_work_schedule_for_queue(&k_sys_work_q, dwork, delay);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, schedule, dwork, delay, ret);

	return ret;
}

int k_work_reschedule_for_queue(struct k_work_q *queue,
				struct k_work_delayable *dwork,
				k_timeout_t delay)
{
	__ASSERT_NO_MSG(dwork != NULL);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, reschedule_for_queue, queue, dwork, delay);

	int ret = 0;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* Remove any active scheduling. */
	(void)unschedule_locked(dwork);

	/* Schedule the work item with the new parameters. */
	ret = schedule_for_queue_locked(&queue, dwork, delay);

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, reschedule_for_queue, queue, dwork, delay, ret);

	return ret;
}

int k_work_reschedule(struct k_work_delayable *dwork,
		      k_timeout_t delay)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, reschedule, dwork, delay);

	int ret = k_work_reschedule_for_queue(&k_sys_work_q, dwork, delay);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, reschedule, dwork, delay, ret);

	return ret;
}

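/*
 * Usage sketch (illustrative only): schedule versus reschedule.
 * k_work_schedule() leaves an already-scheduled item alone, while
 * k_work_reschedule() restarts the countdown; the item and delay are
 * hypothetical.
 *
 *	// Fire 100 ms after the first request; later requests made
 *	// before it fires do not move the deadline.
 *	(void)k_work_schedule(&my_dwork, K_MSEC(100));
 *
 *	// Debounce: push the deadline to 100 ms after the most recent
 *	// request.
 *	(void)k_work_reschedule(&my_dwork, K_MSEC(100));
 */
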
int k_work_cancel_delayable(struct k_work_delayable *dwork)
{
	__ASSERT_NO_MSG(dwork != NULL);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable, dwork);

	k_spinlock_key_t key = k_spin_lock(&lock);
	int ret = cancel_delayable_async_locked(dwork);

	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_delayable, dwork, ret);

	return ret;
}

bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
				  struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(dwork != NULL);
	__ASSERT_NO_MSG(sync != NULL);
	__ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, cancel_delayable_sync, dwork, sync);

	struct z_work_canceller *canceller = &sync->canceller;
	k_spinlock_key_t key = k_spin_lock(&lock);
	bool pending = (work_delayable_busy_get_locked(dwork) != 0U);
	bool need_wait = false;

	if (pending) {
		(void)cancel_delayable_async_locked(dwork);
		need_wait = cancel_sync_locked(&dwork->work, canceller);
	}

	k_spin_unlock(&lock, key);

	if (need_wait) {
		k_sem_take(&canceller->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, cancel_delayable_sync, dwork, sync, pending);
	return pending;
}

bool k_work_flush_delayable(struct k_work_delayable *dwork,
			    struct k_work_sync *sync)
{
	__ASSERT_NO_MSG(dwork != NULL);
	__ASSERT_NO_MSG(sync != NULL);
	__ASSERT_NO_MSG(!k_is_in_isr());
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(sync));
#endif

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work, flush_delayable, dwork, sync);

	struct k_work *work = &dwork->work;
	struct z_work_flusher *flusher = &sync->flusher;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* If it's idle release the lock and return immediately. */
	if (work_busy_get_locked(work) == 0U) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush_delayable, dwork, sync, false);

		return false;
	}

	/* If unscheduling did something then submit it.  Ignore a
	 * failed submission (e.g. when cancelling).
	 */
	if (unschedule_locked(dwork)) {
		struct k_work_q *queue = dwork->queue;

		(void)submit_to_queue_locked(work, &queue);
	}

	/* Wait for it to finish */
	bool need_flush = work_flush_locked(work, flusher);

	k_spin_unlock(&lock, key);

	/* If necessary wait until the flusher item completes */
	if (need_flush) {
		k_sem_take(&flusher->sem, K_FOREVER);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work, flush_delayable, dwork, sync, need_flush);

	return need_flush;
}

#endif /* CONFIG_SYS_CLOCK_EXISTS */