/*
 * Copyright (c) 2016 Intel Corporation
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * Workqueue support functions
 */

#include <kernel_structs.h>
#include <wait_q.h>
#include <errno.h>

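/*
 * Thread entry point for a workqueue: block on the queue for work items
 * and, for each item whose pending bit is still set, clear the bit and
 * invoke its handler.  The k_yield() between items keeps the workqueue
 * thread from monopolizing the CPU when the queue never drains.
 */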
static void work_q_main(void *work_q_ptr, void *p2, void *p3)
{
	struct k_work_q *work_q = work_q_ptr;

	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	while (1) {
		struct k_work *work;
		k_work_handler_t handler;

		work = k_queue_get(&work_q->queue, K_FOREVER);
		if (!work) {
			continue;
		}

		handler = work->handler;

		/* Reset pending state so it can be resubmitted by handler */
		if (atomic_test_and_clear_bit(work->flags,
					      K_WORK_STATE_PENDING)) {
			handler(work);
		}

		/* Make sure we don't hog the CPU if the queue never (or
		 * very rarely) gets empty.
		 */
		k_yield();
	}
}

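/*
 * Initialize a workqueue's queue and spawn its dedicated thread at the
 * given priority, running on the caller-supplied stack.
 *
 * A minimal usage sketch (the stack area, its size and the priority
 * below are hypothetical values chosen by the caller):
 *
 *	K_THREAD_STACK_DEFINE(my_wq_stack, 1024);
 *	static struct k_work_q my_wq;
 *
 *	k_work_q_start(&my_wq, my_wq_stack,
 *		       K_THREAD_STACK_SIZEOF(my_wq_stack), 5);
 */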
void k_work_q_start(struct k_work_q *work_q, k_thread_stack_t *stack,
		    size_t stack_size, int prio)
{
	k_queue_init(&work_q->queue);
	k_thread_create(&work_q->thread, stack, stack_size, work_q_main,
			work_q, 0, 0, prio, 0, 0);
	_k_object_init(work_q);
}

#ifdef CONFIG_SYS_CLOCK_EXISTS
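/*
 * Timeout expiry callback for delayed work: recover the containing
 * k_delayed_work item and submit it to the workqueue it was attached
 * to when it was scheduled.
 */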
static void work_timeout(struct _timeout *t)
{
	struct k_delayed_work *w = CONTAINER_OF(t, struct k_delayed_work,
						timeout);

	/* submit work to workqueue */
	k_work_submit_to_queue(w->work_q, &w->work);
}

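/*
 * Initialize a delayed work item: set up the embedded work item and its
 * timeout, and leave it detached from any workqueue until it is
 * submitted.
 */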
void k_delayed_work_init(struct k_delayed_work *work, k_work_handler_t handler)
{
	k_work_init(&work->work, handler);
	_init_timeout(&work->timeout, work_timeout);
	work->work_q = NULL;

	_k_object_init(work);
}

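/*
 * Schedule a delayed work item on a workqueue after 'delay' milliseconds,
 * submitting it immediately when the delay is zero.  Returns -EADDRINUSE
 * if the item is already attached to a different workqueue, a negative
 * error code if a previous submission cannot be cancelled, and 0 on
 * success.
 *
 * A minimal usage sketch (my_work_q, my_handler and the 100 ms delay are
 * hypothetical names and values supplied by the caller):
 *
 *	static struct k_delayed_work dwork;
 *
 *	k_delayed_work_init(&dwork, my_handler);
 *	k_delayed_work_submit_to_queue(&my_work_q, &dwork, 100);
 */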
int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
				   struct k_delayed_work *work,
				   s32_t delay)
{
	int key = irq_lock();
	int err;

	/* Work cannot be active in multiple queues */
	if (work->work_q && work->work_q != work_q) {
		err = -EADDRINUSE;
		goto done;
	}

	/* Cancel if work has been submitted */
	if (work->work_q == work_q) {
		err = k_delayed_work_cancel(work);
		if (err < 0) {
			goto done;
		}
	}

	/* Attach workqueue so the timeout callback can submit it */
	work->work_q = work_q;

	if (!delay) {
		/* Submit work immediately if the delay is zero */
		k_work_submit_to_queue(work_q, &work->work);
	} else {
		/* Add timeout */
		_add_timeout(NULL, &work->timeout, NULL,
			     _TICK_ALIGN + _ms_to_ticks(delay));
	}

	err = 0;

done:
	irq_unlock(key);

	return err;
}

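/*
 * Cancel a delayed work item: returns -EINVAL if the item is not
 * attached to a workqueue or cannot be removed from its queue.
 * Otherwise the item is pulled from the queue (or its timeout aborted
 * if it has not been submitted yet), detached and its pending bit
 * cleared.
 */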
int k_delayed_work_cancel(struct k_delayed_work *work)
{
	int key = irq_lock();

	if (!work->work_q) {
		irq_unlock(key);
		return -EINVAL;
	}

	if (k_work_pending(&work->work)) {
		/* Remove from the queue if already submitted */
		if (!k_queue_remove(&work->work_q->queue, &work->work)) {
			irq_unlock(key);
			return -EINVAL;
		}
	} else {
		_abort_timeout(&work->timeout);
	}

	/* Detach from workqueue */
	work->work_q = NULL;

	atomic_clear_bit(work->work.flags, K_WORK_STATE_PENDING);
	irq_unlock(key);

	return 0;
}
#endif /* CONFIG_SYS_CLOCK_EXISTS */