/*
 * Copyright (c) 2017 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Kernel asynchronous event polling interface.
 *
 * This polling mechanism allows waiting on multiple events concurrently,
 * whether they are triggered directly (e.g. poll signals) or signalled by
 * kernel objects or other kernel constructs.
 */
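
/* Typical usage (illustrative sketch only; my_sem and my_signal stand for
 * a k_sem and a k_poll_signal initialized elsewhere): wait up to 100 ms
 * for either object to become ready, then reset the event states before
 * reusing the array.
 *
 *	struct k_poll_event events[2];
 *
 *	k_poll_event_init(&events[0], K_POLL_TYPE_SEM_AVAILABLE,
 *			  K_POLL_MODE_NOTIFY_ONLY, &my_sem);
 *	k_poll_event_init(&events[1], K_POLL_TYPE_SIGNAL,
 *			  K_POLL_MODE_NOTIFY_ONLY, &my_signal);
 *
 *	int rc = k_poll(events, 2, K_MSEC(100));
 *
 *	if (rc == 0 && events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
 *		k_sem_take(events[0].sem, K_NO_WAIT);
 *	}
 *	events[0].state = K_POLL_STATE_NOT_READY;
 *	events[1].state = K_POLL_STATE_NOT_READY;
 */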

#include <kernel.h>
#include <kernel_structs.h>
#include <kernel_internal.h>
#include <wait_q.h>
#include <ksched.h>
#include <syscall_handler.h>
#include <sys/dlist.h>
#include <sys/util.h>
#include <sys/__assert.h>
#include <stdbool.h>

/* Single subsystem lock. Locking per-event would be better on highly
 * contended SMP systems, but the original locking scheme here is
 * subtle (it relies on releasing/reacquiring the lock in areas for
 * latency control and it's sometimes hard to see exactly what data is
 * "inside" a given critical section). Do the synchronization port
 * later as an optimization.
 */
static struct k_spinlock lock;

enum POLL_MODE { MODE_NONE, MODE_POLL, MODE_TRIGGERED };

static int signal_poller(struct k_poll_event *event, uint32_t state);
static int signal_triggered_work(struct k_poll_event *event, uint32_t status);

void k_poll_event_init(struct k_poll_event *event, uint32_t type,
		       int mode, void *obj)
{
	__ASSERT(mode == K_POLL_MODE_NOTIFY_ONLY,
		 "only NOTIFY_ONLY mode is supported\n");
	__ASSERT(type < (BIT(_POLL_NUM_TYPES)), "invalid type\n");
	__ASSERT(obj != NULL, "must provide an object\n");

	event->poller = NULL;
	/* event->tag is left uninitialized: the user will set it if needed */
	event->type = type;
	event->state = K_POLL_STATE_NOT_READY;
	event->mode = mode;
	event->unused = 0U;
	event->obj = obj;

	SYS_PORT_TRACING_FUNC(k_poll_api, event_init, event);
}

/* must be called with interrupts locked */
static inline bool is_condition_met(struct k_poll_event *event, uint32_t *state)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		if (k_sem_count_get(event->sem) > 0U) {
			*state = K_POLL_STATE_SEM_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		if (!k_queue_is_empty(event->queue)) {
			*state = K_POLL_STATE_FIFO_DATA_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_SIGNAL:
		if (event->signal->signaled != 0U) {
			*state = K_POLL_STATE_SIGNALED;
			return true;
		}
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		if (event->msgq->used_msgs > 0) {
			*state = K_POLL_STATE_MSGQ_DATA_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_IGNORE:
		break;
	default:
		__ASSERT(false, "invalid event type (0x%x)\n", event->type);
		break;
	}

	return false;
}

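/* Map a poller back to the thread that contains it: pollers used by
 * k_poll() are embedded in struct k_thread, so CONTAINER_OF() recovers
 * the owner. A NULL poller yields a NULL thread.
 */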
static struct k_thread *poller_thread(struct z_poller *p)
{
	return p ? CONTAINER_OF(p, struct k_thread, poller) : NULL;
}

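/* Insert an event into an object's poll_events list, keeping the list
 * ordered by the priority of each event's poller thread (highest
 * priority first) so the most important waiter is signalled first.
 */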
static inline void add_event(sys_dlist_t *events, struct k_poll_event *event,
			     struct z_poller *poller)
{
	struct k_poll_event *pending;

	pending = (struct k_poll_event *)sys_dlist_peek_tail(events);
	if ((pending == NULL) ||
	    (z_sched_prio_cmp(poller_thread(pending->poller),
			      poller_thread(poller)) > 0)) {
		sys_dlist_append(events, &event->_node);
		return;
	}

	SYS_DLIST_FOR_EACH_CONTAINER(events, pending, _node) {
		if (z_sched_prio_cmp(poller_thread(poller),
				     poller_thread(pending->poller)) > 0) {
			sys_dlist_insert(&pending->_node, &event->_node);
			return;
		}
	}

	sys_dlist_append(events, &event->_node);
}

/* must be called with interrupts locked */
static inline void register_event(struct k_poll_event *event,
				  struct z_poller *poller)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem != NULL, "invalid semaphore\n");
		add_event(&event->sem->poll_events, event, poller);
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue != NULL, "invalid queue\n");
		add_event(&event->queue->poll_events, event, poller);
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal != NULL, "invalid poll signal\n");
		add_event(&event->signal->poll_events, event, poller);
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		__ASSERT(event->msgq != NULL, "invalid message queue\n");
		add_event(&event->msgq->poll_events, event, poller);
		break;
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(false, "invalid event type\n");
		break;
	}

	event->poller = poller;
}

/* must be called with interrupts locked */
static inline void clear_event_registration(struct k_poll_event *event)
{
	bool remove_event = false;

	event->poller = NULL;

	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem != NULL, "invalid semaphore\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue != NULL, "invalid queue\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal != NULL, "invalid poll signal\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		__ASSERT(event->msgq != NULL, "invalid message queue\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(false, "invalid event type\n");
		break;
	}
	if (remove_event && sys_dnode_is_linked(&event->_node)) {
		sys_dlist_remove(&event->_node);
	}
}

/* must be called with interrupts locked */
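/* The subsystem lock is released and re-acquired between each event to
 * keep the critical section, and thus interrupt latency, bounded (see
 * the lock comment at the top of this file).
 */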
static inline void clear_event_registrations(struct k_poll_event *events,
					     int num_events,
					     k_spinlock_key_t key)
{
	while (num_events--) {
		clear_event_registration(&events[num_events]);
		k_spin_unlock(&lock, key);
		key = k_spin_lock(&lock);
	}
}

static inline void set_event_ready(struct k_poll_event *event, uint32_t state)
{
	event->poller = NULL;
	event->state |= state;
}

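/* Walk the event array: events whose condition is already met are marked
 * ready immediately; the others are registered on their object unless
 * just_check is set or the poller has already stopped polling. Returns
 * the number of events actually registered.
 */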
static inline int register_events(struct k_poll_event *events,
				  int num_events,
				  struct z_poller *poller,
				  bool just_check)
{
	int events_registered = 0;

	for (int ii = 0; ii < num_events; ii++) {
		k_spinlock_key_t key;
		uint32_t state;

		key = k_spin_lock(&lock);
		if (is_condition_met(&events[ii], &state)) {
			set_event_ready(&events[ii], state);
			poller->is_polling = false;
		} else if (!just_check && poller->is_polling) {
			register_event(&events[ii], poller);
			events_registered += 1;
		} else {
			/* Event is not one of those identified in is_condition_met()
			 * catching non-polling events, or is marked for just check,
			 * or not marked for polling. No action needed.
			 */
			;
		}
		k_spin_unlock(&lock, key);
	}

	return events_registered;
}

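/* Poller callback used in MODE_POLL: wake up the thread blocked in
 * k_poll(), setting its swap return value to 0, or to -EINTR if the
 * event was cancelled.
 */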
static int signal_poller(struct k_poll_event *event, uint32_t state)
{
	struct k_thread *thread = poller_thread(event->poller);

	__ASSERT(thread != NULL, "poller should have a thread\n");

	if (!z_is_thread_pending(thread)) {
		return 0;
	}

	if (z_is_thread_timeout_expired(thread)) {
		return -EAGAIN;
	}

	z_unpend_thread(thread);
	arch_thread_return_value_set(thread,
		state == K_POLL_STATE_CANCELLED ? -EINTR : 0);

	if (!z_is_thread_ready(thread)) {
		return 0;
	}

	z_ready_thread(thread);

	return 0;
}

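/* Implementation of k_poll(): register the events, pend the calling
 * thread until one of them becomes ready or the timeout expires, then
 * clear all registrations. Returns 0 when at least one event is ready,
 * -EAGAIN if the timeout (or K_NO_WAIT) expired first, or -EINTR if
 * waiting was cancelled.
 */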
int z_impl_k_poll(struct k_poll_event *events, int num_events,
		  k_timeout_t timeout)
{
	int events_registered;
	k_spinlock_key_t key;
	struct z_poller *poller = &_current->poller;

	poller->is_polling = true;
	poller->mode = MODE_POLL;

	__ASSERT(!arch_is_in_isr(), "");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events >= 0, "<0 events\n");

	SYS_PORT_TRACING_FUNC_ENTER(k_poll_api, poll, events);

	events_registered = register_events(events, num_events, poller,
					    K_TIMEOUT_EQ(timeout, K_NO_WAIT));

	key = k_spin_lock(&lock);

	/*
	 * If we're not polling anymore, it means that at least one event
	 * condition is met, either when looping through the events here or
	 * because one of the events registered has had its state changed.
	 */
	if (!poller->is_polling) {
		clear_event_registrations(events, events_registered, key);
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, 0);

		return 0;
	}

	poller->is_polling = false;

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, -EAGAIN);

		return -EAGAIN;
	}

	static _wait_q_t wait_q = Z_WAIT_Q_INIT(&wait_q);

	int swap_rc = z_pend_curr(&lock, key, &wait_q, timeout);

	/*
	 * Clear all event registrations. If events happen while we're in this
	 * loop, and we already had one that triggered, that's OK: they will
	 * end up in the list of events that are ready; if we timed out, and
	 * events happen while we're in this loop, that is OK as well since
	 * we already know the return code (-EAGAIN), and even if they are
	 * added to the list of events that occurred, the user has to check the
	 * return code first, which invalidates the whole list of event states.
	 */
	key = k_spin_lock(&lock);
	clear_event_registrations(events, events_registered, key);
	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, swap_rc);

	return swap_rc;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_poll(struct k_poll_event *events,
				int num_events, k_timeout_t timeout)
{
	int ret;
	k_spinlock_key_t key;
	struct k_poll_event *events_copy = NULL;
	uint32_t bounds;

	/* Validate the events buffer and make a copy of it in an
	 * allocated kernel-side buffer.
	 */
	if (Z_SYSCALL_VERIFY(num_events >= 0U)) {
		ret = -EINVAL;
		goto out;
	}
	if (Z_SYSCALL_VERIFY_MSG(!u32_mul_overflow(num_events,
						   sizeof(struct k_poll_event),
						   &bounds),
				 "num_events too large")) {
		ret = -EINVAL;
		goto out;
	}
	events_copy = z_thread_malloc(bounds);
	if (!events_copy) {
		ret = -ENOMEM;
		goto out;
	}

	key = k_spin_lock(&lock);
	if (Z_SYSCALL_MEMORY_WRITE(events, bounds)) {
		k_spin_unlock(&lock, key);
		goto oops_free;
	}
	(void)memcpy(events_copy, events, bounds);
	k_spin_unlock(&lock, key);

	/* Validate what's inside events_copy */
	for (int i = 0; i < num_events; i++) {
		struct k_poll_event *e = &events_copy[i];

		if (Z_SYSCALL_VERIFY(e->mode == K_POLL_MODE_NOTIFY_ONLY)) {
			ret = -EINVAL;
			goto out_free;
		}

		switch (e->type) {
		case K_POLL_TYPE_IGNORE:
			break;
		case K_POLL_TYPE_SIGNAL:
			Z_OOPS(Z_SYSCALL_OBJ(e->signal, K_OBJ_POLL_SIGNAL));
			break;
		case K_POLL_TYPE_SEM_AVAILABLE:
			Z_OOPS(Z_SYSCALL_OBJ(e->sem, K_OBJ_SEM));
			break;
		case K_POLL_TYPE_DATA_AVAILABLE:
			Z_OOPS(Z_SYSCALL_OBJ(e->queue, K_OBJ_QUEUE));
			break;
		case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
			Z_OOPS(Z_SYSCALL_OBJ(e->msgq, K_OBJ_MSGQ));
			break;
		default:
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = k_poll(events_copy, num_events, timeout);
	(void)memcpy((void *)events, events_copy, bounds);
out_free:
	k_free(events_copy);
out:
	return ret;
oops_free:
	k_free(events_copy);
	Z_OOPS(1);
}
#include <syscalls/k_poll_mrsh.c>
#endif

/* must be called with interrupts locked */
static int signal_poll_event(struct k_poll_event *event, uint32_t state)
{
	struct z_poller *poller = event->poller;
	int retcode = 0;

	if (poller != NULL) {
		if (poller->mode == MODE_POLL) {
			retcode = signal_poller(event, state);
		} else if (poller->mode == MODE_TRIGGERED) {
			retcode = signal_triggered_work(event, state);
		} else {
			/* Poller is not in poll or triggered mode. No action needed. */
			;
		}

		poller->is_polling = false;

		if (retcode < 0) {
			return retcode;
		}
	}

	set_event_ready(event, state);
	return retcode;
}

void z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state)
{
	struct k_poll_event *poll_event;

	poll_event = (struct k_poll_event *)sys_dlist_get(events);
	if (poll_event != NULL) {
		(void) signal_poll_event(poll_event, state);
	}
}

void z_impl_k_poll_signal_init(struct k_poll_signal *sig)
{
	sys_dlist_init(&sig->poll_events);
	sig->signaled = 0U;
	/* sig->result is left uninitialized */
	z_object_init(sig);

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_init, sig);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_poll_signal_init(struct k_poll_signal *sig)
{
	Z_OOPS(Z_SYSCALL_OBJ_INIT(sig, K_OBJ_POLL_SIGNAL));
	z_impl_k_poll_signal_init(sig);
}
#include <syscalls/k_poll_signal_init_mrsh.c>
#endif

void z_impl_k_poll_signal_reset(struct k_poll_signal *sig)
{
	sig->signaled = 0U;

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_reset, sig);
}

void z_impl_k_poll_signal_check(struct k_poll_signal *sig,
				unsigned int *signaled, int *result)
{
	*signaled = sig->signaled;
	*result = sig->result;

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_check, sig);
}

#ifdef CONFIG_USERSPACE
void z_vrfy_k_poll_signal_check(struct k_poll_signal *sig,
				unsigned int *signaled, int *result)
{
	Z_OOPS(Z_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(signaled, sizeof(unsigned int)));
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(result, sizeof(int)));
	z_impl_k_poll_signal_check(sig, signaled, result);
}
#include <syscalls/k_poll_signal_check_mrsh.c>
#endif

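/* Raise a poll signal: record the result, mark the signal as signaled,
 * and if an event is registered on it, signal that event and trigger a
 * reschedule.
 */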
int z_impl_k_poll_signal_raise(struct k_poll_signal *sig, int result)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	struct k_poll_event *poll_event;

	sig->result = result;
	sig->signaled = 1U;

	poll_event = (struct k_poll_event *)sys_dlist_get(&sig->poll_events);
	if (poll_event == NULL) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC(k_poll_api, signal_raise, sig, 0);

		return 0;
	}

	int rc = signal_poll_event(poll_event, K_POLL_STATE_SIGNALED);

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_raise, sig, rc);

	z_reschedule(&lock, key);
	return rc;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_poll_signal_raise(struct k_poll_signal *sig,
					     int result)
{
	Z_OOPS(Z_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	return z_impl_k_poll_signal_raise(sig, result);
}
#include <syscalls/k_poll_signal_raise_mrsh.c>

static inline void z_vrfy_k_poll_signal_reset(struct k_poll_signal *sig)
{
	Z_OOPS(Z_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	z_impl_k_poll_signal_reset(sig);
}
#include <syscalls/k_poll_signal_reset_mrsh.c>

#endif

static void triggered_work_handler(struct k_work *work)
{
	struct k_work_poll *twork =
			CONTAINER_OF(work, struct k_work_poll, work);

	/*
	 * If callback is not set, the k_work_poll_submit_to_queue()
	 * already cleared event registrations.
	 */
	if (twork->poller.mode != MODE_NONE) {
		k_spinlock_key_t key;

		key = k_spin_lock(&lock);
		clear_event_registrations(twork->events,
					  twork->num_events, key);
		k_spin_unlock(&lock, key);
	}

	/* Drop work ownership and execute real handler. */
	twork->workq = NULL;
	twork->real_handler(work);
}

static void triggered_work_expiration_handler(struct _timeout *timeout)
{
	struct k_work_poll *twork =
		CONTAINER_OF(timeout, struct k_work_poll, timeout);

	twork->poller.is_polling = false;
	twork->poll_result = -EAGAIN;
	k_work_submit_to_queue(twork->workq, &twork->work);
}

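/* Poller callback used in MODE_TRIGGERED: abort the pending timeout and
 * submit the triggered work item to its workqueue.
 */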
static int signal_triggered_work(struct k_poll_event *event, uint32_t status)
{
	struct z_poller *poller = event->poller;
	struct k_work_poll *twork =
		CONTAINER_OF(poller, struct k_work_poll, poller);

	if (poller->is_polling && twork->workq != NULL) {
		struct k_work_q *work_q = twork->workq;

		z_abort_timeout(&twork->timeout);
		twork->poll_result = 0;
		k_work_submit_to_queue(work_q, &twork->work);
	}

	return 0;
}

static int triggered_work_cancel(struct k_work_poll *work,
				 k_spinlock_key_t key)
{
	/* Check if the work is waiting for an event. */
	if (work->poller.is_polling && work->poller.mode != MODE_NONE) {
		/* Remove timeout associated with the work. */
		z_abort_timeout(&work->timeout);

		/*
		 * Prevent work execution if an event arrives while we are
		 * clearing registrations.
		 */
		work->poller.mode = MODE_NONE;

		/* Clear registrations and work ownership. */
		clear_event_registrations(work->events, work->num_events, key);
		work->workq = NULL;
		return 0;
	}

	/*
	 * If we reached here, the work is either being registered in
	 * the k_work_poll_submit_to_queue(), executed or is pending.
	 * Only in the last case do we have a chance to cancel it, but
	 * unfortunately there is no public API performing this task.
	 */

	return -EINVAL;
}

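/* Usage sketch (illustrative only; my_work, my_events[], my_queue and
 * my_handler are assumed to be defined by the caller): run a handler on
 * the system workqueue once data is available in a queue.
 *
 *	k_work_poll_init(&my_work, my_handler);
 *	k_poll_event_init(&my_events[0], K_POLL_TYPE_DATA_AVAILABLE,
 *			  K_POLL_MODE_NOTIFY_ONLY, &my_queue);
 *	k_work_poll_submit(&my_work, my_events, 1, K_FOREVER);
 */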
void k_work_poll_init(struct k_work_poll *work,
		      k_work_handler_t handler)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_poll, init, work);

	*work = (struct k_work_poll) {};
	k_work_init(&work->work, triggered_work_handler);
	work->real_handler = handler;
	z_init_timeout(&work->timeout);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_poll, init, work);
}

int k_work_poll_submit_to_queue(struct k_work_q *work_q,
				struct k_work_poll *work,
				struct k_poll_event *events,
				int num_events,
				k_timeout_t timeout)
{
	int events_registered;
	k_spinlock_key_t key;

	__ASSERT(work_q != NULL, "NULL work_q\n");
	__ASSERT(work != NULL, "NULL work\n");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events > 0, "zero events\n");

	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, submit_to_queue, work_q, work, timeout);

	/* Take ownership of the work if it is possible. */
	key = k_spin_lock(&lock);
	if (work->workq != NULL) {
		if (work->workq == work_q) {
			int retval;

			retval = triggered_work_cancel(work, key);
			if (retval < 0) {
				k_spin_unlock(&lock, key);

				SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q,
					work, timeout, retval);

				return retval;
			}
		} else {
			k_spin_unlock(&lock, key);

			SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q,
				work, timeout, -EADDRINUSE);

			return -EADDRINUSE;
		}
	}

	work->poller.is_polling = true;
	work->workq = work_q;
	work->poller.mode = MODE_NONE;
	k_spin_unlock(&lock, key);

	/* Save list of events. */
	work->events = events;
	work->num_events = num_events;

	/* Clear result */
	work->poll_result = -EINPROGRESS;

	/* Register events */
	events_registered = register_events(events, num_events,
					    &work->poller, false);

	key = k_spin_lock(&lock);
	if (work->poller.is_polling && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		/*
		 * Poller is still polling.
		 * No event is ready and all are watched.
		 */
		__ASSERT(num_events == events_registered,
			 "Some events were not registered!\n");

		/* Setup timeout if such action is requested */
		if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
			z_add_timeout(&work->timeout,
				      triggered_work_expiration_handler,
				      timeout);
		}

		/* From now, any event will result in submitted work. */
		work->poller.mode = MODE_TRIGGERED;
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q, work, timeout, 0);

		return 0;
	}

	/*
	 * The K_NO_WAIT timeout was specified or at least one event
	 * was ready at registration time or changed state since
	 * registration. Hopefully, the poller mode was not set, so
	 * work was not submitted to workqueue.
	 */

	/*
	 * If poller is still polling, no watched event occurred. This means
	 * we reached here due to K_NO_WAIT timeout "expiration".
	 */
	if (work->poller.is_polling) {
		work->poller.is_polling = false;
		work->poll_result = -EAGAIN;
	} else {
		work->poll_result = 0;
	}

	/* Clear registrations. */
	clear_event_registrations(events, events_registered, key);
	k_spin_unlock(&lock, key);

	/* Submit work. */
	k_work_submit_to_queue(work_q, &work->work);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q, work, timeout, 0);

	return 0;
}

int k_work_poll_submit(struct k_work_poll *work,
		       struct k_poll_event *events,
		       int num_events,
		       k_timeout_t timeout)
{
	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, submit, work, timeout);

	int ret = k_work_poll_submit_to_queue(&k_sys_work_q, work,
					      events, num_events, timeout);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit, work, timeout, ret);

	return ret;
}

int k_work_poll_cancel(struct k_work_poll *work)
{
	k_spinlock_key_t key;
	int retval;

	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, cancel, work);

	/* Check if the work was submitted. */
	if (work == NULL || work->workq == NULL) {
		SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, cancel, work, -EINVAL);

		return -EINVAL;
	}

	key = k_spin_lock(&lock);
	retval = triggered_work_cancel(work, key);
	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, cancel, work, retval);

	return retval;
}