blob: 699a33d5cdfe9e5b0c4df1004ce79d901e2dc9d8 [file] [log] [blame]
/*
 * Copyright (c) 2017 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Kernel asynchronous event polling interface.
 *
 * This polling mechanism allows waiting on multiple events concurrently,
 * either events triggered directly, or from kernel objects or other kernel
 * constructs.
 */
16
17#include <kernel.h>
18#include <kernel_structs.h>
Andy Ross245b54e2018-02-08 09:10:46 -080019#include <kernel_internal.h>
Benjamin Walshacc68c12017-01-29 18:57:45 -050020#include <wait_q.h>
21#include <ksched.h>
Andrew Boie3772f772018-05-07 16:52:57 -070022#include <syscall_handler.h>
Anas Nashif536dd5a2019-06-26 10:33:52 -040023#include <sys/slist.h>
Anas Nashifee9dd1a2019-06-26 10:33:41 -040024#include <sys/dlist.h>
Anas Nashifa2fd7d72019-06-26 10:33:55 -040025#include <sys/util.h>
Anas Nashif5eb90ec2019-06-26 10:33:39 -040026#include <sys/__assert.h>
Flavio Ceolin4f2e9a72018-12-16 14:27:10 -080027#include <stdbool.h>
Benjamin Walshacc68c12017-01-29 18:57:45 -050028
/* Single subsystem lock.  Locking per-event would be better on highly
 * contended SMP systems, but the original locking scheme here is
 * subtle (it relies on releasing/reacquiring the lock in areas for
 * latency control and it's sometimes hard to see exactly what data is
 * "inside" a given critical section). Do the synchronization port
 * later as an optimization.
 */
static struct k_spinlock lock;
37
Kumar Galacc334c72017-04-21 10:55:34 -050038void k_poll_event_init(struct k_poll_event *event, u32_t type,
Benjamin Walshacc68c12017-01-29 18:57:45 -050039 int mode, void *obj)
40{
41 __ASSERT(mode == K_POLL_MODE_NOTIFY_ONLY,
42 "only NOTIFY_ONLY mode is supported\n");
Flavio Ceolin8aec0872018-08-15 11:52:00 -070043 __ASSERT(type < (BIT(_POLL_NUM_TYPES)), "invalid type\n");
Flavio Ceolind8837c62018-09-18 12:40:54 -070044 __ASSERT(obj != NULL, "must provide an object\n");
Benjamin Walshacc68c12017-01-29 18:57:45 -050045
46 event->poller = NULL;
Benjamin Walsh969d4a72017-02-02 11:25:11 -050047 /* event->tag is left uninitialized: the user will set it if needed */
Benjamin Walshacc68c12017-01-29 18:57:45 -050048 event->type = type;
49 event->state = K_POLL_STATE_NOT_READY;
50 event->mode = mode;
Patrik Flykt24d71432019-03-26 19:57:45 -060051 event->unused = 0U;
Benjamin Walshacc68c12017-01-29 18:57:45 -050052 event->obj = obj;
53}
54
/* must be called with interrupts locked */
/*
 * Check whether an event's wait condition is already satisfied.
 *
 * On success, writes the corresponding K_POLL_STATE_* value to *state
 * and returns true; otherwise returns false and leaves *state untouched.
 * K_POLL_TYPE_IGNORE events are never considered met.
 */
static inline bool is_condition_met(struct k_poll_event *event, u32_t *state)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		if (k_sem_count_get(event->sem) > 0) {
			*state = K_POLL_STATE_SEM_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		if (!k_queue_is_empty(event->queue)) {
			*state = K_POLL_STATE_FIFO_DATA_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_SIGNAL:
		if (event->signal->signaled != 0U) {
			*state = K_POLL_STATE_SIGNALED;
			return true;
		}
		break;
	case K_POLL_TYPE_IGNORE:
		break;
	default:
		__ASSERT(false, "invalid event type (0x%x)\n", event->type);
		break;
	}

	return false;
}
86
/*
 * Insert an event into an object's poll_events list, keeping the list
 * ordered by the priority of each event's polling thread (highest
 * priority first), so the highest-priority poller is signaled first.
 */
static inline void add_event(sys_dlist_t *events, struct k_poll_event *event,
			     struct _poller *poller)
{
	struct k_poll_event *pending;

	/* Fast path: list empty, or tail already outranks us -> append. */
	pending = (struct k_poll_event *)sys_dlist_peek_tail(events);
	if ((pending == NULL) ||
	    z_is_t1_higher_prio_than_t2(pending->poller->thread,
					poller->thread)) {
		sys_dlist_append(events, &event->_node);
		return;
	}

	/* Otherwise insert before the first lower-priority poller. */
	SYS_DLIST_FOR_EACH_CONTAINER(events, pending, _node) {
		if (z_is_t1_higher_prio_than_t2(poller->thread,
						pending->poller->thread)) {
			sys_dlist_insert(&pending->_node, &event->_node);
			return;
		}
	}

	sys_dlist_append(events, &event->_node);
}
110
/* must be called with interrupts locked */
/*
 * Attach @a event to the poll_events list of the object it watches and
 * record @a poller as its owner.  K_POLL_TYPE_IGNORE events are not
 * attached to anything.  Always returns 0.
 */
static inline int register_event(struct k_poll_event *event,
				 struct _poller *poller)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem != NULL, "invalid semaphore\n");
		add_event(&event->sem->poll_events, event, poller);
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue != NULL, "invalid queue\n");
		add_event(&event->queue->poll_events, event, poller);
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal != NULL, "invalid poll signal\n");
		add_event(&event->signal->poll_events, event, poller);
		break;
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(false, "invalid event type\n");
		break;
	}

	event->poller = poller;

	return 0;
}
140
/* must be called with interrupts locked */
/*
 * Undo register_event() for one event: detach it from the watched
 * object's poll_events list (if it is still linked; it may already have
 * been removed when the event was signaled) and drop its poller.
 */
static inline void clear_event_registration(struct k_poll_event *event)
{
	bool remove = false;

	event->poller = NULL;

	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem != NULL, "invalid semaphore\n");
		remove = true;
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue != NULL, "invalid queue\n");
		remove = true;
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal != NULL, "invalid poll signal\n");
		remove = true;
		break;
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(false, "invalid event type\n");
		break;
	}
	if (remove && sys_dnode_is_linked(&event->_node)) {
		sys_dlist_remove(&event->_node);
	}
}
172
/* must be called with interrupts locked */
/*
 * Clear the registrations of the first @a num_events entries of
 * @a events, in reverse order.  The subsystem lock is intentionally
 * released and reacquired between entries to bound interrupt latency;
 * callers must therefore not assume the whole loop is one critical
 * section.
 */
static inline void clear_event_registrations(struct k_poll_event *events,
					     int num_events,
					     k_spinlock_key_t key)
{
	while (num_events--) {
		clear_event_registration(&events[num_events]);
		k_spin_unlock(&lock, key);
		key = k_spin_lock(&lock);
	}
}
184
/* Mark an event as triggered (OR-ing in the new state) and detach it
 * from its poller so it is not signaled twice.
 */
static inline void set_event_ready(struct k_poll_event *event, u32_t state)
{
	event->poller = NULL;
	event->state |= state;
}
190
/*
 * Walk the event array: events whose condition is already met are marked
 * ready (which also stops the poller from registering further events);
 * otherwise, unless @a just_check is set, the event is registered so the
 * poller is notified when it triggers.  The lock is taken per-event to
 * limit interrupt latency.
 *
 * @return Number of events actually registered.
 */
static inline int register_events(struct k_poll_event *events,
				  int num_events,
				  struct _poller *poller,
				  bool just_check)
{
	int events_registered = 0;

	for (int ii = 0; ii < num_events; ii++) {
		k_spinlock_key_t key;
		u32_t state;

		key = k_spin_lock(&lock);
		if (is_condition_met(&events[ii], &state)) {
			set_event_ready(&events[ii], state);
			/* At least one event is ready: stop registering. */
			poller->is_polling = false;
		} else if (!just_check && poller->is_polling) {
			int rc = register_event(&events[ii], poller);

			if (rc == 0) {
				events_registered += 1;
			} else {
				__ASSERT(false, "unexpected return code\n");
			}
		}
		k_spin_unlock(&lock, key);
	}

	return events_registered;
}
219
/*
 * Poller callback used by k_poll(): wake the thread pending in k_poll()
 * when one of its events triggers.
 *
 * Returns 0 on success (including the benign cases where the thread is
 * not pending or not yet ready), or -EAGAIN if the thread's timeout has
 * already expired (it will wake with -EAGAIN on its own).
 */
static int k_poll_poller_cb(struct k_poll_event *event, u32_t state)
{
	struct k_thread *thread = event->poller->thread;

	__ASSERT(thread != NULL, "poller should have a thread\n");

	if (!z_is_thread_pending(thread)) {
		return 0;
	}

	if (z_is_thread_timeout_expired(thread)) {
		return -EAGAIN;
	}

	z_unpend_thread(thread);
	/* Cancelled polling reports -EINTR to the woken thread. */
	arch_thread_return_value_set(thread,
		state == K_POLL_STATE_CANCELLED ? -EINTR : 0);

	if (!z_is_thread_ready(thread)) {
		return 0;
	}

	z_ready_thread(thread);

	return 0;
}
246
/**
 * @brief Implementation of k_poll(): wait on multiple events.
 *
 * Registers the current thread as poller on each event, then pends until
 * at least one event triggers, the timeout expires, or polling is
 * cancelled.  Must not be called from ISR context.
 *
 * @return 0 if at least one event is ready, -EAGAIN on timeout (or
 *         K_NO_WAIT with nothing ready), -EINTR if cancelled (set via
 *         k_poll_poller_cb()).
 */
int z_impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
{
	int events_registered;
	k_spinlock_key_t key;
	struct _poller poller = { .is_polling = true,
				  .thread = _current,
				  .cb = k_poll_poller_cb };

	__ASSERT(!arch_is_in_isr(), "");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events >= 0, "<0 events\n");

	events_registered = register_events(events, num_events, &poller,
					    (timeout == K_NO_WAIT));

	key = k_spin_lock(&lock);

	/*
	 * If we're not polling anymore, it means that at least one event
	 * condition is met, either when looping through the events here or
	 * because one of the events registered has had its state changed.
	 */
	if (!poller.is_polling) {
		clear_event_registrations(events, events_registered, key);
		k_spin_unlock(&lock, key);
		return 0;
	}

	poller.is_polling = false;

	if (timeout == K_NO_WAIT) {
		k_spin_unlock(&lock, key);
		return -EAGAIN;
	}

	/* Private wait queue: only this thread ever pends on it. */
	_wait_q_t wait_q = Z_WAIT_Q_INIT(&wait_q);

	int swap_rc = z_pend_curr(&lock, key, &wait_q, timeout);

	/*
	 * Clear all event registrations. If events happen while we're in this
	 * loop, and we already had one that triggered, that's OK: they will
	 * end up in the list of events that are ready; if we timed out, and
	 * events happen while we're in this loop, that is OK as well since
	 * we already know the return code (-EAGAIN), and even if they are
	 * added to the list of events that occurred, the user has to check the
	 * return code first, which invalidates the whole list of event states.
	 */
	key = k_spin_lock(&lock);
	clear_event_registrations(events, events_registered, key);
	k_spin_unlock(&lock, key);

	return swap_rc;
}
301
#ifdef CONFIG_USERSPACE
/*
 * Syscall verification wrapper for k_poll(): validates the user-supplied
 * event array, copies it into a kernel-side buffer, checks each entry's
 * mode/type and object permissions, runs k_poll() on the copy, then
 * copies the (possibly updated) events back to user memory.
 *
 * NOTE(review): Z_SYSCALL_VERIFY*() returns nonzero when the check
 * FAILS, hence the "if (Z_SYSCALL_VERIFY(...)) { error }" shape below.
 */
static inline int z_vrfy_k_poll(struct k_poll_event *events,
				int num_events, s32_t timeout)
{
	int ret;
	k_spinlock_key_t key;
	struct k_poll_event *events_copy = NULL;
	u32_t bounds;

	/* Validate the events buffer and make a copy of it in an
	 * allocated kernel-side buffer.
	 */
	if (Z_SYSCALL_VERIFY(num_events >= 0)) {
		ret = -EINVAL;
		goto out;
	}
	/* Overflow-checked byte size of the user array. */
	if (Z_SYSCALL_VERIFY_MSG(!u32_mul_overflow(num_events,
						   sizeof(struct k_poll_event),
						   &bounds),
				 "num_events too large")) {
		ret = -EINVAL;
		goto out;
	}
	events_copy = z_thread_malloc(bounds);
	if (!events_copy) {
		ret = -ENOMEM;
		goto out;
	}

	/* Copy under the lock so the check and the copy are atomic with
	 * respect to other pollers.
	 */
	key = k_spin_lock(&lock);
	if (Z_SYSCALL_MEMORY_WRITE(events, bounds)) {
		k_spin_unlock(&lock, key);
		goto oops_free;
	}
	(void)memcpy(events_copy, events, bounds);
	k_spin_unlock(&lock, key);

	/* Validate what's inside events_copy */
	for (int i = 0; i < num_events; i++) {
		struct k_poll_event *e = &events_copy[i];

		if (Z_SYSCALL_VERIFY(e->mode == K_POLL_MODE_NOTIFY_ONLY)) {
			ret = -EINVAL;
			goto out_free;
		}

		/* Each watched object must be a kernel object the caller
		 * has been granted access to.
		 */
		switch (e->type) {
		case K_POLL_TYPE_IGNORE:
			break;
		case K_POLL_TYPE_SIGNAL:
			Z_OOPS(Z_SYSCALL_OBJ(e->signal, K_OBJ_POLL_SIGNAL));
			break;
		case K_POLL_TYPE_SEM_AVAILABLE:
			Z_OOPS(Z_SYSCALL_OBJ(e->sem, K_OBJ_SEM));
			break;
		case K_POLL_TYPE_DATA_AVAILABLE:
			Z_OOPS(Z_SYSCALL_OBJ(e->queue, K_OBJ_QUEUE));
			break;
		default:
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = k_poll(events_copy, num_events, timeout);
	/* Propagate updated event states back to the caller. */
	(void)memcpy((void *)events, events_copy, bounds);
out_free:
	k_free(events_copy);
out:
	return ret;
oops_free:
	/* User buffer not writable: free the copy and oops the caller. */
	k_free(events_copy);
	Z_OOPS(1);
}
#include <syscalls/k_poll_mrsh.c>
#endif
378
Benjamin Walshacc68c12017-01-29 18:57:45 -0500379/* must be called with interrupts locked */
Andy Ross8606fab2018-03-26 10:54:40 -0700380static int signal_poll_event(struct k_poll_event *event, u32_t state)
Benjamin Walshacc68c12017-01-29 18:57:45 -0500381{
Piotr Zięcik1c4177d2019-08-27 12:19:26 +0200382 struct _poller *poller = event->poller;
383 int retcode = 0;
384
385 if (poller) {
386 if (poller->cb != NULL) {
387 retcode = poller->cb(event, state);
388 }
389
390 poller->is_polling = false;
391
392 if (retcode < 0) {
393 return retcode;
394 }
Benjamin Walshacc68c12017-01-29 18:57:45 -0500395 }
396
Benjamin Walshacc68c12017-01-29 18:57:45 -0500397 set_event_ready(event, state);
Piotr Zięcik1c4177d2019-08-27 12:19:26 +0200398 return retcode;
Benjamin Walshacc68c12017-01-29 18:57:45 -0500399}
400
Patrik Flykt4344e272019-03-08 14:19:05 -0700401void z_handle_obj_poll_events(sys_dlist_t *events, u32_t state)
Benjamin Walshacc68c12017-01-29 18:57:45 -0500402{
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +0300403 struct k_poll_event *poll_event;
Benjamin Walshacc68c12017-01-29 18:57:45 -0500404
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +0300405 poll_event = (struct k_poll_event *)sys_dlist_get(events);
Flavio Ceolin4218d5f2018-09-17 09:39:51 -0700406 if (poll_event != NULL) {
Andy Ross8606fab2018-03-26 10:54:40 -0700407 (void) signal_poll_event(poll_event, state);
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +0300408 }
Benjamin Walshacc68c12017-01-29 18:57:45 -0500409}
410
/* Initialize a poll signal: empty waiter list, not signaled. */
void z_impl_k_poll_signal_init(struct k_poll_signal *signal)
{
	sys_dlist_init(&signal->poll_events);
	signal->signaled = 0U;
	/* signal->result is left uninitialized */
	z_object_init(signal);
}
418
#ifdef CONFIG_USERSPACE
/* Syscall wrapper: validate the object, then initialize it. */
static inline void z_vrfy_k_poll_signal_init(struct k_poll_signal *signal)
{
	Z_OOPS(Z_SYSCALL_OBJ_INIT(signal, K_OBJ_POLL_SIGNAL));
	z_impl_k_poll_signal_init(signal);
}
#include <syscalls/k_poll_signal_init_mrsh.c>
#endif
427
/* Read back a signal's signaled flag and result value. */
void z_impl_k_poll_signal_check(struct k_poll_signal *signal,
				unsigned int *signaled, int *result)
{
	*signaled = signal->signaled;
	*result = signal->result;
}
434
#ifdef CONFIG_USERSPACE
/* Syscall wrapper: validate the signal object and both output pointers
 * before reading the signal state.
 */
void z_vrfy_k_poll_signal_check(struct k_poll_signal *signal,
				unsigned int *signaled, int *result)
{
	Z_OOPS(Z_SYSCALL_OBJ(signal, K_OBJ_POLL_SIGNAL));
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(signaled, sizeof(unsigned int)));
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(result, sizeof(int)));
	z_impl_k_poll_signal_check(signal, signaled, result);
}
#include <syscalls/k_poll_signal_check_mrsh.c>
#endif
446
/*
 * Raise a poll signal: record @a result, set the signaled flag, and wake
 * the highest-priority event registered on the signal (if any), then
 * reschedule.
 *
 * @return 0 if no poller was waiting, otherwise the result of
 *         signal_poll_event().
 */
int z_impl_k_poll_signal_raise(struct k_poll_signal *signal, int result)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	struct k_poll_event *poll_event;

	signal->result = result;
	signal->signaled = 1U;

	poll_event = (struct k_poll_event *)sys_dlist_get(&signal->poll_events);
	if (poll_event == NULL) {
		k_spin_unlock(&lock, key);
		return 0;
	}

	int rc = signal_poll_event(poll_event, K_POLL_STATE_SIGNALED);

	/* z_reschedule() releases the lock and may context switch. */
	z_reschedule(&lock, key);
	return rc;
}
Andrew Boie3772f772018-05-07 16:52:57 -0700466
#ifdef CONFIG_USERSPACE
/* Syscall wrapper: validate the signal object, then raise it. */
static inline int z_vrfy_k_poll_signal_raise(struct k_poll_signal *signal,
					     int result)
{
	Z_OOPS(Z_SYSCALL_OBJ(signal, K_OBJ_POLL_SIGNAL));
	return z_impl_k_poll_signal_raise(signal, result);
}
#include <syscalls/k_poll_signal_raise_mrsh.c>

/* Syscall wrapper: validate the signal object, then reset it. */
static inline void z_vrfy_k_poll_signal_reset(struct k_poll_signal *signal)
{
	Z_OOPS(Z_SYSCALL_OBJ(signal, K_OBJ_POLL_SIGNAL));
	z_impl_k_poll_signal_reset(signal);
}
#include <syscalls/k_poll_signal_reset_mrsh.c>

#endif
484
/*
 * Workqueue trampoline for triggered work: clears any remaining event
 * registrations, releases work ownership, then runs the user handler.
 */
static void triggered_work_handler(struct k_work *work)
{
	k_work_handler_t handler;
	struct k_work_poll *twork =
			CONTAINER_OF(work, struct k_work_poll, work);

	/*
	 * If callback is not set, the k_work_poll_submit_to_queue()
	 * already cleared event registrations.
	 */
	if (twork->poller.cb != NULL) {
		k_spinlock_key_t key;

		key = k_spin_lock(&lock);
		clear_event_registrations(twork->events,
					  twork->num_events, key);
		k_spin_unlock(&lock, key);
	}

	/* Drop work ownership and execute real handler. */
	handler = twork->real_handler;
	twork->poller.thread = NULL;
	handler(work);
}
509
/*
 * Timeout handler for triggered work: no event arrived in time, so mark
 * the poll as finished with -EAGAIN and submit the work for execution.
 */
static void triggered_work_expiration_handler(struct _timeout *timeout)
{
	struct k_work_poll *twork =
		CONTAINER_OF(timeout, struct k_work_poll, timeout);
	struct k_work_q *work_q =
		CONTAINER_OF(twork->poller.thread, struct k_work_q, thread);

	twork->poller.is_polling = false;
	twork->poll_result = -EAGAIN;

	k_work_submit_to_queue(work_q, &twork->work);
}
522
/*
 * Poller callback for triggered work: an event fired, so cancel the
 * pending timeout, record success, and submit the work to its queue.
 * A NULL poller thread or a poller no longer polling means the work was
 * cancelled or already completed; nothing to do then.
 */
static int triggered_work_poller_cb(struct k_poll_event *event, u32_t status)
{
	struct _poller *poller = event->poller;

	if (poller->is_polling && poller->thread) {
		struct k_work_poll *twork =
			CONTAINER_OF(poller, struct k_work_poll, poller);
		struct k_work_q *work_q =
			CONTAINER_OF(poller->thread, struct k_work_q, thread);

		z_abort_timeout(&twork->timeout);
		twork->poll_result = 0;
		k_work_submit_to_queue(work_q, &twork->work);
	}

	return 0;
}
540
/*
 * Cancel triggered work that is still waiting for its events; must be
 * called with the subsystem lock held (key is passed through to
 * clear_event_registrations(), which may drop and retake it).
 *
 * @return 0 if the work was waiting and has been cancelled, -EINVAL if
 *         it is being registered, executing, or already pending.
 */
static int triggered_work_cancel(struct k_work_poll *work,
				 k_spinlock_key_t key)
{
	/* Check if the work waits for event. */
	if (work->poller.is_polling && work->poller.cb != NULL) {
		/* Remove timeout associated with the work. */
		z_abort_timeout(&work->timeout);

		/*
		 * Prevent work execution if event arrives while we will be
		 * clearing registrations.
		 */
		work->poller.cb = NULL;

		/* Clear registrations and work ownership. */
		clear_event_registrations(work->events, work->num_events, key);
		work->poller.thread = NULL;
		return 0;
	}

	/*
	 * If we reached here, the work is either being registered in
	 * the k_work_poll_submit_to_queue(), executed or is pending.
	 * Only in the last case we have a chance to cancel it, but
	 * unfortunately there is no public API performing this task.
	 */

	return -EINVAL;
}
570
571void k_work_poll_init(struct k_work_poll *work,
572 k_work_handler_t handler)
573{
574 k_work_init(&work->work, triggered_work_handler);
575 work->events = NULL;
576 work->poller.thread = NULL;
577 work->real_handler = handler;
578 z_init_timeout(&work->timeout);
579}
580
/**
 * @brief Submit triggered work: run @a work on @a work_q once any of
 *        @a num_events events triggers, or after @a timeout (in ms).
 *
 * @return 0 on success, -EADDRINUSE if the work is owned by another
 *         workqueue, or a negative error from triggered_work_cancel()
 *         if a previous submission could not be cancelled.
 */
int k_work_poll_submit_to_queue(struct k_work_q *work_q,
				struct k_work_poll *work,
				struct k_poll_event *events,
				int num_events,
				s32_t timeout)
{
	int events_registered;
	k_spinlock_key_t key;

	__ASSERT(work_q != NULL, "NULL work_q\n");
	__ASSERT(work != NULL, "NULL work\n");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events > 0, "zero events\n");

	/* Take ownership of the work if it is possible. */
	key = k_spin_lock(&lock);
	if (work->poller.thread != NULL) {
		if (work->poller.thread == &work_q->thread) {
			/* Resubmission to the same queue: cancel the
			 * previous submission first.
			 */
			int retval;

			retval = triggered_work_cancel(work, key);
			if (retval < 0) {
				k_spin_unlock(&lock, key);
				return retval;
			}
		} else {
			k_spin_unlock(&lock, key);
			return -EADDRINUSE;
		}
	}

	work->poller.is_polling = true;
	work->poller.thread = &work_q->thread;
	work->poller.cb = NULL;
	k_spin_unlock(&lock, key);

	/* Save list of events. */
	work->events = events;
	work->num_events = num_events;

	/* Clear result */
	work->poll_result = -EINPROGRESS;

	/* Register events */
	events_registered = register_events(events, num_events,
					    &work->poller, false);

	key = k_spin_lock(&lock);
	if (work->poller.is_polling && timeout != K_NO_WAIT) {
		/*
		 * Poller is still polling.
		 * No event is ready and all are watched.
		 */
		__ASSERT(num_events == events_registered,
			 "Some events were not registered!\n");

		/* Setup timeout if such action is requested */
		if (timeout != K_FOREVER) {
			z_add_timeout(&work->timeout,
				      triggered_work_expiration_handler,
				      k_ms_to_ticks_ceil32(timeout));
		}

		/* From now, any event will result in submitted work. */
		work->poller.cb = triggered_work_poller_cb;
		k_spin_unlock(&lock, key);
		return 0;
	}

	/*
	 * The K_NO_WAIT timeout was specified or at least one event was ready
	 * at registration time or changed state since registration. Hopefully,
	 * the poller->cb was not set, so work was not submitted to workqueue.
	 */

	/*
	 * If poller is still polling, no watched event occurred. This means
	 * we reached here due to K_NO_WAIT timeout "expiration".
	 */
	if (work->poller.is_polling) {
		work->poller.is_polling = false;
		work->poll_result = -EAGAIN;
	} else {
		work->poll_result = 0;
	}

	/* Clear registrations. */
	clear_event_registrations(events, events_registered, key);
	k_spin_unlock(&lock, key);

	/* Submit work. */
	k_work_submit_to_queue(work_q, &work->work);

	return 0;
}
676
677int k_work_poll_cancel(struct k_work_poll *work)
678{
679 k_spinlock_key_t key;
680 int retval;
681
682 /* Check if the work was submitted. */
683 if (work == NULL || work->poller.thread == NULL) {
684 return -EINVAL;
685 }
686
687 key = k_spin_lock(&lock);
688 retval = triggered_work_cancel(work, key);
689 k_spin_unlock(&lock, key);
690
691 return retval;
692}