/*
 * Copyright (c) 2017 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Kernel asynchronous event polling interface.
 *
 * This polling mechanism allows waiting on multiple events concurrently,
 * either events triggered directly, or from kernel objects or other kernel
 * constructs.
 */
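
/*
 * Illustrative usage sketch (not part of the upstream documentation) of a
 * thread waiting on a semaphore and a poll signal at the same time; `my_sem`
 * and `my_signal` are assumed to be defined and initialized by the caller:
 *
 *	struct k_poll_event events[2];
 *
 *	k_poll_event_init(&events[0], K_POLL_TYPE_SEM_AVAILABLE,
 *			  K_POLL_MODE_NOTIFY_ONLY, &my_sem);
 *	k_poll_event_init(&events[1], K_POLL_TYPE_SIGNAL,
 *			  K_POLL_MODE_NOTIFY_ONLY, &my_signal);
 *
 *	if (k_poll(events, 2, K_FOREVER) == 0) {
 *		if (events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
 *			k_sem_take(&my_sem, K_NO_WAIT);
 *		}
 *		if (events[1].state == K_POLL_STATE_SIGNALED) {
 *			// use events[1].signal->result, then clear it
 *			events[1].signal->signaled = 0;
 *		}
 *		// reset states before polling again
 *		events[0].state = K_POLL_STATE_NOT_READY;
 *		events[1].state = K_POLL_STATE_NOT_READY;
 *	}
 */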

#include <kernel.h>
#include <kernel_structs.h>
#include <kernel_internal.h>
#include <wait_q.h>
#include <ksched.h>
#include <syscall_handler.h>
#include <sys/dlist.h>
#include <sys/util.h>
#include <sys/__assert.h>
#include <stdbool.h>

/* Single subsystem lock. Locking per-event would be better on highly
 * contended SMP systems, but the original locking scheme here is
 * subtle (it relies on releasing/reacquiring the lock in areas for
 * latency control and it's sometimes hard to see exactly what data is
 * "inside" a given critical section). Do the synchronization port
 * later as an optimization.
 */
static struct k_spinlock lock;

enum POLL_MODE { MODE_NONE, MODE_POLL, MODE_TRIGGERED };

static int signal_poller(struct k_poll_event *event, uint32_t state);
static int signal_triggered_work(struct k_poll_event *event, uint32_t status);

void k_poll_event_init(struct k_poll_event *event, uint32_t type,
		       int mode, void *obj)
{
	__ASSERT(mode == K_POLL_MODE_NOTIFY_ONLY,
		 "only NOTIFY_ONLY mode is supported\n");
	__ASSERT(type < (BIT(_POLL_NUM_TYPES)), "invalid type\n");
	__ASSERT(obj != NULL, "must provide an object\n");

	event->poller = NULL;
	/* event->tag is left uninitialized: the user will set it if needed */
	event->type = type;
	event->state = K_POLL_STATE_NOT_READY;
	event->mode = mode;
	event->unused = 0U;
	event->obj = obj;
}

/* must be called with interrupts locked */
static inline bool is_condition_met(struct k_poll_event *event, uint32_t *state)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		if (k_sem_count_get(event->sem) > 0U) {
			*state = K_POLL_STATE_SEM_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		if (!k_queue_is_empty(event->queue)) {
			*state = K_POLL_STATE_FIFO_DATA_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_SIGNAL:
		if (event->signal->signaled != 0U) {
			*state = K_POLL_STATE_SIGNALED;
			return true;
		}
		break;
	case K_POLL_TYPE_IGNORE:
		break;
	default:
		__ASSERT(false, "invalid event type (0x%x)\n", event->type);
		break;
	}

	return false;
}

static struct k_thread *poller_thread(struct z_poller *p)
{
	return p ? CONTAINER_OF(p, struct k_thread, poller) : NULL;
}

static inline void add_event(sys_dlist_t *events, struct k_poll_event *event,
			     struct z_poller *poller)
{
	struct k_poll_event *pending;

	pending = (struct k_poll_event *)sys_dlist_peek_tail(events);
	if ((pending == NULL) ||
	    (z_sched_prio_cmp(poller_thread(pending->poller),
			      poller_thread(poller)) > 0)) {
		sys_dlist_append(events, &event->_node);
		return;
	}

	SYS_DLIST_FOR_EACH_CONTAINER(events, pending, _node) {
		if (z_sched_prio_cmp(poller_thread(poller),
				     poller_thread(pending->poller)) > 0) {
			sys_dlist_insert(&pending->_node, &event->_node);
			return;
		}
	}

	sys_dlist_append(events, &event->_node);
}

/* must be called with interrupts locked */
static inline void register_event(struct k_poll_event *event,
				  struct z_poller *poller)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem != NULL, "invalid semaphore\n");
		add_event(&event->sem->poll_events, event, poller);
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue != NULL, "invalid queue\n");
		add_event(&event->queue->poll_events, event, poller);
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal != NULL, "invalid poll signal\n");
		add_event(&event->signal->poll_events, event, poller);
		break;
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(false, "invalid event type\n");
		break;
	}

	event->poller = poller;
}

/* must be called with interrupts locked */
static inline void clear_event_registration(struct k_poll_event *event)
{
	bool remove_event = false;

	event->poller = NULL;

	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem != NULL, "invalid semaphore\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue != NULL, "invalid queue\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal != NULL, "invalid poll signal\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(false, "invalid event type\n");
		break;
	}
	if (remove_event && sys_dnode_is_linked(&event->_node)) {
		sys_dlist_remove(&event->_node);
	}
}

/* must be called with interrupts locked */
static inline void clear_event_registrations(struct k_poll_event *events,
					     int num_events,
					     k_spinlock_key_t key)
{
	while (num_events--) {
		clear_event_registration(&events[num_events]);
		k_spin_unlock(&lock, key);
		key = k_spin_lock(&lock);
	}
}

static inline void set_event_ready(struct k_poll_event *event, uint32_t state)
{
	event->poller = NULL;
	event->state |= state;
}

static inline int register_events(struct k_poll_event *events,
				  int num_events,
				  struct z_poller *poller,
				  bool just_check)
{
	int events_registered = 0;

	for (int ii = 0; ii < num_events; ii++) {
		k_spinlock_key_t key;
		uint32_t state;

		key = k_spin_lock(&lock);
		if (is_condition_met(&events[ii], &state)) {
			set_event_ready(&events[ii], state);
			poller->is_polling = false;
		} else if (!just_check && poller->is_polling) {
			register_event(&events[ii], poller);
			events_registered += 1;
		}
		k_spin_unlock(&lock, key);
	}

	return events_registered;
}

static int signal_poller(struct k_poll_event *event, uint32_t state)
{
	struct k_thread *thread = poller_thread(event->poller);

	__ASSERT(thread != NULL, "poller should have a thread\n");

	if (!z_is_thread_pending(thread)) {
		return 0;
	}

	if (z_is_thread_timeout_expired(thread)) {
		return -EAGAIN;
	}

	z_unpend_thread(thread);
	arch_thread_return_value_set(thread,
		state == K_POLL_STATE_CANCELLED ? -EINTR : 0);

	if (!z_is_thread_ready(thread)) {
		return 0;
	}

	z_ready_thread(thread);

	return 0;
}

int z_impl_k_poll(struct k_poll_event *events, int num_events,
		  k_timeout_t timeout)
{
	int events_registered;
	k_spinlock_key_t key;
	struct z_poller *poller = &_current->poller;

	poller->is_polling = true;
	poller->mode = MODE_POLL;

	__ASSERT(!arch_is_in_isr(), "");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events >= 0, "<0 events\n");

	events_registered = register_events(events, num_events, poller,
					    K_TIMEOUT_EQ(timeout, K_NO_WAIT));

	key = k_spin_lock(&lock);

	/*
	 * If we're not polling anymore, it means that at least one event
	 * condition is met, either when looping through the events here or
	 * because one of the events registered has had its state changed.
	 */
	if (!poller->is_polling) {
		clear_event_registrations(events, events_registered, key);
		k_spin_unlock(&lock, key);
		return 0;
	}

	poller->is_polling = false;

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&lock, key);
		return -EAGAIN;
	}

	static _wait_q_t wait_q = Z_WAIT_Q_INIT(&wait_q);

	int swap_rc = z_pend_curr(&lock, key, &wait_q, timeout);

	/*
	 * Clear all event registrations. If events happen while we're in this
	 * loop, and we already had one that triggered, that's OK: they will
	 * end up in the list of events that are ready; if we timed out, and
	 * events happen while we're in this loop, that is OK as well since
	 * we already know the return code (-EAGAIN), and even if they are
	 * added to the list of events that occurred, the user has to check the
	 * return code first, which invalidates the whole list of event states.
	 */
	key = k_spin_lock(&lock);
	clear_event_registrations(events, events_registered, key);
	k_spin_unlock(&lock, key);

	return swap_rc;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_poll(struct k_poll_event *events,
				int num_events, k_timeout_t timeout)
{
	int ret;
	k_spinlock_key_t key;
	struct k_poll_event *events_copy = NULL;
	uint32_t bounds;

	/* Validate the events buffer and make a copy of it in an
	 * allocated kernel-side buffer.
	 */
	if (Z_SYSCALL_VERIFY(num_events >= 0U)) {
		ret = -EINVAL;
		goto out;
	}
	if (Z_SYSCALL_VERIFY_MSG(!u32_mul_overflow(num_events,
						   sizeof(struct k_poll_event),
						   &bounds),
				 "num_events too large")) {
		ret = -EINVAL;
		goto out;
	}
	events_copy = z_thread_malloc(bounds);
	if (!events_copy) {
		ret = -ENOMEM;
		goto out;
	}

	key = k_spin_lock(&lock);
	if (Z_SYSCALL_MEMORY_WRITE(events, bounds)) {
		k_spin_unlock(&lock, key);
		goto oops_free;
	}
	(void)memcpy(events_copy, events, bounds);
	k_spin_unlock(&lock, key);

	/* Validate what's inside events_copy */
	for (int i = 0; i < num_events; i++) {
		struct k_poll_event *e = &events_copy[i];

		if (Z_SYSCALL_VERIFY(e->mode == K_POLL_MODE_NOTIFY_ONLY)) {
			ret = -EINVAL;
			goto out_free;
		}

		switch (e->type) {
		case K_POLL_TYPE_IGNORE:
			break;
		case K_POLL_TYPE_SIGNAL:
			Z_OOPS(Z_SYSCALL_OBJ(e->signal, K_OBJ_POLL_SIGNAL));
			break;
		case K_POLL_TYPE_SEM_AVAILABLE:
			Z_OOPS(Z_SYSCALL_OBJ(e->sem, K_OBJ_SEM));
			break;
		case K_POLL_TYPE_DATA_AVAILABLE:
			Z_OOPS(Z_SYSCALL_OBJ(e->queue, K_OBJ_QUEUE));
			break;
		default:
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = k_poll(events_copy, num_events, timeout);
	(void)memcpy((void *)events, events_copy, bounds);
out_free:
	k_free(events_copy);
out:
	return ret;
oops_free:
	k_free(events_copy);
	Z_OOPS(1);
}
#include <syscalls/k_poll_mrsh.c>
#endif

/* must be called with interrupts locked */
static int signal_poll_event(struct k_poll_event *event, uint32_t state)
{
	struct z_poller *poller = event->poller;
	int retcode = 0;

	if (poller) {
		if (poller->mode == MODE_POLL) {
			retcode = signal_poller(event, state);
		} else if (poller->mode == MODE_TRIGGERED) {
			retcode = signal_triggered_work(event, state);
		}

		poller->is_polling = false;

		if (retcode < 0) {
			return retcode;
		}
	}

	set_event_ready(event, state);
	return retcode;
}

void z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state)
{
	struct k_poll_event *poll_event;

	poll_event = (struct k_poll_event *)sys_dlist_get(events);
	if (poll_event != NULL) {
		(void) signal_poll_event(poll_event, state);
	}
}

void z_impl_k_poll_signal_init(struct k_poll_signal *sig)
{
	sys_dlist_init(&sig->poll_events);
	sig->signaled = 0U;
	/* sig->result is left uninitialized */
	z_object_init(sig);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_poll_signal_init(struct k_poll_signal *sig)
{
	Z_OOPS(Z_SYSCALL_OBJ_INIT(sig, K_OBJ_POLL_SIGNAL));
	z_impl_k_poll_signal_init(sig);
}
#include <syscalls/k_poll_signal_init_mrsh.c>
#endif

void z_impl_k_poll_signal_check(struct k_poll_signal *sig,
				unsigned int *signaled, int *result)
{
	*signaled = sig->signaled;
	*result = sig->result;
}

#ifdef CONFIG_USERSPACE
void z_vrfy_k_poll_signal_check(struct k_poll_signal *sig,
				unsigned int *signaled, int *result)
{
	Z_OOPS(Z_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(signaled, sizeof(unsigned int)));
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(result, sizeof(int)));
	z_impl_k_poll_signal_check(sig, signaled, result);
}
#include <syscalls/k_poll_signal_check_mrsh.c>
#endif

int z_impl_k_poll_signal_raise(struct k_poll_signal *sig, int result)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	struct k_poll_event *poll_event;

	sig->result = result;
	sig->signaled = 1U;

	poll_event = (struct k_poll_event *)sys_dlist_get(&sig->poll_events);
	if (poll_event == NULL) {
		k_spin_unlock(&lock, key);
		return 0;
	}

	int rc = signal_poll_event(poll_event, K_POLL_STATE_SIGNALED);

	z_reschedule(&lock, key);
	return rc;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_poll_signal_raise(struct k_poll_signal *sig,
					     int result)
{
	Z_OOPS(Z_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	return z_impl_k_poll_signal_raise(sig, result);
}
#include <syscalls/k_poll_signal_raise_mrsh.c>

static inline void z_vrfy_k_poll_signal_reset(struct k_poll_signal *sig)
{
	Z_OOPS(Z_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	z_impl_k_poll_signal_reset(sig);
}
#include <syscalls/k_poll_signal_reset_mrsh.c>

#endif

static void triggered_work_handler(struct k_work *work)
{
	struct k_work_poll *twork =
		CONTAINER_OF(work, struct k_work_poll, work);

	/*
	 * If callback is not set, the k_work_poll_submit_to_queue()
	 * already cleared event registrations.
	 */
	if (twork->poller.mode != MODE_NONE) {
		k_spinlock_key_t key;

		key = k_spin_lock(&lock);
		clear_event_registrations(twork->events,
					  twork->num_events, key);
		k_spin_unlock(&lock, key);
	}

	/* Drop work ownership and execute real handler. */
	twork->workq = NULL;
	twork->real_handler(work);
}

static void triggered_work_expiration_handler(struct _timeout *timeout)
{
	struct k_work_poll *twork =
		CONTAINER_OF(timeout, struct k_work_poll, timeout);

	twork->poller.is_polling = false;
	twork->poll_result = -EAGAIN;
	k_work_submit_to_queue(twork->workq, &twork->work);
}

static int signal_triggered_work(struct k_poll_event *event, uint32_t status)
{
	struct z_poller *poller = event->poller;
	struct k_work_poll *twork =
		CONTAINER_OF(poller, struct k_work_poll, poller);

	if (poller->is_polling && twork->workq != NULL) {
		struct k_work_q *work_q = twork->workq;

		z_abort_timeout(&twork->timeout);
		twork->poll_result = 0;
		k_work_submit_to_queue(work_q, &twork->work);
	}

	return 0;
}

static int triggered_work_cancel(struct k_work_poll *work,
				 k_spinlock_key_t key)
{
	/* Check if the work is waiting for an event. */
	if (work->poller.is_polling && work->poller.mode != MODE_NONE) {
		/* Remove timeout associated with the work. */
		z_abort_timeout(&work->timeout);

		/*
		 * Prevent work execution if an event arrives while we
		 * are clearing the registrations.
		 */
		work->poller.mode = MODE_NONE;

		/* Clear registrations and work ownership. */
		clear_event_registrations(work->events, work->num_events, key);
		work->workq = NULL;
		return 0;
	}

	/*
	 * If we reached here, the work is either being registered in
	 * k_work_poll_submit_to_queue(), is being executed, or is already
	 * pending. Only in the last case do we have a chance to cancel it,
	 * but unfortunately there is no public API performing this task.
	 */

	return -EINVAL;
}

void k_work_poll_init(struct k_work_poll *work,
		      k_work_handler_t handler)
{
	*work = (struct k_work_poll) {};
	k_work_init(&work->work, triggered_work_handler);
	work->real_handler = handler;
	z_init_timeout(&work->timeout);
}

int k_work_poll_submit_to_queue(struct k_work_q *work_q,
				struct k_work_poll *work,
				struct k_poll_event *events,
				int num_events,
				k_timeout_t timeout)
{
	int events_registered;
	k_spinlock_key_t key;

	__ASSERT(work_q != NULL, "NULL work_q\n");
	__ASSERT(work != NULL, "NULL work\n");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events > 0, "zero events\n");

	/* Take ownership of the work if possible. */
	key = k_spin_lock(&lock);
	if (work->workq != NULL) {
		if (work->workq == work_q) {
			int retval;

			retval = triggered_work_cancel(work, key);
			if (retval < 0) {
				k_spin_unlock(&lock, key);
				return retval;
			}
		} else {
			k_spin_unlock(&lock, key);
			return -EADDRINUSE;
		}
	}

	work->poller.is_polling = true;
	work->workq = work_q;
	work->poller.mode = MODE_NONE;
	k_spin_unlock(&lock, key);

	/* Save list of events. */
	work->events = events;
	work->num_events = num_events;

	/* Clear result */
	work->poll_result = -EINPROGRESS;

	/* Register events */
	events_registered = register_events(events, num_events,
					    &work->poller, false);

	key = k_spin_lock(&lock);
	if (work->poller.is_polling && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		/*
		 * Poller is still polling.
		 * No event is ready and all are watched.
		 */
		__ASSERT(num_events == events_registered,
			 "Some events were not registered!\n");

		/* Setup timeout if such action is requested */
		if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
			z_add_timeout(&work->timeout,
				      triggered_work_expiration_handler,
				      timeout);
		}

		/* From now, any event will result in submitted work. */
		work->poller.mode = MODE_TRIGGERED;
		k_spin_unlock(&lock, key);
		return 0;
	}

	/*
	 * The K_NO_WAIT timeout was specified or at least one event
	 * was ready at registration time or changed state since
	 * registration. Hopefully, the poller mode was not set, so
	 * work was not submitted to workqueue.
	 */

	/*
	 * If poller is still polling, no watched event occurred. This means
	 * we reached here due to K_NO_WAIT timeout "expiration".
	 */
	if (work->poller.is_polling) {
		work->poller.is_polling = false;
		work->poll_result = -EAGAIN;
	} else {
		work->poll_result = 0;
	}

	/* Clear registrations. */
	clear_event_registrations(events, events_registered, key);
	k_spin_unlock(&lock, key);

	/* Submit work. */
	k_work_submit_to_queue(work_q, &work->work);

	return 0;
}
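
/*
 * Illustrative sketch (not part of the upstream sources) of submitting
 * triggered work against a FIFO; `my_work_q`, `my_fifo` and `my_handler`
 * are hypothetical, caller-provided objects:
 *
 *	static struct k_work_poll twork;
 *	static struct k_poll_event tevent;
 *
 *	void submit_triggered_work(void)
 *	{
 *		k_poll_event_init(&tevent, K_POLL_TYPE_FIFO_DATA_AVAILABLE,
 *				  K_POLL_MODE_NOTIFY_ONLY, &my_fifo);
 *		k_work_poll_init(&twork, my_handler);
 *
 *		// my_handler() runs on my_work_q when data arrives in
 *		// my_fifo, or after 100 ms with twork.poll_result == -EAGAIN.
 *		k_work_poll_submit_to_queue(&my_work_q, &twork, &tevent, 1,
 *					    K_MSEC(100));
 *	}
 */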

int k_work_poll_cancel(struct k_work_poll *work)
{
	k_spinlock_key_t key;
	int retval;

	/* Check if the work was submitted. */
	if (work == NULL || work->workq == NULL) {
		return -EINVAL;
	}

	key = k_spin_lock(&lock);
	retval = triggered_work_cancel(work, key);
	k_spin_unlock(&lock, key);

	return retval;
}