/*
 * Copyright (c) 2017 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Kernel asynchronous event polling interface.
 *
 * This polling mechanism allows waiting on multiple events concurrently,
 * whether they are triggered directly or originate from kernel objects or
 * other kernel constructs.
 */
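
/*
 * A minimal usage sketch (illustrative only, not compiled here; `my_sem`
 * and `my_fifo` are hypothetical objects assumed to be defined and
 * initialized elsewhere):
 *
 *	struct k_poll_event events[2] = {
 *		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 *						K_POLL_MODE_NOTIFY_ONLY,
 *						&my_sem, 0),
 *		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_DATA_AVAILABLE,
 *						K_POLL_MODE_NOTIFY_ONLY,
 *						&my_fifo, 0),
 *	};
 *
 *	if (k_poll(events, 2, K_FOREVER) == 0) {
 *		if (events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
 *			k_sem_take(&my_sem, K_NO_WAIT);
 *		}
 *		if (events[1].state == K_POLL_STATE_FIFO_DATA_AVAILABLE) {
 *			void *data = k_fifo_get(&my_fifo, K_NO_WAIT);
 *		}
 *		events[0].state = K_POLL_STATE_NOT_READY;
 *		events[1].state = K_POLL_STATE_NOT_READY;
 *	}
 *
 * Event states must be reset to K_POLL_STATE_NOT_READY by the user before
 * polling on the same events again.
 */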

#include <kernel.h>
#include <kernel_structs.h>
#include <kernel_internal.h>
#include <wait_q.h>
#include <ksched.h>
#include <syscall_handler.h>
#include <misc/slist.h>
#include <misc/dlist.h>
#include <misc/util.h>
#include <misc/__assert.h>

void k_poll_event_init(struct k_poll_event *event, u32_t type,
		       int mode, void *obj)
{
	__ASSERT(mode == K_POLL_MODE_NOTIFY_ONLY,
		 "only NOTIFY_ONLY mode is supported\n");
	__ASSERT(type < (BIT(_POLL_NUM_TYPES)), "invalid type\n");
	__ASSERT(obj, "must provide an object\n");

	event->poller = NULL;
	/* event->tag is left uninitialized: the user will set it if needed */
	event->type = type;
	event->state = K_POLL_STATE_NOT_READY;
	event->mode = mode;
	event->unused = 0;
	event->obj = obj;
}
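
/*
 * Runtime-initialization sketch for a signal event (illustrative;
 * `my_signal` is a hypothetical, already-initialized struct k_poll_signal):
 *
 *	struct k_poll_event event;
 *
 *	k_poll_event_init(&event, K_POLL_TYPE_SIGNAL,
 *			  K_POLL_MODE_NOTIFY_ONLY, &my_signal);
 */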

/* must be called with interrupts locked */
static inline int is_condition_met(struct k_poll_event *event, u32_t *state)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		if (k_sem_count_get(event->sem) > 0) {
			*state = K_POLL_STATE_SEM_AVAILABLE;
			return 1;
		}
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		if (!k_queue_is_empty(event->queue)) {
			*state = K_POLL_STATE_FIFO_DATA_AVAILABLE;
			return 1;
		}
		break;
	case K_POLL_TYPE_SIGNAL:
		if (event->signal->signaled) {
			*state = K_POLL_STATE_SIGNALED;
			return 1;
		}
		break;
	case K_POLL_TYPE_IGNORE:
		return 0;
	default:
		__ASSERT(0, "invalid event type (0x%x)\n", event->type);
	}

	return 0;
}

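/*
 * Keep the object's poll_events list sorted by the priority of each
 * event's polling thread, highest priority first, so that the most
 * important waiter is the one taken off the head of the list and
 * signaled first.
 */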
static inline void add_event(sys_dlist_t *events, struct k_poll_event *event,
			     struct _poller *poller)
{
	struct k_poll_event *pending;

	pending = (struct k_poll_event *)sys_dlist_peek_tail(events);
	if (!pending || _is_t1_higher_prio_than_t2(pending->poller->thread,
						   poller->thread)) {
		sys_dlist_append(events, &event->_node);
		return;
	}

	SYS_DLIST_FOR_EACH_CONTAINER(events, pending, _node) {
		if (_is_t1_higher_prio_than_t2(poller->thread,
					       pending->poller->thread)) {
			sys_dlist_insert_before(events, &pending->_node,
						&event->_node);
			return;
		}
	}

	sys_dlist_append(events, &event->_node);
}

/* must be called with interrupts locked */
static inline int register_event(struct k_poll_event *event,
				 struct _poller *poller)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem, "invalid semaphore\n");
		add_event(&event->sem->poll_events, event, poller);
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue, "invalid queue\n");
		add_event(&event->queue->poll_events, event, poller);
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal, "invalid poll signal\n");
		add_event(&event->signal->poll_events, event, poller);
		break;
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(0, "invalid event type\n");
	}

	event->poller = poller;

	return 0;
}

/* must be called with interrupts locked */
static inline void clear_event_registration(struct k_poll_event *event)
{
	event->poller = NULL;

	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem, "invalid semaphore\n");
		sys_dlist_remove(&event->_node);
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue, "invalid queue\n");
		sys_dlist_remove(&event->_node);
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal, "invalid poll signal\n");
		sys_dlist_remove(&event->_node);
		break;
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(0, "invalid event type\n");
	}
}

/* must be called with interrupts locked */
static inline void clear_event_registrations(struct k_poll_event *events,
					     int last_registered,
					     unsigned int key)
{
	for (; last_registered >= 0; last_registered--) {
		clear_event_registration(&events[last_registered]);
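		/* drop and re-take the interrupt lock between entries so
		 * that pending interrupts get a chance to run
		 */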
		irq_unlock(key);
		key = irq_lock();
	}
}

static inline void set_event_ready(struct k_poll_event *event, u32_t state)
{
	event->poller = NULL;
	event->state |= state;
}

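/*
 * Returns 0 if at least one event's condition is met, or -EAGAIN if the
 * timeout expires first; signal_poll_event() can also make a poller
 * return -EINTR when an object raises K_POLL_STATE_NOT_READY.
 */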
int _impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
{
	__ASSERT(!_is_in_isr(), "");
	__ASSERT(events, "NULL events\n");
	__ASSERT(num_events > 0, "zero events\n");

	int last_registered = -1, rc;
	unsigned int key;

	struct _poller poller = { .thread = _current, .is_polling = 1, };

	/* find events whose condition is already fulfilled */
	for (int ii = 0; ii < num_events; ii++) {
		u32_t state;

		key = irq_lock();
		if (is_condition_met(&events[ii], &state)) {
			set_event_ready(&events[ii], state);
			poller.is_polling = 0;
		} else if (timeout != K_NO_WAIT && poller.is_polling) {
			rc = register_event(&events[ii], &poller);
			if (rc == 0) {
				++last_registered;
			} else {
				__ASSERT(0, "unexpected return code\n");
			}
		}
		irq_unlock(key);
	}

	key = irq_lock();

	/*
	 * If we're not polling anymore, it means that at least one event
	 * condition is met, either when looping through the events here or
	 * because one of the events registered has had its state changed.
	 */
	if (!poller.is_polling) {
		clear_event_registrations(events, last_registered, key);
		irq_unlock(key);
		return 0;
	}

	poller.is_polling = 0;

	if (timeout == K_NO_WAIT) {
		irq_unlock(key);
		return -EAGAIN;
	}

	_wait_q_t wait_q = _WAIT_Q_INIT(&wait_q);

	int swap_rc = _pend_current_thread(key, &wait_q, timeout);

	/*
	 * Clear all event registrations. If events happen while we're in
	 * this loop, and we already had one that triggered, that's OK: they
	 * will end up in the list of events that are ready; if we timed out,
	 * and events happen while we're in this loop, that is OK as well
	 * since we already know the return code (-EAGAIN), and even if they
	 * are added to the list of events that occurred, the user has to
	 * check the return code first, which invalidates the whole list of
	 * event states.
	 */
	key = irq_lock();
	clear_event_registrations(events, last_registered, key);
	irq_unlock(key);

	return swap_rc;
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_poll, events, num_events, timeout)
{
	int ret, key;
	struct k_poll_event *events_copy = NULL;
	unsigned int bounds;

	/* Validate the events buffer and make a copy of it in an
	 * allocated kernel-side buffer.
	 */
	if (Z_SYSCALL_VERIFY(num_events > 0)) {
		ret = -EINVAL;
		goto out;
	}
	if (Z_SYSCALL_VERIFY_MSG(
		!__builtin_umul_overflow(num_events,
					 sizeof(struct k_poll_event),
					 &bounds), "num_events too large")) {
		ret = -EINVAL;
		goto out;
	}
	events_copy = z_thread_malloc(bounds);
	if (!events_copy) {
		ret = -ENOMEM;
		goto out;
	}

	key = irq_lock();
	if (Z_SYSCALL_MEMORY_WRITE(events, bounds)) {
		irq_unlock(key);
		goto oops_free;
	}
	(void)memcpy(events_copy, (void *)events, bounds);
	irq_unlock(key);

	/* Validate what's inside events_copy */
	for (int i = 0; i < num_events; i++) {
		struct k_poll_event *e = &events_copy[i];

		if (Z_SYSCALL_VERIFY(e->mode == K_POLL_MODE_NOTIFY_ONLY)) {
			ret = -EINVAL;
			goto out_free;
		}

		switch (e->type) {
		case K_POLL_TYPE_IGNORE:
			break;
		case K_POLL_TYPE_SIGNAL:
			Z_OOPS(Z_SYSCALL_OBJ(e->signal, K_OBJ_POLL_SIGNAL));
			break;
		case K_POLL_TYPE_SEM_AVAILABLE:
			Z_OOPS(Z_SYSCALL_OBJ(e->sem, K_OBJ_SEM));
			break;
		case K_POLL_TYPE_DATA_AVAILABLE:
			Z_OOPS(Z_SYSCALL_OBJ(e->queue, K_OBJ_QUEUE));
			break;
		default:
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = k_poll(events_copy, num_events, timeout);
	(void)memcpy((void *)events, events_copy, bounds);
out_free:
	k_free(events_copy);
out:
	return ret;
oops_free:
	k_free(events_copy);
	Z_OOPS(1);
}
#endif

/* must be called with interrupts locked */
static int signal_poll_event(struct k_poll_event *event, u32_t state)
{
	if (!event->poller) {
		goto ready_event;
	}

	struct k_thread *thread = event->poller->thread;

	__ASSERT(event->poller->thread, "poller should have a thread\n");

	event->poller->is_polling = 0;

	if (!_is_thread_pending(thread)) {
		goto ready_event;
	}

	if (_is_thread_timeout_expired(thread)) {
		return -EAGAIN;
	}

	_unpend_thread(thread);
	_set_thread_return_value(thread,
				 state == K_POLL_STATE_NOT_READY ? -EINTR : 0);

	if (!_is_thread_ready(thread)) {
		goto ready_event;
	}

	_ready_thread(thread);

ready_event:
	set_event_ready(event, state);
	return 0;
}

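/*
 * Take the pending event at the head of the object's poll_events list
 * (kept priority-sorted by add_event()) and signal it; remaining events,
 * if any, stay registered.
 */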
void _handle_obj_poll_events(sys_dlist_t *events, u32_t state)
{
	struct k_poll_event *poll_event;

	poll_event = (struct k_poll_event *)sys_dlist_get(events);
	if (poll_event) {
		(void) signal_poll_event(poll_event, state);
	}
}

void _impl_k_poll_signal_init(struct k_poll_signal *signal)
{
	sys_dlist_init(&signal->poll_events);
	signal->signaled = 0;
	/* signal->result is left uninitialized */
	_k_object_init(signal);
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_poll_signal_init, signal)
{
	Z_OOPS(Z_SYSCALL_OBJ_INIT(signal, K_OBJ_POLL_SIGNAL));
	_impl_k_poll_signal_init((struct k_poll_signal *)signal);
	return 0;
}
#endif

void _impl_k_poll_signal_check(struct k_poll_signal *signal,
			       unsigned int *signaled, int *result)
{
	*signaled = signal->signaled;
	*result = signal->result;
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_poll_signal_check, signal, signaled, result)
{
	Z_OOPS(Z_SYSCALL_OBJ(signal, K_OBJ_POLL_SIGNAL));
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(signaled, sizeof(unsigned int)));
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(result, sizeof(int)));

	_impl_k_poll_signal_check((struct k_poll_signal *)signal,
				  (unsigned int *)signaled, (int *)result);
	return 0;
}
#endif

int _impl_k_poll_signal(struct k_poll_signal *signal, int result)
{
	unsigned int key = irq_lock();
	struct k_poll_event *poll_event;

	signal->result = result;
	signal->signaled = 1;

	poll_event = (struct k_poll_event *)sys_dlist_get(&signal->poll_events);
	if (!poll_event) {
		irq_unlock(key);
		return 0;
	}

	int rc = signal_poll_event(poll_event, K_POLL_STATE_SIGNALED);

	_reschedule(key);
	return rc;
}
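
/*
 * Illustrative signal round-trip (a sketch; `sig` and `event` are
 * hypothetical objects, and the raise may come from an ISR or another
 * thread):
 *
 *	struct k_poll_signal sig;
 *	struct k_poll_event event;
 *
 *	k_poll_signal_init(&sig);
 *	k_poll_event_init(&event, K_POLL_TYPE_SIGNAL,
 *			  K_POLL_MODE_NOTIFY_ONLY, &sig);
 *
 *	(elsewhere) k_poll_signal(&sig, 42);
 *
 *	k_poll(&event, 1, K_FOREVER);
 *	if (event.state == K_POLL_STATE_SIGNALED) {
 *		int result = event.signal->result;
 *		k_poll_signal_reset(&sig);
 *		event.state = K_POLL_STATE_NOT_READY;
 *	}
 *
 * After this sequence, `result` holds 42.
 */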

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_poll_signal, signal, result)
{
	Z_OOPS(Z_SYSCALL_OBJ(signal, K_OBJ_POLL_SIGNAL));
	return _impl_k_poll_signal((struct k_poll_signal *)signal, result);
}
Z_SYSCALL_HANDLER1_SIMPLE_VOID(k_poll_signal_reset, K_OBJ_POLL_SIGNAL,
			       struct k_poll_signal *);
#endif