Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2017 Wind River Systems, Inc. |
| 3 | * |
| 4 | * SPDX-License-Identifier: Apache-2.0 |
| 5 | */ |
| 6 | |
| 7 | /** |
| 8 | * @file |
| 9 | * |
| 10 | * @brief Kernel asynchronous event polling interface. |
| 11 | * |
| 12 | * This polling mechanism allows waiting on multiple events concurrently, |
| 13 | * either events triggered directly, or from kernel objects or other kernel |
| 14 | * constructs. |
| 15 | */ |
| 16 | |
| 17 | #include <kernel.h> |
| 18 | #include <kernel_structs.h> |
Andy Ross | 245b54e | 2018-02-08 09:10:46 -0800 | [diff] [blame] | 19 | #include <kernel_internal.h> |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 20 | #include <wait_q.h> |
| 21 | #include <ksched.h> |
Andrew Boie | 3772f77 | 2018-05-07 16:52:57 -0700 | [diff] [blame] | 22 | #include <syscall_handler.h> |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 23 | #include <misc/slist.h> |
| 24 | #include <misc/dlist.h> |
Flavio Ceolin | 8aec087 | 2018-08-15 11:52:00 -0700 | [diff] [blame] | 25 | #include <misc/util.h> |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 26 | #include <misc/__assert.h> |
| 27 | |
Kumar Gala | cc334c7 | 2017-04-21 10:55:34 -0500 | [diff] [blame] | 28 | void k_poll_event_init(struct k_poll_event *event, u32_t type, |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 29 | int mode, void *obj) |
| 30 | { |
| 31 | __ASSERT(mode == K_POLL_MODE_NOTIFY_ONLY, |
| 32 | "only NOTIFY_ONLY mode is supported\n"); |
Flavio Ceolin | 8aec087 | 2018-08-15 11:52:00 -0700 | [diff] [blame] | 33 | __ASSERT(type < (BIT(_POLL_NUM_TYPES)), "invalid type\n"); |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 34 | __ASSERT(obj, "must provide an object\n"); |
| 35 | |
| 36 | event->poller = NULL; |
Benjamin Walsh | 969d4a7 | 2017-02-02 11:25:11 -0500 | [diff] [blame] | 37 | /* event->tag is left uninitialized: the user will set it if needed */ |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 38 | event->type = type; |
| 39 | event->state = K_POLL_STATE_NOT_READY; |
| 40 | event->mode = mode; |
| 41 | event->unused = 0; |
| 42 | event->obj = obj; |
| 43 | } |
| 44 | |
| 45 | /* must be called with interrupts locked */ |
Kumar Gala | cc334c7 | 2017-04-21 10:55:34 -0500 | [diff] [blame] | 46 | static inline int is_condition_met(struct k_poll_event *event, u32_t *state) |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 47 | { |
| 48 | switch (event->type) { |
| 49 | case K_POLL_TYPE_SEM_AVAILABLE: |
| 50 | if (k_sem_count_get(event->sem) > 0) { |
| 51 | *state = K_POLL_STATE_SEM_AVAILABLE; |
| 52 | return 1; |
| 53 | } |
| 54 | break; |
Luiz Augusto von Dentz | e5ed88f | 2017-02-21 15:27:20 +0200 | [diff] [blame] | 55 | case K_POLL_TYPE_DATA_AVAILABLE: |
| 56 | if (!k_queue_is_empty(event->queue)) { |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 57 | *state = K_POLL_STATE_FIFO_DATA_AVAILABLE; |
| 58 | return 1; |
| 59 | } |
| 60 | break; |
| 61 | case K_POLL_TYPE_SIGNAL: |
| 62 | if (event->signal->signaled) { |
| 63 | *state = K_POLL_STATE_SIGNALED; |
| 64 | return 1; |
| 65 | } |
| 66 | break; |
| 67 | case K_POLL_TYPE_IGNORE: |
| 68 | return 0; |
| 69 | default: |
| 70 | __ASSERT(0, "invalid event type (0x%x)\n", event->type); |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 71 | } |
| 72 | |
| 73 | return 0; |
| 74 | } |
| 75 | |
Luiz Augusto von Dentz | 7d01c5e | 2017-08-21 10:49:29 +0300 | [diff] [blame] | 76 | static inline void add_event(sys_dlist_t *events, struct k_poll_event *event, |
| 77 | struct _poller *poller) |
| 78 | { |
| 79 | struct k_poll_event *pending; |
| 80 | |
| 81 | pending = (struct k_poll_event *)sys_dlist_peek_tail(events); |
| 82 | if (!pending || _is_t1_higher_prio_than_t2(pending->poller->thread, |
| 83 | poller->thread)) { |
| 84 | sys_dlist_append(events, &event->_node); |
| 85 | return; |
| 86 | } |
| 87 | |
| 88 | SYS_DLIST_FOR_EACH_CONTAINER(events, pending, _node) { |
| 89 | if (_is_t1_higher_prio_than_t2(poller->thread, |
| 90 | pending->poller->thread)) { |
| 91 | sys_dlist_insert_before(events, &pending->_node, |
| 92 | &event->_node); |
| 93 | return; |
| 94 | } |
| 95 | } |
| 96 | |
| 97 | sys_dlist_append(events, &event->_node); |
| 98 | } |
| 99 | |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 100 | /* must be called with interrupts locked */ |
Luiz Augusto von Dentz | 7d01c5e | 2017-08-21 10:49:29 +0300 | [diff] [blame] | 101 | static inline int register_event(struct k_poll_event *event, |
| 102 | struct _poller *poller) |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 103 | { |
| 104 | switch (event->type) { |
| 105 | case K_POLL_TYPE_SEM_AVAILABLE: |
| 106 | __ASSERT(event->sem, "invalid semaphore\n"); |
Luiz Augusto von Dentz | 7d01c5e | 2017-08-21 10:49:29 +0300 | [diff] [blame] | 107 | add_event(&event->sem->poll_events, event, poller); |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 108 | break; |
Luiz Augusto von Dentz | e5ed88f | 2017-02-21 15:27:20 +0200 | [diff] [blame] | 109 | case K_POLL_TYPE_DATA_AVAILABLE: |
| 110 | __ASSERT(event->queue, "invalid queue\n"); |
Luiz Augusto von Dentz | 7d01c5e | 2017-08-21 10:49:29 +0300 | [diff] [blame] | 111 | add_event(&event->queue->poll_events, event, poller); |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 112 | break; |
| 113 | case K_POLL_TYPE_SIGNAL: |
Luiz Augusto von Dentz | 7d01c5e | 2017-08-21 10:49:29 +0300 | [diff] [blame] | 114 | __ASSERT(event->signal, "invalid poll signal\n"); |
| 115 | add_event(&event->signal->poll_events, event, poller); |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 116 | break; |
| 117 | case K_POLL_TYPE_IGNORE: |
| 118 | /* nothing to do */ |
| 119 | break; |
| 120 | default: |
| 121 | __ASSERT(0, "invalid event type\n"); |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 122 | } |
| 123 | |
Luiz Augusto von Dentz | 7d01c5e | 2017-08-21 10:49:29 +0300 | [diff] [blame] | 124 | event->poller = poller; |
| 125 | |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 126 | return 0; |
| 127 | } |
| 128 | |
/* Undo register_event() for a single event: drop the poller reference and
 * detach the event from its object's poll list; must be called with
 * interrupts locked. Safe on K_POLL_TYPE_IGNORE events, which were never
 * linked into any list.
 */
static inline void clear_event_registration(struct k_poll_event *event)
{
	event->poller = NULL;

	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem, "invalid semaphore\n");
		sys_dlist_remove(&event->_node);
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue, "invalid queue\n");
		sys_dlist_remove(&event->_node);
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal, "invalid poll signal\n");
		sys_dlist_remove(&event->_node);
		break;
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(0, "invalid event type\n");
	}
}
| 154 | |
| 155 | /* must be called with interrupts locked */ |
| 156 | static inline void clear_event_registrations(struct k_poll_event *events, |
| 157 | int last_registered, |
| 158 | unsigned int key) |
| 159 | { |
| 160 | for (; last_registered >= 0; last_registered--) { |
| 161 | clear_event_registration(&events[last_registered]); |
| 162 | irq_unlock(key); |
| 163 | key = irq_lock(); |
| 164 | } |
| 165 | } |
| 166 | |
Kumar Gala | cc334c7 | 2017-04-21 10:55:34 -0500 | [diff] [blame] | 167 | static inline void set_event_ready(struct k_poll_event *event, u32_t state) |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 168 | { |
| 169 | event->poller = NULL; |
| 170 | event->state |= state; |
| 171 | } |
| 172 | |
Andrew Boie | 3772f77 | 2018-05-07 16:52:57 -0700 | [diff] [blame] | 173 | int _impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout) |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 174 | { |
| 175 | __ASSERT(!_is_in_isr(), ""); |
| 176 | __ASSERT(events, "NULL events\n"); |
| 177 | __ASSERT(num_events > 0, "zero events\n"); |
| 178 | |
Luiz Augusto von Dentz | 7d01c5e | 2017-08-21 10:49:29 +0300 | [diff] [blame] | 179 | int last_registered = -1, rc; |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 180 | unsigned int key; |
| 181 | |
Andy Ross | 55a7e46 | 2018-05-31 11:58:09 -0700 | [diff] [blame] | 182 | struct _poller poller = { .thread = _current, .is_polling = 1, }; |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 183 | |
| 184 | /* find events whose condition is already fulfilled */ |
| 185 | for (int ii = 0; ii < num_events; ii++) { |
Kumar Gala | cc334c7 | 2017-04-21 10:55:34 -0500 | [diff] [blame] | 186 | u32_t state; |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 187 | |
| 188 | key = irq_lock(); |
| 189 | if (is_condition_met(&events[ii], &state)) { |
| 190 | set_event_ready(&events[ii], state); |
Andy Ross | 55a7e46 | 2018-05-31 11:58:09 -0700 | [diff] [blame] | 191 | poller.is_polling = 0; |
| 192 | } else if (timeout != K_NO_WAIT && poller.is_polling) { |
Luiz Augusto von Dentz | 7d01c5e | 2017-08-21 10:49:29 +0300 | [diff] [blame] | 193 | rc = register_event(&events[ii], &poller); |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 194 | if (rc == 0) { |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 195 | ++last_registered; |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 196 | } else { |
| 197 | __ASSERT(0, "unexpected return code\n"); |
| 198 | } |
| 199 | } |
| 200 | irq_unlock(key); |
| 201 | } |
| 202 | |
| 203 | key = irq_lock(); |
| 204 | |
| 205 | /* |
| 206 | * If we're not polling anymore, it means that at least one event |
| 207 | * condition is met, either when looping through the events here or |
Luiz Augusto von Dentz | 8786244 | 2017-09-07 10:57:27 +0300 | [diff] [blame] | 208 | * because one of the events registered has had its state changed. |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 209 | */ |
Andy Ross | 55a7e46 | 2018-05-31 11:58:09 -0700 | [diff] [blame] | 210 | if (!poller.is_polling) { |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 211 | clear_event_registrations(events, last_registered, key); |
| 212 | irq_unlock(key); |
Luiz Augusto von Dentz | 7d01c5e | 2017-08-21 10:49:29 +0300 | [diff] [blame] | 213 | return 0; |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 214 | } |
| 215 | |
Andy Ross | 55a7e46 | 2018-05-31 11:58:09 -0700 | [diff] [blame] | 216 | poller.is_polling = 0; |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 217 | |
| 218 | if (timeout == K_NO_WAIT) { |
| 219 | irq_unlock(key); |
| 220 | return -EAGAIN; |
| 221 | } |
| 222 | |
| 223 | _wait_q_t wait_q = _WAIT_Q_INIT(&wait_q); |
| 224 | |
Andy Ross | e0a572b | 2018-03-26 11:58:10 -0700 | [diff] [blame] | 225 | int swap_rc = _pend_current_thread(key, &wait_q, timeout); |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 226 | |
| 227 | /* |
| 228 | * Clear all event registrations. If events happen while we're in this |
| 229 | * loop, and we already had one that triggered, that's OK: they will |
| 230 | * end up in the list of events that are ready; if we timed out, and |
| 231 | * events happen while we're in this loop, that is OK as well since |
| 232 | * we've already know the return code (-EAGAIN), and even if they are |
| 233 | * added to the list of events that occurred, the user has to check the |
| 234 | * return code first, which invalidates the whole list of event states. |
| 235 | */ |
| 236 | key = irq_lock(); |
| 237 | clear_event_registrations(events, last_registered, key); |
| 238 | irq_unlock(key); |
| 239 | |
| 240 | return swap_rc; |
| 241 | } |
| 242 | |
Andrew Boie | 3772f77 | 2018-05-07 16:52:57 -0700 | [diff] [blame] | 243 | #ifdef CONFIG_USERSPACE |
/* Userspace entry for k_poll(): copies the user's event array into a
 * kernel-side buffer, validates every entry (mode, type, and object
 * permissions), runs the real k_poll(), then copies the updated states
 * back out to the user buffer.
 */
Z_SYSCALL_HANDLER(k_poll, events, num_events, timeout)
{
	int ret, key;
	struct k_poll_event *events_copy = NULL;
	unsigned int bounds;

	/* Validate the events buffer and make a copy of it in an
	 * allocated kernel-side buffer.
	 */
	if (Z_SYSCALL_VERIFY(num_events > 0)) {
		ret = -EINVAL;
		goto out;
	}
	/* reject sizes whose byte count overflows unsigned int */
	if (Z_SYSCALL_VERIFY_MSG(
		!__builtin_umul_overflow(num_events,
					sizeof(struct k_poll_event),
					&bounds), "num_events too large")) {
		ret = -EINVAL;
		goto out;
	}
	events_copy = z_thread_malloc(bounds);
	if (!events_copy) {
		ret = -ENOMEM;
		goto out;
	}

	/* check-and-copy under irq lock so the user mapping can't change
	 * between the write-permission check and the memcpy
	 */
	key = irq_lock();
	if (Z_SYSCALL_MEMORY_WRITE(events, bounds)) {
		irq_unlock(key);
		goto oops_free;
	}
	(void)memcpy(events_copy, (void *)events, bounds);
	irq_unlock(key);

	/* Validate what's inside events_copy */
	for (int i = 0; i < num_events; i++) {
		struct k_poll_event *e = &events_copy[i];

		if (Z_SYSCALL_VERIFY(e->mode == K_POLL_MODE_NOTIFY_ONLY)) {
			ret = -EINVAL;
			goto out_free;
		}

		/* each referenced kernel object must be one the caller
		 * has been granted access to; Z_OOPS kills the thread on
		 * violation (events_copy is then unreachable — NOTE(review):
		 * relies on thread cleanup, not freed here)
		 */
		switch (e->type) {
		case K_POLL_TYPE_IGNORE:
			break;
		case K_POLL_TYPE_SIGNAL:
			Z_OOPS(Z_SYSCALL_OBJ(e->signal, K_OBJ_POLL_SIGNAL));
			break;
		case K_POLL_TYPE_SEM_AVAILABLE:
			Z_OOPS(Z_SYSCALL_OBJ(e->sem, K_OBJ_SEM));
			break;
		case K_POLL_TYPE_DATA_AVAILABLE:
			Z_OOPS(Z_SYSCALL_OBJ(e->queue, K_OBJ_QUEUE));
			break;
		default:
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = k_poll(events_copy, num_events, timeout);
	/* publish updated event states back to the user's array */
	(void)memcpy((void *)events, events_copy, bounds);
out_free:
	k_free(events_copy);
out:
	return ret;
oops_free:
	/* free before oopsing so the buffer is not leaked */
	k_free(events_copy);
	Z_OOPS(1);
}
| 315 | #endif |
| 316 | |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 317 | /* must be called with interrupts locked */ |
Andy Ross | 8606fab | 2018-03-26 10:54:40 -0700 | [diff] [blame] | 318 | static int signal_poll_event(struct k_poll_event *event, u32_t state) |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 319 | { |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 320 | if (!event->poller) { |
| 321 | goto ready_event; |
| 322 | } |
| 323 | |
| 324 | struct k_thread *thread = event->poller->thread; |
| 325 | |
| 326 | __ASSERT(event->poller->thread, "poller should have a thread\n"); |
| 327 | |
Andy Ross | 55a7e46 | 2018-05-31 11:58:09 -0700 | [diff] [blame] | 328 | event->poller->is_polling = 0; |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 329 | |
| 330 | if (!_is_thread_pending(thread)) { |
| 331 | goto ready_event; |
| 332 | } |
| 333 | |
| 334 | if (_is_thread_timeout_expired(thread)) { |
| 335 | return -EAGAIN; |
| 336 | } |
| 337 | |
| 338 | _unpend_thread(thread); |
Luiz Augusto von Dentz | fc775a0 | 2017-10-17 15:33:32 +0300 | [diff] [blame] | 339 | _set_thread_return_value(thread, |
| 340 | state == K_POLL_STATE_NOT_READY ? -EINTR : 0); |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 341 | |
| 342 | if (!_is_thread_ready(thread)) { |
| 343 | goto ready_event; |
| 344 | } |
| 345 | |
Andy Ross | 85bc0a3 | 2018-03-09 12:17:45 -0800 | [diff] [blame] | 346 | _ready_thread(thread); |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 347 | |
| 348 | ready_event: |
| 349 | set_event_ready(event, state); |
| 350 | return 0; |
| 351 | } |
| 352 | |
Andy Ross | 8606fab | 2018-03-26 10:54:40 -0700 | [diff] [blame] | 353 | void _handle_obj_poll_events(sys_dlist_t *events, u32_t state) |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 354 | { |
Luiz Augusto von Dentz | 7d01c5e | 2017-08-21 10:49:29 +0300 | [diff] [blame] | 355 | struct k_poll_event *poll_event; |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 356 | |
Luiz Augusto von Dentz | 7d01c5e | 2017-08-21 10:49:29 +0300 | [diff] [blame] | 357 | poll_event = (struct k_poll_event *)sys_dlist_get(events); |
Andy Ross | 8606fab | 2018-03-26 10:54:40 -0700 | [diff] [blame] | 358 | if (poll_event) { |
| 359 | (void) signal_poll_event(poll_event, state); |
Luiz Augusto von Dentz | 7d01c5e | 2017-08-21 10:49:29 +0300 | [diff] [blame] | 360 | } |
Benjamin Walsh | acc68c1 | 2017-01-29 18:57:45 -0500 | [diff] [blame] | 361 | } |
| 362 | |
Andrew Boie | 3772f77 | 2018-05-07 16:52:57 -0700 | [diff] [blame] | 363 | void _impl_k_poll_signal_init(struct k_poll_signal *signal) |
Benjamin Walsh | a304f16 | 2017-02-02 16:46:09 -0500 | [diff] [blame] | 364 | { |
Luiz Augusto von Dentz | 7d01c5e | 2017-08-21 10:49:29 +0300 | [diff] [blame] | 365 | sys_dlist_init(&signal->poll_events); |
Benjamin Walsh | a304f16 | 2017-02-02 16:46:09 -0500 | [diff] [blame] | 366 | signal->signaled = 0; |
| 367 | /* signal->result is left unitialized */ |
Andrew Boie | 3772f77 | 2018-05-07 16:52:57 -0700 | [diff] [blame] | 368 | _k_object_init(signal); |
Benjamin Walsh | a304f16 | 2017-02-02 16:46:09 -0500 | [diff] [blame] | 369 | } |
| 370 | |
Andrew Boie | 3772f77 | 2018-05-07 16:52:57 -0700 | [diff] [blame] | 371 | #ifdef CONFIG_USERSPACE |
/* Userspace entry for k_poll_signal_init(): validates/initializes the
 * object handle, then delegates to the kernel implementation.
 */
Z_SYSCALL_HANDLER(k_poll_signal_init, signal)
{
	Z_OOPS(Z_SYSCALL_OBJ_INIT(signal, K_OBJ_POLL_SIGNAL));
	_impl_k_poll_signal_init((struct k_poll_signal *)signal);
	return 0;
}
| 378 | #endif |
| 379 | |
| 380 | void _impl_k_poll_signal_check(struct k_poll_signal *signal, |
| 381 | unsigned int *signaled, int *result) |
| 382 | { |
| 383 | *signaled = signal->signaled; |
| 384 | *result = signal->result; |
| 385 | } |
| 386 | |
| 387 | #ifdef CONFIG_USERSPACE |
/* Userspace entry for k_poll_signal_check(): verifies the object handle
 * and that both output pointers are writable by the caller, then
 * delegates to the kernel implementation.
 */
Z_SYSCALL_HANDLER(k_poll_signal_check, signal, signaled, result)
{
	Z_OOPS(Z_SYSCALL_OBJ(signal, K_OBJ_POLL_SIGNAL));
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(signaled, sizeof(unsigned int)));
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(result, sizeof(int)));

	_impl_k_poll_signal_check((struct k_poll_signal *)signal,
				  (unsigned int *)signaled, (int *)result);
	return 0;
}
| 398 | #endif |
| 399 | |
/* Raise a poll signal: record @a result, mark it signaled, and wake the
 * highest-priority waiter (head of the poll list) if there is one. May
 * cause an immediate reschedule.
 *
 * Returns 0, or -EAGAIN if the waiter's timeout has already expired (see
 * signal_poll_event()).
 */
int _impl_k_poll_signal(struct k_poll_signal *signal, int result)
{
	unsigned int key = irq_lock();
	struct k_poll_event *poll_event;

	signal->result = result;
	signal->signaled = 1;

	poll_event = (struct k_poll_event *)sys_dlist_get(&signal->poll_events);
	if (!poll_event) {
		/* signal latched; a later k_poll() will see it immediately */
		irq_unlock(key);
		return 0;
	}

	int rc = signal_poll_event(poll_event, K_POLL_STATE_SIGNALED);

	/* may context-switch to the woken thread; also releases the lock */
	_reschedule(key);
	return rc;
}
Andrew Boie | 3772f77 | 2018-05-07 16:52:57 -0700 | [diff] [blame] | 419 | |
| 420 | #ifdef CONFIG_USERSPACE |
/* Userspace entry for k_poll_signal(): validates the object handle and
 * forwards the result value unchanged.
 */
Z_SYSCALL_HANDLER(k_poll_signal, signal, result)
{
	Z_OOPS(Z_SYSCALL_OBJ(signal, K_OBJ_POLL_SIGNAL));
	return _impl_k_poll_signal((struct k_poll_signal *)signal, result);
}
/* k_poll_signal_reset(): simple one-object void syscall, generated */
Z_SYSCALL_HANDLER1_SIMPLE_VOID(k_poll_signal_reset, K_OBJ_POLL_SIGNAL,
			       struct k_poll_signal *);
| 428 | #endif |
| 429 | |