/*
 * Copyright (c) 2017 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Kernel asynchronous event polling interface.
 *
 * This polling mechanism allows waiting on multiple events concurrently,
 * whether they are triggered directly or originate from kernel objects
 * and other kernel constructs.
 */
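
/*
 * Example (an illustrative sketch, not part of this file): wait on a
 * semaphore and a FIFO at once. "my_sem" and "my_fifo" are hypothetical
 * objects assumed to be defined and initialized elsewhere.
 *
 *	struct k_poll_event events[2] = {
 *		K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 *					 K_POLL_MODE_NOTIFY_ONLY, &my_sem),
 *		K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_DATA_AVAILABLE,
 *					 K_POLL_MODE_NOTIFY_ONLY, &my_fifo),
 *	};
 *
 *	(void)k_poll(events, 2, K_FOREVER);
 *
 *	if (events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
 *		k_sem_take(&my_sem, K_NO_WAIT);
 *	}
 */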

#include <kernel.h>
#include <kernel_structs.h>
#include <kernel_internal.h>
#include <wait_q.h>
#include <ksched.h>
#include <syscall_handler.h>
#include <sys/dlist.h>
#include <sys/util.h>
#include <sys/__assert.h>
#include <stdbool.h>

/* Single subsystem lock. Locking per-event would be better on highly
 * contended SMP systems, but the original locking scheme here is
 * subtle (it relies on releasing/reacquiring the lock in areas for
 * latency control and it's sometimes hard to see exactly what data is
 * "inside" a given critical section). Do the synchronization port
 * later as an optimization.
 */
static struct k_spinlock lock;

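/* Poller dispatch mode: MODE_POLL wakes a thread blocked in k_poll(),
 * MODE_TRIGGERED submits a k_work_poll item to its workqueue, and
 * MODE_NONE means there is nothing to signal.
 */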
enum POLL_MODE { MODE_NONE, MODE_POLL, MODE_TRIGGERED };

static int signal_poller(struct k_poll_event *event, uint32_t state);
static int signal_triggered_work(struct k_poll_event *event, uint32_t status);

void k_poll_event_init(struct k_poll_event *event, uint32_t type,
		       int mode, void *obj)
{
	__ASSERT(mode == K_POLL_MODE_NOTIFY_ONLY,
		 "only NOTIFY_ONLY mode is supported\n");
	__ASSERT(type < (BIT(_POLL_NUM_TYPES)), "invalid type\n");
	__ASSERT(obj != NULL, "must provide an object\n");

	event->poller = NULL;
	/* event->tag is left uninitialized: the user will set it if needed */
	event->type = type;
	event->state = K_POLL_STATE_NOT_READY;
	event->mode = mode;
	event->unused = 0U;
	event->obj = obj;
}
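
/*
 * Runtime-initialization example (sketch): the event below watches a
 * hypothetical "my_signal" poll signal assumed to be defined elsewhere.
 *
 *	struct k_poll_event event;
 *
 *	k_poll_event_init(&event, K_POLL_TYPE_SIGNAL,
 *			  K_POLL_MODE_NOTIFY_ONLY, &my_signal);
 */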

/* must be called with interrupts locked */
static inline bool is_condition_met(struct k_poll_event *event, uint32_t *state)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		if (k_sem_count_get(event->sem) > 0U) {
			*state = K_POLL_STATE_SEM_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		if (!k_queue_is_empty(event->queue)) {
			*state = K_POLL_STATE_FIFO_DATA_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_SIGNAL:
		if (event->signal->signaled != 0U) {
			*state = K_POLL_STATE_SIGNALED;
			return true;
		}
		break;
	case K_POLL_TYPE_IGNORE:
		break;
	default:
		__ASSERT(false, "invalid event type (0x%x)\n", event->type);
		break;
	}

	return false;
}

static struct k_thread *poller_thread(struct z_poller *p)
{
	return p ? CONTAINER_OF(p, struct k_thread, poller) : NULL;
}

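/* Insert the event into the object's poll_events list, keeping the list
 * sorted by poller thread priority so that higher-priority pollers are
 * signaled first.
 */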
static inline void add_event(sys_dlist_t *events, struct k_poll_event *event,
			     struct z_poller *poller)
{
	struct k_poll_event *pending;

	pending = (struct k_poll_event *)sys_dlist_peek_tail(events);
	if ((pending == NULL) ||
	    (z_sched_prio_cmp(poller_thread(pending->poller),
			      poller_thread(poller)) > 0)) {
		sys_dlist_append(events, &event->_node);
		return;
	}

	SYS_DLIST_FOR_EACH_CONTAINER(events, pending, _node) {
		if (z_sched_prio_cmp(poller_thread(poller),
				     poller_thread(pending->poller)) > 0) {
			sys_dlist_insert(&pending->_node, &event->_node);
			return;
		}
	}

	sys_dlist_append(events, &event->_node);
}

/* must be called with interrupts locked */
static inline void register_event(struct k_poll_event *event,
				  struct z_poller *poller)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem != NULL, "invalid semaphore\n");
		add_event(&event->sem->poll_events, event, poller);
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue != NULL, "invalid queue\n");
		add_event(&event->queue->poll_events, event, poller);
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal != NULL, "invalid poll signal\n");
		add_event(&event->signal->poll_events, event, poller);
		break;
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(false, "invalid event type\n");
		break;
	}

	event->poller = poller;
}

/* must be called with interrupts locked */
static inline void clear_event_registration(struct k_poll_event *event)
{
	bool remove_event = false;

	event->poller = NULL;

	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem != NULL, "invalid semaphore\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue != NULL, "invalid queue\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal != NULL, "invalid poll signal\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(false, "invalid event type\n");
		break;
	}

	if (remove_event && sys_dnode_is_linked(&event->_node)) {
		sys_dlist_remove(&event->_node);
	}
}

/* must be called with interrupts locked */
static inline void clear_event_registrations(struct k_poll_event *events,
					     int num_events,
					     k_spinlock_key_t key)
{
	while (num_events--) {
		clear_event_registration(&events[num_events]);
		k_spin_unlock(&lock, key);
		key = k_spin_lock(&lock);
	}
}

static inline void set_event_ready(struct k_poll_event *event, uint32_t state)
{
	event->poller = NULL;
	event->state |= state;
}

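/* Checks each event and, if its condition is already met, marks it ready;
 * otherwise registers it with the poller (unless just_check is true or an
 * earlier condition was already met). Returns the number of events that
 * were actually registered.
 */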
static inline int register_events(struct k_poll_event *events,
				  int num_events,
				  struct z_poller *poller,
				  bool just_check)
{
	int events_registered = 0;

	for (int ii = 0; ii < num_events; ii++) {
		k_spinlock_key_t key;
		uint32_t state;

		key = k_spin_lock(&lock);
		if (is_condition_met(&events[ii], &state)) {
			set_event_ready(&events[ii], state);
			poller->is_polling = false;
		} else if (!just_check && poller->is_polling) {
			register_event(&events[ii], poller);
			events_registered += 1;
		}
		k_spin_unlock(&lock, key);
	}

	return events_registered;
}

static int signal_poller(struct k_poll_event *event, uint32_t state)
{
	struct k_thread *thread = poller_thread(event->poller);

	__ASSERT(thread != NULL, "poller should have a thread\n");

	if (!z_is_thread_pending(thread)) {
		return 0;
	}

	if (z_is_thread_timeout_expired(thread)) {
		return -EAGAIN;
	}

	z_unpend_thread(thread);
	arch_thread_return_value_set(thread,
		state == K_POLL_STATE_CANCELLED ? -EINTR : 0);

	if (!z_is_thread_ready(thread)) {
		return 0;
	}

	z_ready_thread(thread);

	return 0;
}

int z_impl_k_poll(struct k_poll_event *events, int num_events,
		  k_timeout_t timeout)
{
	int events_registered;
	k_spinlock_key_t key;
	struct z_poller *poller = &_current->poller;

	poller->is_polling = true;
	poller->mode = MODE_POLL;

	__ASSERT(!arch_is_in_isr(), "");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events >= 0, "<0 events\n");

	events_registered = register_events(events, num_events, poller,
					    K_TIMEOUT_EQ(timeout, K_NO_WAIT));

	key = k_spin_lock(&lock);

	/*
	 * If we're not polling anymore, it means that at least one event
	 * condition is met, either when looping through the events here or
	 * because one of the events registered has had its state changed.
	 */
	if (!poller->is_polling) {
		clear_event_registrations(events, events_registered, key);
		k_spin_unlock(&lock, key);
		return 0;
	}

	poller->is_polling = false;

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&lock, key);
		return -EAGAIN;
	}

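	/* This wait queue is shared by all pollers: it is never used to
	 * select a thread to wake (signal_poller() unpends the polling
	 * thread directly), so a single static instance is sufficient.
	 */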
	static _wait_q_t wait_q = Z_WAIT_Q_INIT(&wait_q);

	int swap_rc = z_pend_curr(&lock, key, &wait_q, timeout);

	/*
	 * Clear all event registrations. If events happen while we're in this
	 * loop, and we already had one that triggered, that's OK: they will
	 * end up in the list of events that are ready; if we timed out, and
	 * events happen while we're in this loop, that is OK as well since
	 * we already know the return code (-EAGAIN), and even if they are
	 * added to the list of events that occurred, the user has to check the
	 * return code first, which invalidates the whole list of event states.
	 */
	key = k_spin_lock(&lock);
	clear_event_registrations(events, events_registered, key);
	k_spin_unlock(&lock, key);

	return swap_rc;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_poll(struct k_poll_event *events,
				int num_events, k_timeout_t timeout)
{
	int ret;
	k_spinlock_key_t key;
	struct k_poll_event *events_copy = NULL;
	uint32_t bounds;

	/* Validate the events buffer and make a copy of it in an
	 * allocated kernel-side buffer.
	 */
	if (Z_SYSCALL_VERIFY(num_events >= 0)) {
		ret = -EINVAL;
		goto out;
	}
	if (Z_SYSCALL_VERIFY_MSG(!u32_mul_overflow(num_events,
						   sizeof(struct k_poll_event),
						   &bounds),
				 "num_events too large")) {
		ret = -EINVAL;
		goto out;
	}
	events_copy = z_thread_malloc(bounds);
	if (!events_copy) {
		ret = -ENOMEM;
		goto out;
	}

	key = k_spin_lock(&lock);
	if (Z_SYSCALL_MEMORY_WRITE(events, bounds)) {
		k_spin_unlock(&lock, key);
		goto oops_free;
	}
	(void)memcpy(events_copy, events, bounds);
	k_spin_unlock(&lock, key);

	/* Validate what's inside events_copy */
	for (int i = 0; i < num_events; i++) {
		struct k_poll_event *e = &events_copy[i];

		if (Z_SYSCALL_VERIFY(e->mode == K_POLL_MODE_NOTIFY_ONLY)) {
			ret = -EINVAL;
			goto out_free;
		}

		switch (e->type) {
		case K_POLL_TYPE_IGNORE:
			break;
		case K_POLL_TYPE_SIGNAL:
			Z_OOPS(Z_SYSCALL_OBJ(e->signal, K_OBJ_POLL_SIGNAL));
			break;
		case K_POLL_TYPE_SEM_AVAILABLE:
			Z_OOPS(Z_SYSCALL_OBJ(e->sem, K_OBJ_SEM));
			break;
		case K_POLL_TYPE_DATA_AVAILABLE:
			Z_OOPS(Z_SYSCALL_OBJ(e->queue, K_OBJ_QUEUE));
			break;
		default:
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = k_poll(events_copy, num_events, timeout);
	(void)memcpy((void *)events, events_copy, bounds);
out_free:
	k_free(events_copy);
out:
	return ret;
oops_free:
	k_free(events_copy);
	Z_OOPS(1);
}
#include <syscalls/k_poll_mrsh.c>
#endif

/* must be called with interrupts locked */
static int signal_poll_event(struct k_poll_event *event, uint32_t state)
{
	struct z_poller *poller = event->poller;
	int retcode = 0;

	if (poller != NULL) {
		if (poller->mode == MODE_POLL) {
			retcode = signal_poller(event, state);
		} else if (poller->mode == MODE_TRIGGERED) {
			retcode = signal_triggered_work(event, state);
		}

		poller->is_polling = false;

		if (retcode < 0) {
			return retcode;
		}
	}

	set_event_ready(event, state);
	return retcode;
}

void z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state)
{
	struct k_poll_event *poll_event;

	poll_event = (struct k_poll_event *)sys_dlist_get(events);
	if (poll_event != NULL) {
		(void) signal_poll_event(poll_event, state);
	}
}

void z_impl_k_poll_signal_init(struct k_poll_signal *sig)
{
	sys_dlist_init(&sig->poll_events);
	sig->signaled = 0U;
	/* sig->result is left uninitialized */
	z_object_init(sig);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_poll_signal_init(struct k_poll_signal *sig)
{
	Z_OOPS(Z_SYSCALL_OBJ_INIT(sig, K_OBJ_POLL_SIGNAL));
	z_impl_k_poll_signal_init(sig);
}
#include <syscalls/k_poll_signal_init_mrsh.c>
#endif

void z_impl_k_poll_signal_check(struct k_poll_signal *sig,
				unsigned int *signaled, int *result)
{
	*signaled = sig->signaled;
	*result = sig->result;
}

#ifdef CONFIG_USERSPACE
void z_vrfy_k_poll_signal_check(struct k_poll_signal *sig,
				unsigned int *signaled, int *result)
{
	Z_OOPS(Z_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(signaled, sizeof(unsigned int)));
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(result, sizeof(int)));
	z_impl_k_poll_signal_check(sig, signaled, result);
}
#include <syscalls/k_poll_signal_check_mrsh.c>
#endif

int z_impl_k_poll_signal_raise(struct k_poll_signal *sig, int result)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	struct k_poll_event *poll_event;

	sig->result = result;
	sig->signaled = 1U;

	poll_event = (struct k_poll_event *)sys_dlist_get(&sig->poll_events);
	if (poll_event == NULL) {
		k_spin_unlock(&lock, key);
		return 0;
	}

	int rc = signal_poll_event(poll_event, K_POLL_STATE_SIGNALED);

	z_reschedule(&lock, key);
	return rc;
}
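
/*
 * Example (sketch): pairing a signal with a poller. One thread polls on a
 * hypothetical "my_signal"; another thread raises it with a result code.
 *
 *	struct k_poll_signal my_signal;
 *	struct k_poll_event event;
 *
 *	k_poll_signal_init(&my_signal);
 *	k_poll_event_init(&event, K_POLL_TYPE_SIGNAL,
 *			  K_POLL_MODE_NOTIFY_ONLY, &my_signal);
 *
 *	(void)k_poll(&event, 1, K_FOREVER);
 *	if (event.state == K_POLL_STATE_SIGNALED) {
 *		// my_signal.result holds the value passed to
 *		// k_poll_signal_raise(); reset the signal before reuse:
 *		k_poll_signal_reset(&my_signal);
 *	}
 */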

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_poll_signal_raise(struct k_poll_signal *sig,
					     int result)
{
	Z_OOPS(Z_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	return z_impl_k_poll_signal_raise(sig, result);
}
#include <syscalls/k_poll_signal_raise_mrsh.c>

static inline void z_vrfy_k_poll_signal_reset(struct k_poll_signal *sig)
{
	Z_OOPS(Z_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	z_impl_k_poll_signal_reset(sig);
}
#include <syscalls/k_poll_signal_reset_mrsh.c>

#endif

static void triggered_work_handler(struct k_work *work)
{
	struct k_work_poll *twork =
		CONTAINER_OF(work, struct k_work_poll, work);

	/*
	 * If the poller mode is still MODE_NONE,
	 * k_work_poll_submit_to_queue() submitted the work directly and
	 * already cleared the event registrations.
	 */
	if (twork->poller.mode != MODE_NONE) {
		k_spinlock_key_t key;

		key = k_spin_lock(&lock);
		clear_event_registrations(twork->events,
					  twork->num_events, key);
		k_spin_unlock(&lock, key);
	}

	/* Drop work ownership and execute real handler. */
	twork->workq = NULL;
	twork->real_handler(work);
}

static void triggered_work_expiration_handler(struct _timeout *timeout)
{
	struct k_work_poll *twork =
		CONTAINER_OF(timeout, struct k_work_poll, timeout);

	twork->poller.is_polling = false;
	twork->poll_result = -EAGAIN;
	k_work_submit_to_queue(twork->workq, &twork->work);
}

static int signal_triggered_work(struct k_poll_event *event, uint32_t status)
{
	struct z_poller *poller = event->poller;
	struct k_work_poll *twork =
		CONTAINER_OF(poller, struct k_work_poll, poller);

	if (poller->is_polling && twork->workq != NULL) {
		struct k_work_q *work_q = twork->workq;

		z_abort_timeout(&twork->timeout);
		twork->poll_result = 0;
		k_work_submit_to_queue(work_q, &twork->work);
	}

	return 0;
}

static int triggered_work_cancel(struct k_work_poll *work,
				 k_spinlock_key_t key)
{
	/* Check if the work is still waiting for events. */
	if (work->poller.is_polling && work->poller.mode != MODE_NONE) {
		/* Remove the timeout associated with the work. */
		z_abort_timeout(&work->timeout);

		/*
		 * Prevent work execution if an event arrives while we
		 * are clearing the registrations.
		 */
		work->poller.mode = MODE_NONE;

		/* Clear registrations and work ownership. */
		clear_event_registrations(work->events, work->num_events, key);
		work->workq = NULL;
		return 0;
	}

	/*
	 * If we reached here, the work is either being registered in
	 * k_work_poll_submit_to_queue(), executing, or pending. Only in
	 * the last case do we have a chance to cancel it, but
	 * unfortunately there is no public API performing this task.
	 */

	return -EINVAL;
}

void k_work_poll_init(struct k_work_poll *work,
		      k_work_handler_t handler)
{
	*work = (struct k_work_poll) {};
	k_work_init(&work->work, triggered_work_handler);
	work->real_handler = handler;
	z_init_timeout(&work->timeout);
}

int k_work_poll_submit_to_queue(struct k_work_q *work_q,
				struct k_work_poll *work,
				struct k_poll_event *events,
				int num_events,
				k_timeout_t timeout)
{
	int events_registered;
	k_spinlock_key_t key;

	__ASSERT(work_q != NULL, "NULL work_q\n");
	__ASSERT(work != NULL, "NULL work\n");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events > 0, "zero events\n");

	/* Take ownership of the work if it is possible. */
	key = k_spin_lock(&lock);
	if (work->workq != NULL) {
		if (work->workq == work_q) {
			int retval;

			retval = triggered_work_cancel(work, key);
			if (retval < 0) {
				k_spin_unlock(&lock, key);
				return retval;
			}
		} else {
			k_spin_unlock(&lock, key);
			return -EADDRINUSE;
		}
	}

	work->poller.is_polling = true;
	work->workq = work_q;
	work->poller.mode = MODE_NONE;
	k_spin_unlock(&lock, key);

	/* Save list of events. */
	work->events = events;
	work->num_events = num_events;

	/* Clear result */
	work->poll_result = -EINPROGRESS;

	/* Register events */
	events_registered = register_events(events, num_events,
					    &work->poller, false);

	key = k_spin_lock(&lock);
	if (work->poller.is_polling && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		/*
		 * Poller is still polling.
		 * No event is ready and all are watched.
		 */
		__ASSERT(num_events == events_registered,
			 "Some events were not registered!\n");

		/* Setup timeout if such action is requested */
		if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
			z_add_timeout(&work->timeout,
				      triggered_work_expiration_handler,
				      timeout);
		}

		/* From now, any event will result in submitted work. */
		work->poller.mode = MODE_TRIGGERED;
		k_spin_unlock(&lock, key);
		return 0;
	}

	/*
	 * Either the K_NO_WAIT timeout was specified, or at least one event
	 * was ready at registration time or has changed state since
	 * registration. The poller mode was never set to MODE_TRIGGERED, so
	 * no event could have submitted the work to the workqueue.
	 */

	/*
	 * If the poller is still polling, no watched event occurred. This
	 * means we reached here due to the K_NO_WAIT timeout "expiring".
	 */
	if (work->poller.is_polling) {
		work->poller.is_polling = false;
		work->poll_result = -EAGAIN;
	} else {
		work->poll_result = 0;
	}

	/* Clear registrations. */
	clear_event_registrations(events, events_registered, key);
	k_spin_unlock(&lock, key);

	/* Submit work. */
	k_work_submit_to_queue(work_q, &work->work);

	return 0;
}
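
/*
 * Example (sketch): triggered work that runs when a hypothetical "my_fifo"
 * receives data, or is submitted with poll_result == -EAGAIN after 100 ms.
 * "my_work_q" and "my_handler" are assumed to be defined elsewhere.
 *
 *	static struct k_work_poll work;
 *	static struct k_poll_event event =
 *		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_DATA_AVAILABLE,
 *						K_POLL_MODE_NOTIFY_ONLY,
 *						&my_fifo, 0);
 *
 *	k_work_poll_init(&work, my_handler);
 *	k_work_poll_submit_to_queue(&my_work_q, &work, &event, 1,
 *				    K_MSEC(100));
 */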

int k_work_poll_cancel(struct k_work_poll *work)
{
	k_spinlock_key_t key;
	int retval;

	/* Check if the work was submitted. */
	if (work == NULL || work->workq == NULL) {
		return -EINVAL;
	}

	key = k_spin_lock(&lock);
	retval = triggered_work_cancel(work, key);
	k_spin_unlock(&lock, key);

	return retval;
}