/*
 * Copyright (c) 2021 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file event objects library
 *
 * Event objects are used to signal one or more threads that a custom set of
 * events has occurred. Threads wait on event objects until another thread or
 * ISR posts the desired set of events to the event object. Each time events
 * are posted to an event object, all threads waiting on that event object are
 * processed to determine if there is a match. All threads whose wait
 * conditions match the current set of events belonging to the event object
 * are awakened.
 *
 * Threads waiting on an event object have the option of waking once either
 * any or all of the events they desire have been posted to the event object.
 *
 * @brief Kernel event object
 */
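
/*
 * A minimal usage sketch (hypothetical application code, not part of this
 * file; the event bit names and the thread/ISR functions below are invented
 * for illustration):
 *
 * @code
 * #define EVT_RX_DONE BIT(0)
 * #define EVT_TX_DONE BIT(1)
 *
 * K_EVENT_DEFINE(my_event);
 *
 * void consumer_thread(void)
 * {
 *	uint32_t events;
 *
 *	// Block until both bits have been posted, or give up after 100 ms.
 *	events = k_event_wait_all(&my_event, EVT_RX_DONE | EVT_TX_DONE,
 *				  false, K_MSEC(100));
 *	if (events == 0) {
 *		// Timed out before both events were posted.
 *	}
 * }
 *
 * void rx_isr(const void *arg)
 * {
 *	// OR the RX bit into the event object, waking matching waiters.
 *	k_event_post(&my_event, EVT_RX_DONE);
 * }
 * @endcode
 */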

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>

#include <zephyr/toolchain.h>
#include <zephyr/wait_q.h>
#include <zephyr/sys/dlist.h>
#include <ksched.h>
#include <zephyr/init.h>
#include <zephyr/syscall_handler.h>
#include <zephyr/tracing/tracing.h>
#include <zephyr/sys/check.h>

#define K_EVENT_WAIT_ANY   0x00	/* Wait for any events */
#define K_EVENT_WAIT_ALL   0x01	/* Wait for all events */
#define K_EVENT_WAIT_MASK  0x01

#define K_EVENT_WAIT_RESET 0x02	/* Reset events prior to waiting */

void z_impl_k_event_init(struct k_event *event)
{
	event->events = 0;
	event->lock = (struct k_spinlock) {};

	SYS_PORT_TRACING_OBJ_INIT(k_event, event);

	z_waitq_init(&event->wait_q);

	z_object_init(event);
}

#ifdef CONFIG_USERSPACE
void z_vrfy_k_event_init(struct k_event *event)
{
	Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(event, K_OBJ_EVENT));
	z_impl_k_event_init(event);
}
#include <syscalls/k_event_init_mrsh.c>
#endif
/**
 * @brief Determine if the desired set of events has been satisfied
 *
 * This routine determines if the current set of events satisfies the desired
 * set of events. If @a wait_condition is K_EVENT_WAIT_ALL, then at least
 * all the desired events must be present to satisfy the request. If @a
 * wait_condition is not K_EVENT_WAIT_ALL, it is assumed to be K_EVENT_WAIT_ANY.
 * In the K_EVENT_WAIT_ANY case, the request is satisfied when at least one
 * of the desired events is present in the current set of events.
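 *
 * For example, with the hypothetical values desired = 0x05 and
 * current = 0x04: K_EVENT_WAIT_ANY is satisfied, since
 * (0x04 & 0x05) != 0, while K_EVENT_WAIT_ALL is not, since
 * (0x04 & 0x05) != 0x05.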
 */
static bool are_wait_conditions_met(uint32_t desired, uint32_t current,
				    unsigned int wait_condition)
{
	uint32_t match = current & desired;

	if (wait_condition == K_EVENT_WAIT_ALL) {
		return match == desired;
	}

	/* wait_condition assumed to be K_EVENT_WAIT_ANY */

	return match != 0;
}

static void k_event_post_internal(struct k_event *event, uint32_t events,
				  uint32_t events_mask)
{
	k_spinlock_key_t key;
	struct k_thread *thread;
	unsigned int wait_condition;
	struct k_thread *head = NULL;

	key = k_spin_lock(&event->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_event, post, event, events,
					events_mask);
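
	/*
	 * Update the event bitfield: bits outside events_mask keep their
	 * previous value, while bits inside events_mask take the newly
	 * posted value.
	 */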
	events = (event->events & ~events_mask) |
		 (events & events_mask);
	event->events = events;

	/*
	 * Posting an event has the potential to wake multiple pended threads.
	 * It is desirable to unpend all affected threads simultaneously. To
	 * do so, this must be done in three steps as it is unsafe to unpend
	 * threads from within the _WAIT_Q_FOR_EACH() loop.
	 *
	 * 1. Create a linked list of threads to unpend.
	 * 2. Unpend each of the threads in the linked list.
	 * 3. Ready each of the threads in the linked list.
	 */

	_WAIT_Q_FOR_EACH(&event->wait_q, thread) {
		wait_condition = thread->event_options & K_EVENT_WAIT_MASK;

		if (are_wait_conditions_met(thread->events, events,
					    wait_condition)) {
			/*
			 * The wait conditions have been satisfied. Add this
			 * thread to the list of threads to unpend.
			 */

			thread->next_event_link = head;
			head = thread;
		}
	}

	if (head != NULL) {
		thread = head;
		do {
			z_unpend_thread(thread);
			arch_thread_return_value_set(thread, 0);
			thread->events = events;
			z_ready_thread(thread);
			thread = thread->next_event_link;
		} while (thread != NULL);
	}

	z_reschedule(&event->lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, post, event, events,
				       events_mask);
}

void z_impl_k_event_post(struct k_event *event, uint32_t events)
{
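	/*
	 * Passing the events as their own mask ORs them into the event
	 * object without disturbing any previously posted events.
	 */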
	k_event_post_internal(event, events, events);
}

#ifdef CONFIG_USERSPACE
void z_vrfy_k_event_post(struct k_event *event, uint32_t events)
{
	Z_OOPS(Z_SYSCALL_OBJ(event, K_OBJ_EVENT));
	z_impl_k_event_post(event, events);
}
#include <syscalls/k_event_post_mrsh.c>
#endif

void z_impl_k_event_set(struct k_event *event, uint32_t events)
{
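	/*
	 * A mask of ~0 covers every bit, so the event object's entire set
	 * of events is overwritten with the new value.
	 */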
	k_event_post_internal(event, events, ~0);
}

#ifdef CONFIG_USERSPACE
void z_vrfy_k_event_set(struct k_event *event, uint32_t events)
{
	Z_OOPS(Z_SYSCALL_OBJ(event, K_OBJ_EVENT));
	z_impl_k_event_set(event, events);
}
#include <syscalls/k_event_set_mrsh.c>
#endif

static uint32_t k_event_wait_internal(struct k_event *event, uint32_t events,
				      unsigned int options, k_timeout_t timeout)
{
	uint32_t rv = 0;
	unsigned int wait_condition;
	struct k_thread *thread;
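
	/* Waiting with a timeout is not permitted from within an ISR. */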
	__ASSERT(((arch_is_in_isr() == false) ||
		  K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_event, wait, event, events,
					options, timeout);

	if (events == 0) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, wait, event, events, 0);
		return 0;
	}

	wait_condition = options & K_EVENT_WAIT_MASK;
	thread = z_current_get();

	k_spinlock_key_t key = k_spin_lock(&event->lock);
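
	/* If requested, discard any previously posted events before waiting. */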
	if (options & K_EVENT_WAIT_RESET) {
		event->events = 0;
	}

	/* Test if the wait conditions have already been met. */

	if (are_wait_conditions_met(events, event->events, wait_condition)) {
		rv = event->events;

		k_spin_unlock(&event->lock, key);
		goto out;
	}

	/* Match conditions have not been met. */

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&event->lock, key);
		goto out;
	}

	/*
	 * The caller must pend to wait for the match. Save the desired
	 * set of events in the k_thread structure.
	 */

	thread->events = events;
	thread->event_options = options;

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_event, wait, event, events,
					   options, timeout);

	if (z_pend_curr(&event->lock, key, &event->wait_q, timeout) == 0) {
		/* Retrieve the set of events that woke the thread */
		rv = thread->events;
	}

out:
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, wait, event,
				       events, rv & events);

	return rv & events;
}

/**
 * Wait for any of the specified events
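 *
 * A hypothetical call such as k_event_wait(&ev, BIT(0) | BIT(1), false,
 * K_FOREVER) returns as soon as either bit 0 or bit 1 has been posted
 * to @a ev.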
 */
uint32_t z_impl_k_event_wait(struct k_event *event, uint32_t events,
			     bool reset, k_timeout_t timeout)
{
	uint32_t options = reset ? K_EVENT_WAIT_RESET : 0;

	return k_event_wait_internal(event, events, options, timeout);
}

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_wait(struct k_event *event, uint32_t events,
			     bool reset, k_timeout_t timeout)
{
	Z_OOPS(Z_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_wait(event, events, reset, timeout);
}
#include <syscalls/k_event_wait_mrsh.c>
#endif

/**
 * Wait for all of the specified events
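 *
 * A hypothetical call such as k_event_wait_all(&ev, BIT(0) | BIT(1), false,
 * K_FOREVER) returns only once both bit 0 and bit 1 have been posted to
 * @a ev.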
 */
uint32_t z_impl_k_event_wait_all(struct k_event *event, uint32_t events,
				 bool reset, k_timeout_t timeout)
{
	uint32_t options = reset ? (K_EVENT_WAIT_RESET | K_EVENT_WAIT_ALL)
				 : K_EVENT_WAIT_ALL;

	return k_event_wait_internal(event, events, options, timeout);
}

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_wait_all(struct k_event *event, uint32_t events,
				 bool reset, k_timeout_t timeout)
{
	Z_OOPS(Z_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_wait_all(event, events, reset, timeout);
}
#include <syscalls/k_event_wait_all_mrsh.c>
#endif