Peter Mitsis | ae394bf | 2021-09-20 14:14:32 -0400 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2021 Intel Corporation |
| 3 | * |
| 4 | * SPDX-License-Identifier: Apache-2.0 |
| 5 | */ |
| 6 | |
| 7 | /** |
| 8 | * @file event objects library |
| 9 | * |
| 10 | * Event objects are used to signal one or more threads that a custom set of |
| 11 | * events has occurred. Threads wait on event objects until another thread or |
| 12 | * ISR posts the desired set of events to the event object. Each time events |
| 13 | * are posted to an event object, all threads waiting on that event object are |
 * processed to determine if there is a match. All threads whose wait
 * conditions match the current set of events now belonging to the event object
 * are awakened.
| 17 | * |
 * Threads waiting on an event object have the option of waking once either
 * any or all of the events they desire have been posted to the event object.
| 20 | * |
| 21 | * @brief Kernel event object |
| 22 | */ |
| 23 | |
Gerard Marull-Paretas | cffefc8 | 2022-05-06 11:04:23 +0200 | [diff] [blame] | 24 | #include <zephyr/kernel.h> |
| 25 | #include <zephyr/kernel_structs.h> |
Peter Mitsis | ae394bf | 2021-09-20 14:14:32 -0400 | [diff] [blame] | 26 | |
Gerard Marull-Paretas | cffefc8 | 2022-05-06 11:04:23 +0200 | [diff] [blame] | 27 | #include <zephyr/toolchain.h> |
Gerard Marull-Paretas | cffefc8 | 2022-05-06 11:04:23 +0200 | [diff] [blame] | 28 | #include <zephyr/sys/dlist.h> |
Gerard Marull-Paretas | cffefc8 | 2022-05-06 11:04:23 +0200 | [diff] [blame] | 29 | #include <zephyr/init.h> |
Anas Nashif | 4e39617 | 2023-09-26 22:46:01 +0000 | [diff] [blame] | 30 | #include <zephyr/internal/syscall_handler.h> |
Gerard Marull-Paretas | cffefc8 | 2022-05-06 11:04:23 +0200 | [diff] [blame] | 31 | #include <zephyr/tracing/tracing.h> |
| 32 | #include <zephyr/sys/check.h> |
Anas Nashif | 8634c3b | 2023-08-29 17:03:12 +0000 | [diff] [blame] | 33 | /* private kernel APIs */ |
| 34 | #include <wait_q.h> |
| 35 | #include <ksched.h> |
Peter Mitsis | ae394bf | 2021-09-20 14:14:32 -0400 | [diff] [blame] | 36 | |
/*
 * Wait options stored in thread->event_options. Bit 0 selects the matching
 * rule (ANY vs ALL); bit 1 requests that the event set be cleared before
 * pending.
 */
#define K_EVENT_WAIT_ANY 0x00 /* Wait for any events */
#define K_EVENT_WAIT_ALL 0x01 /* Wait for all events */
#define K_EVENT_WAIT_MASK 0x01 /* Extracts the ANY/ALL matching rule */

#define K_EVENT_WAIT_RESET 0x02 /* Reset events prior to waiting */
| 42 | |
/* Context threaded through z_sched_waitq_walk() by k_event_post_internal() */
struct event_walk_data {
	struct k_thread *head;  /* Singly linked list of threads to wake */
	uint32_t events;        /* Event set after the post was applied */
};
| 47 | |
#ifdef CONFIG_OBJ_CORE_EVENT
/* Object core type shared by all k_event instances */
static struct k_obj_type obj_type_event;
#endif /* CONFIG_OBJ_CORE_EVENT */
/**
 * @brief Initialize an event object
 *
 * Clears the set of posted events, resets the spinlock, initializes the
 * wait queue, and registers the event as a kernel object (and, when
 * enabled, with the object core framework).
 */
void z_impl_k_event_init(struct k_event *event)
{
	event->events = 0;
	event->lock = (struct k_spinlock) {};

	SYS_PORT_TRACING_OBJ_INIT(k_event, event);

	z_waitq_init(&event->wait_q);

	k_object_init(event);

#ifdef CONFIG_OBJ_CORE_EVENT
	k_obj_core_init_and_link(K_OBJ_CORE(event), &obj_type_event);
#endif /* CONFIG_OBJ_CORE_EVENT */
}
| 67 | |
#ifdef CONFIG_USERSPACE
/*
 * Syscall verification handler: oops the caller unless @a event is a valid,
 * never-before-initialized kernel object, then defer to the implementation.
 */
void z_vrfy_k_event_init(struct k_event *event)
{
	K_OOPS(K_SYSCALL_OBJ_NEVER_INIT(event, K_OBJ_EVENT));
	z_impl_k_event_init(event);
}
#include <zephyr/syscalls/k_event_init_mrsh.c>
#endif /* CONFIG_USERSPACE */
Peter Mitsis | ae394bf | 2021-09-20 14:14:32 -0400 | [diff] [blame] | 76 | |
| 77 | /** |
| 78 | * @brief determine if desired set of events been satisfied |
| 79 | * |
| 80 | * This routine determines if the current set of events satisfies the desired |
| 81 | * set of events. If @a wait_condition is K_EVENT_WAIT_ALL, then at least |
| 82 | * all the desired events must be present to satisfy the request. If @a |
| 83 | * wait_condition is not K_EVENT_WAIT_ALL, it is assumed to be K_EVENT_WAIT_ANY. |
| 84 | * In the K_EVENT_WAIT_ANY case, the request is satisfied when any of the |
| 85 | * current set of events are present in the desired set of events. |
| 86 | */ |
| 87 | static bool are_wait_conditions_met(uint32_t desired, uint32_t current, |
| 88 | unsigned int wait_condition) |
| 89 | { |
| 90 | uint32_t match = current & desired; |
| 91 | |
| 92 | if (wait_condition == K_EVENT_WAIT_ALL) { |
| 93 | return match == desired; |
| 94 | } |
| 95 | |
| 96 | /* wait_condition assumed to be K_EVENT_WAIT_ANY */ |
| 97 | |
| 98 | return match != 0; |
| 99 | } |
| 100 | |
Aastha Grover | a2dccf1 | 2023-03-01 14:27:33 -0500 | [diff] [blame] | 101 | static int event_walk_op(struct k_thread *thread, void *data) |
| 102 | { |
| 103 | unsigned int wait_condition; |
| 104 | struct event_walk_data *event_data = data; |
| 105 | |
| 106 | wait_condition = thread->event_options & K_EVENT_WAIT_MASK; |
| 107 | |
| 108 | if (are_wait_conditions_met(thread->events, event_data->events, |
| 109 | wait_condition)) { |
Aastha Grover | 877fc3d | 2023-03-08 16:56:31 -0500 | [diff] [blame] | 110 | |
| 111 | /* |
| 112 | * Events create a list of threads to wake up. We do |
| 113 | * not want z_thread_timeout to wake these threads; they |
| 114 | * will be woken up by k_event_post_internal once they |
| 115 | * have been processed. |
| 116 | */ |
| 117 | thread->no_wake_on_timeout = true; |
| 118 | |
Aastha Grover | a2dccf1 | 2023-03-01 14:27:33 -0500 | [diff] [blame] | 119 | /* |
| 120 | * The wait conditions have been satisfied. Add this |
| 121 | * thread to the list of threads to unpend. |
| 122 | */ |
Aastha Grover | a2dccf1 | 2023-03-01 14:27:33 -0500 | [diff] [blame] | 123 | thread->next_event_link = event_data->head; |
| 124 | event_data->head = thread; |
Aastha Grover | 877fc3d | 2023-03-08 16:56:31 -0500 | [diff] [blame] | 125 | z_abort_timeout(&thread->base.timeout); |
Aastha Grover | a2dccf1 | 2023-03-01 14:27:33 -0500 | [diff] [blame] | 126 | } |
| 127 | |
| 128 | return 0; |
| 129 | } |
| 130 | |
/**
 * @brief Post events to an event object under a mask
 *
 * Bits selected by @a events_mask are overwritten with the corresponding
 * bits of @a events; bits outside the mask are preserved. Every thread
 * pended on the event whose wait conditions are satisfied by the resulting
 * set is woken.
 *
 * @return Previous value of the masked bits of the event set
 */
static uint32_t k_event_post_internal(struct k_event *event, uint32_t events,
				      uint32_t events_mask)
{
	k_spinlock_key_t key;
	struct k_thread *thread;
	struct event_walk_data data;
	uint32_t previous_events;

	data.head = NULL;
	key = k_spin_lock(&event->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_event, post, event, events,
					events_mask);

	previous_events = event->events & events_mask;
	/* Merge the new events into the existing set, honoring the mask */
	events = (event->events & ~events_mask) |
		 (events & events_mask);
	event->events = events;
	data.events = events;
	/*
	 * Posting an event has the potential to wake multiple pended threads.
	 * It is desirable to unpend all affected threads simultaneously. This
	 * is done in three steps:
	 *
	 * 1. Walk the waitq and create a linked list of threads to unpend.
	 * 2. Unpend each of the threads in the linked list
	 * 3. Ready each of the threads in the linked list
	 */

	z_sched_waitq_walk(&event->wait_q, event_walk_op, &data);

	if (data.head != NULL) {
		thread = data.head;
		struct k_thread *next;
		do {
			arch_thread_return_value_set(thread, 0);
			/* Report the full updated event set to the waiter */
			thread->events = events;
			next = thread->next_event_link;
			z_sched_wake_thread(thread, false);
			thread = next;
		} while (thread != NULL);
	}

	z_reschedule(&event->lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, post, event, events,
				       events_mask);

	return previous_events;
}
| 181 | |
/*
 * Post (OR in) @a events to the event object; returns the previous value
 * of those bits.
 */
uint32_t z_impl_k_event_post(struct k_event *event, uint32_t events)
{
	return k_event_post_internal(event, events, events);
}
| 186 | |
#ifdef CONFIG_USERSPACE
/* Syscall verification handler for k_event_post() */
uint32_t z_vrfy_k_event_post(struct k_event *event, uint32_t events)
{
	K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_post(event, events);
}
#include <zephyr/syscalls/k_event_post_mrsh.c>
#endif /* CONFIG_USERSPACE */
Peter Mitsis | ae394bf | 2021-09-20 14:14:32 -0400 | [diff] [blame] | 195 | |
/*
 * Replace the entire event set with @a events (mask of all ones); returns
 * the previous event set.
 */
uint32_t z_impl_k_event_set(struct k_event *event, uint32_t events)
{
	return k_event_post_internal(event, events, ~0);
}
| 200 | |
#ifdef CONFIG_USERSPACE
/* Syscall verification handler for k_event_set() */
uint32_t z_vrfy_k_event_set(struct k_event *event, uint32_t events)
{
	K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_set(event, events);
}
#include <zephyr/syscalls/k_event_set_mrsh.c>
#endif /* CONFIG_USERSPACE */
Peter Mitsis | ae394bf | 2021-09-20 14:14:32 -0400 | [diff] [blame] | 209 | |
/*
 * Overwrite only the bits selected by @a events_mask with the corresponding
 * bits of @a events; returns the previous value of the masked bits.
 */
uint32_t z_impl_k_event_set_masked(struct k_event *event, uint32_t events,
				   uint32_t events_mask)
{
	return k_event_post_internal(event, events, events_mask);
}
| 215 | |
#ifdef CONFIG_USERSPACE
/* Syscall verification handler for k_event_set_masked() */
uint32_t z_vrfy_k_event_set_masked(struct k_event *event, uint32_t events,
				   uint32_t events_mask)
{
	K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_set_masked(event, events, events_mask);
}
#include <zephyr/syscalls/k_event_set_masked_mrsh.c>
#endif /* CONFIG_USERSPACE */
Andrew Jackson | e183671 | 2022-07-07 06:06:59 +0100 | [diff] [blame] | 225 | |
/*
 * Clear the bits in @a events (post 0 under that mask); returns the
 * previous value of those bits.
 */
uint32_t z_impl_k_event_clear(struct k_event *event, uint32_t events)
{
	return k_event_post_internal(event, 0, events);
}
| 230 | |
#ifdef CONFIG_USERSPACE
/* Syscall verification handler for k_event_clear() */
uint32_t z_vrfy_k_event_clear(struct k_event *event, uint32_t events)
{
	K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_clear(event, events);
}
#include <zephyr/syscalls/k_event_clear_mrsh.c>
#endif /* CONFIG_USERSPACE */
Martin Jäger | caba2ad | 2022-09-30 12:03:09 +0200 | [diff] [blame] | 239 | |
/**
 * @brief Wait for the specified set of events
 *
 * Waits until the event object's current events satisfy @a events under the
 * matching rule encoded in @a options (ANY/ALL), optionally resetting the
 * event set first (K_EVENT_WAIT_RESET), pending the caller for up to
 * @a timeout if the conditions are not yet met.
 *
 * @return Subset of @a events that satisfied the wait; 0 on timeout or when
 *         @a events is 0
 */
static uint32_t k_event_wait_internal(struct k_event *event, uint32_t events,
				      unsigned int options, k_timeout_t timeout)
{
	uint32_t rv = 0;
	unsigned int wait_condition;
	struct k_thread *thread;

	/* Blocking from an ISR is not allowed; K_NO_WAIT is the exception */
	__ASSERT(((arch_is_in_isr() == false) ||
		  K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_event, wait, event, events,
					options, timeout);

	/* Waiting on no events trivially returns with nothing matched */
	if (events == 0) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, wait, event, events, 0);
		return 0;
	}

	wait_condition = options & K_EVENT_WAIT_MASK;
	thread = k_sched_current_thread_query();

	k_spinlock_key_t key = k_spin_lock(&event->lock);

	/* Optionally discard previously posted events before testing */
	if (options & K_EVENT_WAIT_RESET) {
		event->events = 0;
	}

	/* Test if the wait conditions have already been met. */

	if (are_wait_conditions_met(events, event->events, wait_condition)) {
		rv = event->events;

		k_spin_unlock(&event->lock, key);
		goto out;
	}

	/* Match conditions have not been met. */

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&event->lock, key);
		goto out;
	}

	/*
	 * The caller must pend to wait for the match. Save the desired
	 * set of events in the k_thread structure.
	 */

	thread->events = events;
	thread->event_options = options;

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_event, wait, event, events,
					   options, timeout);

	/* z_pend_curr releases the lock; 0 means woken by a matching post */
	if (z_pend_curr(&event->lock, key, &event->wait_q, timeout) == 0) {
		/* Retrieve the set of events that woke the thread */
		rv = thread->events;
	}

out:
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, wait, event,
				       events, rv & events);

	return rv & events;
}
| 305 | |
| 306 | /** |
| 307 | * Wait for any of the specified events |
| 308 | */ |
| 309 | uint32_t z_impl_k_event_wait(struct k_event *event, uint32_t events, |
| 310 | bool reset, k_timeout_t timeout) |
| 311 | { |
| 312 | uint32_t options = reset ? K_EVENT_WAIT_RESET : 0; |
| 313 | |
| 314 | return k_event_wait_internal(event, events, options, timeout); |
| 315 | } |
#ifdef CONFIG_USERSPACE
/* Syscall verification handler for k_event_wait() */
uint32_t z_vrfy_k_event_wait(struct k_event *event, uint32_t events,
			     bool reset, k_timeout_t timeout)
{
	K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_wait(event, events, reset, timeout);
}
#include <zephyr/syscalls/k_event_wait_mrsh.c>
#endif /* CONFIG_USERSPACE */
Peter Mitsis | ae394bf | 2021-09-20 14:14:32 -0400 | [diff] [blame] | 325 | |
| 326 | /** |
| 327 | * Wait for all of the specified events |
| 328 | */ |
| 329 | uint32_t z_impl_k_event_wait_all(struct k_event *event, uint32_t events, |
| 330 | bool reset, k_timeout_t timeout) |
| 331 | { |
| 332 | uint32_t options = reset ? (K_EVENT_WAIT_RESET | K_EVENT_WAIT_ALL) |
| 333 | : K_EVENT_WAIT_ALL; |
| 334 | |
| 335 | return k_event_wait_internal(event, events, options, timeout); |
| 336 | } |
| 337 | |
#ifdef CONFIG_USERSPACE
/* Syscall verification handler for k_event_wait_all() */
uint32_t z_vrfy_k_event_wait_all(struct k_event *event, uint32_t events,
				 bool reset, k_timeout_t timeout)
{
	K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_wait_all(event, events, reset, timeout);
}
#include <zephyr/syscalls/k_event_wait_all_mrsh.c>
#endif /* CONFIG_USERSPACE */
Peter Mitsis | 6df8efe | 2023-05-11 14:06:46 -0400 | [diff] [blame] | 347 | |
#ifdef CONFIG_OBJ_CORE_EVENT
/*
 * Register the event object type with the object core framework and link
 * all statically defined events. Runs once at PRE_KERNEL_1.
 */
static int init_event_obj_core_list(void)
{
	/* Initialize event object type */

	z_obj_type_init(&obj_type_event, K_OBJ_TYPE_EVENT_ID,
			offsetof(struct k_event, obj_core));

	/* Initialize and link statically defined events */

	STRUCT_SECTION_FOREACH(k_event, event) {
		k_obj_core_init_and_link(K_OBJ_CORE(event), &obj_type_event);
	}

	return 0;
}

SYS_INIT(init_event_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_EVENT */