/*
 * Copyright (c) 2021 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file event objects library
 *
 * Event objects are used to signal one or more threads that a custom set of
 * events has occurred. Threads wait on event objects until another thread or
 * ISR posts the desired set of events to the event object. Each time events
 * are posted to an event object, all threads waiting on that event object are
 * processed to determine if there is a match. All threads whose wait
 * conditions match the current set of events belonging to the event object
 * are awakened.
 *
 * A thread waiting on an event object has the option of waking once either
 * any or all of its desired events have been posted to the event object.
 *
 * @brief Kernel event object
 */
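/*
 * Illustrative usage sketch (an application-side example, not part of the
 * kernel implementation; the names EVT_RX_DONE and EVT_TX_DONE are
 * hypothetical). A consumer blocks until a producer posts any desired event:
 *
 *     K_EVENT_DEFINE(my_event);
 *
 *     #define EVT_RX_DONE BIT(0)
 *     #define EVT_TX_DONE BIT(1)
 *
 *     void producer(void)
 *     {
 *             k_event_post(&my_event, EVT_RX_DONE);
 *     }
 *
 *     void consumer(void)
 *     {
 *             uint32_t evts = k_event_wait(&my_event,
 *                                          EVT_RX_DONE | EVT_TX_DONE,
 *                                          false, K_FOREVER);
 *
 *             if ((evts & EVT_RX_DONE) != 0) {
 *                     ... handle RX completion ...
 *             }
 *     }
 */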
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>

#include <zephyr/toolchain.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/init.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/tracing/tracing.h>
#include <zephyr/sys/check.h>
/* private kernel APIs */
#include <wait_q.h>
#include <ksched.h>

#define K_EVENT_WAIT_ANY   0x00 /* Wait for any events */
#define K_EVENT_WAIT_ALL   0x01 /* Wait for all events */
#define K_EVENT_WAIT_MASK  0x01

#define K_EVENT_WAIT_RESET 0x02 /* Reset events prior to waiting */

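/*
 * Scratch data used while walking an event object's wait queue: "head"
 * accumulates the singly linked list of threads to wake (chained via each
 * thread's next_event_link) and "events" holds the event object's current
 * set of events.
 */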
struct event_walk_data {
        struct k_thread *head;
        uint32_t events;
};

#ifdef CONFIG_OBJ_CORE_EVENT
static struct k_obj_type obj_type_event;
#endif /* CONFIG_OBJ_CORE_EVENT */

void z_impl_k_event_init(struct k_event *event)
{
        event->events = 0;
        event->lock = (struct k_spinlock) {};

        SYS_PORT_TRACING_OBJ_INIT(k_event, event);

        z_waitq_init(&event->wait_q);

        k_object_init(event);

#ifdef CONFIG_OBJ_CORE_EVENT
        k_obj_core_init_and_link(K_OBJ_CORE(event), &obj_type_event);
#endif /* CONFIG_OBJ_CORE_EVENT */
}

#ifdef CONFIG_USERSPACE
void z_vrfy_k_event_init(struct k_event *event)
{
        K_OOPS(K_SYSCALL_OBJ_NEVER_INIT(event, K_OBJ_EVENT));
        z_impl_k_event_init(event);
}
#include <zephyr/syscalls/k_event_init_mrsh.c>
#endif /* CONFIG_USERSPACE */

/**
 * @brief Determine if the desired set of events has been satisfied
 *
 * This routine determines if the current set of events satisfies the desired
 * set of events. If @a wait_condition is K_EVENT_WAIT_ALL, then all of the
 * desired events must be present to satisfy the request. If @a wait_condition
 * is not K_EVENT_WAIT_ALL, it is assumed to be K_EVENT_WAIT_ANY. In the
 * K_EVENT_WAIT_ANY case, the request is satisfied when any of the desired
 * events are present in the current set of events.
 */
static bool are_wait_conditions_met(uint32_t desired, uint32_t current,
                                    unsigned int wait_condition)
{
        uint32_t match = current & desired;

        if (wait_condition == K_EVENT_WAIT_ALL) {
                return match == desired;
        }

        /* wait_condition assumed to be K_EVENT_WAIT_ANY */

        return match != 0;
}
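/*
 * Worked example (illustrative values): with current = 0x06 and
 * desired = 0x03, match = 0x02. An ANY wait is satisfied (match != 0),
 * but an ALL wait is not (match != desired).
 */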

static int event_walk_op(struct k_thread *thread, void *data)
{
        unsigned int wait_condition;
        struct event_walk_data *event_data = data;

        wait_condition = thread->event_options & K_EVENT_WAIT_MASK;

        if (are_wait_conditions_met(thread->events, event_data->events,
                                    wait_condition)) {

                /*
                 * Events create a list of threads to wake up. We do
                 * not want z_thread_timeout to wake these threads; they
                 * will be woken up by k_event_post_internal once they
                 * have been processed.
                 */
                thread->no_wake_on_timeout = true;

                /*
                 * The wait conditions have been satisfied. Add this
                 * thread to the list of threads to unpend.
                 */
                thread->next_event_link = event_data->head;
                event_data->head = thread;
                z_abort_timeout(&thread->base.timeout);
        }

        return 0;
}

static uint32_t k_event_post_internal(struct k_event *event, uint32_t events,
                                      uint32_t events_mask)
{
        k_spinlock_key_t key;
        struct k_thread *thread;
        struct event_walk_data data;
        uint32_t previous_events;

        data.head = NULL;
        key = k_spin_lock(&event->lock);

        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_event, post, event, events,
                                        events_mask);

        previous_events = event->events & events_mask;
        events = (event->events & ~events_mask) |
                 (events & events_mask);
        event->events = events;
        data.events = events;
        /*
         * Posting an event has the potential to wake multiple pended threads.
         * It is desirable to unpend all affected threads simultaneously. This
         * is done in three steps:
         *
         * 1. Walk the waitq and create a linked list of threads to unpend.
         * 2. Unpend each of the threads in the linked list
         * 3. Ready each of the threads in the linked list
         */

        z_sched_waitq_walk(&event->wait_q, event_walk_op, &data);

        if (data.head != NULL) {
                thread = data.head;
                struct k_thread *next;
                do {
                        arch_thread_return_value_set(thread, 0);
                        thread->events = events;
                        next = thread->next_event_link;
                        z_sched_wake_thread(thread, false);
                        thread = next;
                } while (thread != NULL);
        }

        z_reschedule(&event->lock, key);

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, post, event, events,
                                       events_mask);

        return previous_events;
}
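/*
 * Masked-update example (illustrative values): with event->events = 0x05,
 * events = 0x02 and events_mask = 0x03, the new event set is
 * (0x05 & ~0x03) | (0x02 & 0x03) = 0x06, and the returned previous_events
 * is 0x05 & 0x03 = 0x01.
 */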

uint32_t z_impl_k_event_post(struct k_event *event, uint32_t events)
{
        return k_event_post_internal(event, events, events);
}

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_post(struct k_event *event, uint32_t events)
{
        K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
        return z_impl_k_event_post(event, events);
}
#include <zephyr/syscalls/k_event_post_mrsh.c>
#endif /* CONFIG_USERSPACE */

uint32_t z_impl_k_event_set(struct k_event *event, uint32_t events)
{
        return k_event_post_internal(event, events, ~0);
}
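/*
 * Illustrative contrast (example values): starting from event->events = 0x01,
 * k_event_post(event, 0x02) leaves 0x03 (new bits are ORed in), whereas
 * k_event_set(event, 0x02) leaves 0x02 (the full set is overwritten via the
 * ~0 mask).
 */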

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_set(struct k_event *event, uint32_t events)
{
        K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
        return z_impl_k_event_set(event, events);
}
#include <zephyr/syscalls/k_event_set_mrsh.c>
#endif /* CONFIG_USERSPACE */

uint32_t z_impl_k_event_set_masked(struct k_event *event, uint32_t events,
                                   uint32_t events_mask)
{
        return k_event_post_internal(event, events, events_mask);
}

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_set_masked(struct k_event *event, uint32_t events,
                                   uint32_t events_mask)
{
        K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
        return z_impl_k_event_set_masked(event, events, events_mask);
}
#include <zephyr/syscalls/k_event_set_masked_mrsh.c>
#endif /* CONFIG_USERSPACE */

uint32_t z_impl_k_event_clear(struct k_event *event, uint32_t events)
{
        return k_event_post_internal(event, 0, events);
}
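/*
 * Clear example (illustrative values): with event->events = 0x03,
 * k_event_clear(event, 0x01) leaves 0x02 and returns the cleared bits
 * that were previously set, i.e. 0x01.
 */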

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_clear(struct k_event *event, uint32_t events)
{
        K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
        return z_impl_k_event_clear(event, events);
}
#include <zephyr/syscalls/k_event_clear_mrsh.c>
#endif /* CONFIG_USERSPACE */

static uint32_t k_event_wait_internal(struct k_event *event, uint32_t events,
                                      unsigned int options, k_timeout_t timeout)
{
        uint32_t rv = 0;
        unsigned int wait_condition;
        struct k_thread *thread;

        __ASSERT(((arch_is_in_isr() == false) ||
                  K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");

        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_event, wait, event, events,
                                        options, timeout);

        if (events == 0) {
                SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, wait, event, events, 0);
                return 0;
        }

        wait_condition = options & K_EVENT_WAIT_MASK;
        thread = k_sched_current_thread_query();

        k_spinlock_key_t key = k_spin_lock(&event->lock);

        if (options & K_EVENT_WAIT_RESET) {
                event->events = 0;
        }

        /* Test if the wait conditions have already been met. */

        if (are_wait_conditions_met(events, event->events, wait_condition)) {
                rv = event->events;

                k_spin_unlock(&event->lock, key);
                goto out;
        }

        /* Match conditions have not been met. */

        if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
                k_spin_unlock(&event->lock, key);
                goto out;
        }

        /*
         * The caller must pend to wait for the match. Save the desired
         * set of events in the k_thread structure.
         */

        thread->events = events;
        thread->event_options = options;

        SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_event, wait, event, events,
                                           options, timeout);

        if (z_pend_curr(&event->lock, key, &event->wait_q, timeout) == 0) {
                /* Retrieve the set of events that woke the thread */
                rv = thread->events;
        }

out:
        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, wait, event,
                                       events, rv & events);

        return rv & events;
}
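/*
 * Return-value example (illustrative values): a thread waiting for
 * events = 0x03 (ANY) when event->events = 0x06 is satisfied immediately
 * (match = 0x02) and receives rv & events = 0x02; only the desired bits
 * that are actually present are reported back.
 */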

/**
 * Wait for any of the specified events
 */
uint32_t z_impl_k_event_wait(struct k_event *event, uint32_t events,
                             bool reset, k_timeout_t timeout)
{
        uint32_t options = reset ? K_EVENT_WAIT_RESET : 0;

        return k_event_wait_internal(event, events, options, timeout);
}
#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_wait(struct k_event *event, uint32_t events,
                             bool reset, k_timeout_t timeout)
{
        K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
        return z_impl_k_event_wait(event, events, reset, timeout);
}
#include <zephyr/syscalls/k_event_wait_mrsh.c>
#endif /* CONFIG_USERSPACE */

/**
 * Wait for all of the specified events
 */
uint32_t z_impl_k_event_wait_all(struct k_event *event, uint32_t events,
                                 bool reset, k_timeout_t timeout)
{
        uint32_t options = reset ? (K_EVENT_WAIT_RESET | K_EVENT_WAIT_ALL)
                                 : K_EVENT_WAIT_ALL;

        return k_event_wait_internal(event, events, options, timeout);
}
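/*
 * Wait-all example (illustrative values): a thread calling
 * k_event_wait_all() for 0x03 is not woken when only 0x01 is posted;
 * once 0x02 is also posted (making the event set 0x03), it wakes and
 * receives 0x03.
 */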

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_k_event_wait_all(struct k_event *event, uint32_t events,
                                 bool reset, k_timeout_t timeout)
{
        K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
        return z_impl_k_event_wait_all(event, events, reset, timeout);
}
#include <zephyr/syscalls/k_event_wait_all_mrsh.c>
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_OBJ_CORE_EVENT
static int init_event_obj_core_list(void)
{
        /* Initialize event object type */

        z_obj_type_init(&obj_type_event, K_OBJ_TYPE_EVENT_ID,
                        offsetof(struct k_event, obj_core));

        /* Initialize and link statically defined events */

        STRUCT_SECTION_FOREACH(k_event, event) {
                k_obj_core_init_and_link(K_OBJ_CORE(event), &obj_type_event);
        }

        return 0;
}

SYS_INIT(init_event_obj_core_list, PRE_KERNEL_1,
         CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_EVENT */