blob: 259d1b8ada871748ff9687dfc0c9c4702dec96b1 [file] [log] [blame]
Peter Mitsisae394bf2021-09-20 14:14:32 -04001/*
2 * Copyright (c) 2021 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7/**
8 * @file event objects library
9 *
10 * Event objects are used to signal one or more threads that a custom set of
11 * events has occurred. Threads wait on event objects until another thread or
12 * ISR posts the desired set of events to the event object. Each time events
13 * are posted to an event object, all threads waiting on that event object are
 * processed to determine if there is a match. All threads whose wait
15 * conditions match the current set of events now belonging to the event object
16 * are awakened.
17 *
18 * Threads waiting on an event object have the option of either waking once
19 * any or all of the events it desires have been posted to the event object.
20 *
21 * @brief Kernel event object
22 */
23
Gerard Marull-Paretascffefc82022-05-06 11:04:23 +020024#include <zephyr/kernel.h>
25#include <zephyr/kernel_structs.h>
Peter Mitsisae394bf2021-09-20 14:14:32 -040026
Gerard Marull-Paretascffefc82022-05-06 11:04:23 +020027#include <zephyr/toolchain.h>
28#include <zephyr/wait_q.h>
29#include <zephyr/sys/dlist.h>
Peter Mitsisae394bf2021-09-20 14:14:32 -040030#include <ksched.h>
Gerard Marull-Paretascffefc82022-05-06 11:04:23 +020031#include <zephyr/init.h>
32#include <zephyr/syscall_handler.h>
33#include <zephyr/tracing/tracing.h>
34#include <zephyr/sys/check.h>
Peter Mitsisae394bf2021-09-20 14:14:32 -040035
/* Wait-option flags stored in a waiting thread's event_options field */
#define K_EVENT_WAIT_ANY   0x00	/* Wait for any events */
#define K_EVENT_WAIT_ALL   0x01	/* Wait for all events */
#define K_EVENT_WAIT_MASK  0x01	/* Bit selecting the any/all condition */

#define K_EVENT_WAIT_RESET 0x02	/* Reset events prior to waiting */
/**
 * @brief Initialize an event object
 *
 * Clears the stored set of events, resets the spinlock, initializes the
 * wait queue, and registers the object with the kernel object subsystem.
 *
 * @param event Address of the event object to initialize
 */
void z_impl_k_event_init(struct k_event *event)
{
	event->events = 0;
	event->lock = (struct k_spinlock) {};

	SYS_PORT_TRACING_OBJ_INIT(k_event, event);

	z_waitq_init(&event->wait_q);

	z_object_init(event);
}
53
#ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: oops unless @a event refers to a valid,
 * not-yet-initialized kernel event object, then defer to the implementation.
 */
void z_vrfy_k_event_init(struct k_event *event)
{
	Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(event, K_OBJ_EVENT));
	z_impl_k_event_init(event);
}
#include <syscalls/k_event_init_mrsh.c>
#endif
62
63/**
64 * @brief determine if desired set of events been satisfied
65 *
66 * This routine determines if the current set of events satisfies the desired
67 * set of events. If @a wait_condition is K_EVENT_WAIT_ALL, then at least
68 * all the desired events must be present to satisfy the request. If @a
69 * wait_condition is not K_EVENT_WAIT_ALL, it is assumed to be K_EVENT_WAIT_ANY.
70 * In the K_EVENT_WAIT_ANY case, the request is satisfied when any of the
71 * current set of events are present in the desired set of events.
72 */
73static bool are_wait_conditions_met(uint32_t desired, uint32_t current,
74 unsigned int wait_condition)
75{
76 uint32_t match = current & desired;
77
78 if (wait_condition == K_EVENT_WAIT_ALL) {
79 return match == desired;
80 }
81
82 /* wait_condition assumed to be K_EVENT_WAIT_ANY */
83
84 return match != 0;
85}
86
/**
 * @brief Deliver events to an event object and wake matching waiters
 *
 * Updates the event object's stored set of events under its spinlock, then
 * scans the wait queue and wakes every thread whose wait condition
 * (any/all, taken from the thread's event_options) is satisfied by the
 * updated set.
 *
 * @param event Address of the event object
 * @param events Set of events to deliver
 * @param events_mask Mask selecting which bits of the stored set are
 *                    overwritten by @a events; bits outside the mask
 *                    retain their previous value
 */
static void k_event_post_internal(struct k_event *event, uint32_t events,
				  uint32_t events_mask)
{
	k_spinlock_key_t key;
	struct k_thread *thread;
	unsigned int wait_condition;
	struct k_thread *head = NULL;	/* List of threads to wake */

	key = k_spin_lock(&event->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_event, post, event, events,
					events_mask);

	/* Merge the new events into the stored set: bits selected by
	 * events_mask come from @a events, the rest keep their old value.
	 */
	events = (event->events & ~events_mask) |
		 (events & events_mask);
	event->events = events;

	/*
	 * Posting an event has the potential to wake multiple pended threads.
	 * It is desirable to unpend all affected threads simultaneously. To
	 * do so, this must be done in three steps as it is unsafe to unpend
	 * threads from within the _WAIT_Q_FOR_EACH() loop.
	 *
	 * 1. Create a linked list of threads to unpend.
	 * 2. Unpend each of the threads in the linked list
	 * 3. Ready each of the threads in the linked list
	 */

	_WAIT_Q_FOR_EACH(&event->wait_q, thread) {
		wait_condition = thread->event_options & K_EVENT_WAIT_MASK;

		if (are_wait_conditions_met(thread->events, events,
					    wait_condition)) {
			/*
			 * The wait conditions have been satisfied. Add this
			 * thread to the list of threads to unpend.
			 */

			thread->next_event_link = head;
			head = thread;
		}


	}

	if (head != NULL) {
		thread = head;
		do {
			z_unpend_thread(thread);
			arch_thread_return_value_set(thread, 0);
			/* Hand the full updated event set to the waiter;
			 * the waiter masks it against its desired set.
			 */
			thread->events = events;
			z_ready_thread(thread);
			thread = thread->next_event_link;
		} while (thread != NULL);
	}

	/* Releases the lock; may context switch to a newly-readied thread */
	z_reschedule(&event->lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, post, event, events,
				       events_mask);
}
148
/**
 * @brief Post one or more events to an event object
 *
 * Passing @a events as both the value and the mask makes this an OR-merge:
 * the posted bits are set in the stored event set and all other bits are
 * left unchanged.
 *
 * @param event Address of the event object
 * @param events Set of events to post
 */
void z_impl_k_event_post(struct k_event *event, uint32_t events)
{
	k_event_post_internal(event, events, events);
}
153
#ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: validate the event object handle before
 * posting on behalf of user mode.
 */
void z_vrfy_k_event_post(struct k_event *event, uint32_t events)
{
	Z_OOPS(Z_SYSCALL_OBJ(event, K_OBJ_EVENT));
	z_impl_k_event_post(event, events);
}
#include <syscalls/k_event_post_mrsh.c>
#endif
162
/**
 * @brief Set an event object's events to an exact value
 *
 * Using an all-ones mask makes this a full overwrite: the stored event set
 * becomes exactly @a events, clearing any bits not present in it.
 *
 * @param event Address of the event object
 * @param events New set of events
 */
void z_impl_k_event_set(struct k_event *event, uint32_t events)
{
	k_event_post_internal(event, events, ~0);
}
167
#ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: validate the event object handle before
 * setting events on behalf of user mode.
 */
void z_vrfy_k_event_set(struct k_event *event, uint32_t events)
{
	Z_OOPS(Z_SYSCALL_OBJ(event, K_OBJ_EVENT));
	z_impl_k_event_set(event, events);
}
#include <syscalls/k_event_set_mrsh.c>
#endif
176
/**
 * @brief Wait for a set of events to be delivered to an event object
 *
 * Returns immediately if @a events is 0, or if the wait condition
 * (any/all, encoded in @a options) is already satisfied by the event
 * object's current set of events. Otherwise the caller pends on the
 * event object until the condition is met or @a timeout expires. If
 * K_EVENT_WAIT_RESET is set in @a options, the stored event set is
 * cleared before the conditions are evaluated.
 *
 * @param event Address of the event object
 * @param events Desired set of events
 * @param options Wait options (K_EVENT_WAIT_ANY / K_EVENT_WAIT_ALL,
 *                optionally OR'd with K_EVENT_WAIT_RESET)
 * @param timeout Maximum time to wait for the condition to be met
 *
 * @return The subset of @a events that satisfied the wait, or 0 on
 *         timeout (or when @a events is 0)
 */
static uint32_t k_event_wait_internal(struct k_event *event, uint32_t events,
				      unsigned int options, k_timeout_t timeout)
{
	uint32_t rv = 0;
	unsigned int wait_condition;
	struct k_thread *thread;

	/* Blocking waits are not permitted from ISR context */
	__ASSERT(((arch_is_in_isr() == false) ||
		  K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_event, wait, event, events,
					options, timeout);

	/* An empty desired set can never be satisfied; return at once */
	if (events == 0) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, wait, event, events, 0);
		return 0;
	}

	wait_condition = options & K_EVENT_WAIT_MASK;
	thread = z_current_get();

	k_spinlock_key_t key = k_spin_lock(&event->lock);

	/* Optionally clear the stored events before evaluating conditions */
	if (options & K_EVENT_WAIT_RESET) {
		event->events = 0;
	}

	/* Test if the wait conditions have already been met. */

	if (are_wait_conditions_met(events, event->events, wait_condition)) {
		rv = event->events;

		k_spin_unlock(&event->lock, key);
		goto out;
	}

	/* Match conditions have not been met. */

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&event->lock, key);
		goto out;
	}

	/*
	 * The caller must pend to wait for the match. Save the desired
	 * set of events in the k_thread structure.
	 */

	thread->events = events;
	thread->event_options = options;

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_event, wait, event, events,
					   options, timeout);

	/* z_pend_curr() releases the lock; 0 means the wait was satisfied */
	if (z_pend_curr(&event->lock, key, &event->wait_q, timeout) == 0) {
		/* Retrieve the set of events that woke the thread */
		rv = thread->events;
	}

out:
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, wait, event,
				       events, rv & events);

	/* Report only the events the caller asked for */
	return rv & events;
}
242
243/**
244 * Wait for any of the specified events
245 */
246uint32_t z_impl_k_event_wait(struct k_event *event, uint32_t events,
247 bool reset, k_timeout_t timeout)
248{
249 uint32_t options = reset ? K_EVENT_WAIT_RESET : 0;
250
251 return k_event_wait_internal(event, events, options, timeout);
252}
#ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: validate the event object handle before
 * waiting on behalf of user mode.
 */
uint32_t z_vrfy_k_event_wait(struct k_event *event, uint32_t events,
			     bool reset, k_timeout_t timeout)
{
	Z_OOPS(Z_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_wait(event, events, reset, timeout);
}
#include <syscalls/k_event_wait_mrsh.c>
#endif
262
263/**
264 * Wait for all of the specified events
265 */
266uint32_t z_impl_k_event_wait_all(struct k_event *event, uint32_t events,
267 bool reset, k_timeout_t timeout)
268{
269 uint32_t options = reset ? (K_EVENT_WAIT_RESET | K_EVENT_WAIT_ALL)
270 : K_EVENT_WAIT_ALL;
271
272 return k_event_wait_internal(event, events, options, timeout);
273}
274
#ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: validate the event object handle before
 * waiting (all-events variant) on behalf of user mode.
 */
uint32_t z_vrfy_k_event_wait_all(struct k_event *event, uint32_t events,
				 bool reset, k_timeout_t timeout)
{
	Z_OOPS(Z_SYSCALL_OBJ(event, K_OBJ_EVENT));
	return z_impl_k_event_wait_all(event, events, reset, timeout);
}
#include <syscalls/k_event_wait_all_mrsh.c>
#endif