/*
 * Copyright (c) 2016, Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Public kernel APIs.
 */

#ifndef ZEPHYR_INCLUDE_KERNEL_H_
#define ZEPHYR_INCLUDE_KERNEL_H_

#if !defined(_ASMLANGUAGE)
#include <kernel_includes.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <toolchain.h>
#include <tracing/tracing_macros.h>

#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
#include <timing/timing.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief Kernel APIs
 * @defgroup kernel_apis Kernel APIs
 * @{
 * @}
 */

#define K_ANY NULL
#define K_END NULL

#if CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES == 0
#error Zero available thread priorities defined!
#endif

#define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
#define K_PRIO_PREEMPT(x) (x)

#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
#define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
#define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
#define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
#define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)

#ifdef CONFIG_POLL
#define _POLL_EVENT_OBJ_INIT(obj) \
	.poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
#define _POLL_EVENT sys_dlist_t poll_events
#else
#define _POLL_EVENT_OBJ_INIT(obj)
#define _POLL_EVENT
#endif

struct k_thread;
struct k_mutex;
struct k_sem;
struct k_msgq;
struct k_mbox;
struct k_pipe;
struct k_queue;
struct k_fifo;
struct k_lifo;
struct k_stack;
struct k_mem_slab;
struct k_mem_pool;
struct k_timer;
struct k_poll_event;
struct k_poll_signal;
struct k_mem_domain;
struct k_mem_partition;
struct k_futex;
struct k_event;

enum execution_context_types {
	K_ISR = 0,
	K_COOP_THREAD,
	K_PREEMPT_THREAD,
};

/* private, used by k_poll and k_work_poll */
struct k_work_poll;
typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);

/**
 * @addtogroup thread_apis
 * @{
 */

typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
				   void *user_data);

/**
 * @brief Iterate over all the threads in the system.
 *
 * This routine iterates over all the threads in the system and
 * calls the user_cb function for each thread.
 *
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock to protect the _kernel.threads
 * list, which means creation of new threads and termination of existing
 * threads are blocked until this API returns.
 *
 * @return N/A
 */
extern void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);

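/*
 * Usage sketch (illustrative, not part of the API): count all threads by
 * passing a counter through user_data. Assumes @kconfig{CONFIG_THREAD_MONITOR}
 * is enabled; all names are arbitrary.
 *
 *	static void count_cb(const struct k_thread *thread, void *user_data)
 *	{
 *		int *count = user_data;
 *
 *		(*count)++;
 *	}
 *
 *	int num_threads = 0;
 *
 *	k_thread_foreach(count_cb, &num_threads);
 */
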
/**
 * @brief Iterate over all the threads in the system without locking.
 *
 * This routine works exactly the same as @ref k_thread_foreach
 * but unlocks interrupts when user_cb is executed.
 *
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
 * queue elements, and releases the lock while the user callback executes.
 * If a new thread is created while this @c foreach function is in progress,
 * the new thread will not be included in the enumeration.
 * If a thread is aborted during this enumeration, there is a race and the
 * aborted thread may still be included in the enumeration.
 * @note If a thread is aborted and the memory occupied by its @c k_thread
 * structure is reused while @c k_thread_foreach_unlocked is in progress,
 * the system may become unstable: this function may never return, because
 * it would follow a stale @c next pointer, treating memory that is no longer
 * a k_thread structure as if it were one.
 * Do not reuse the memory occupied by the k_thread structure of an aborted
 * thread if the thread was aborted after this function was called, in any
 * context.
 */
extern void k_thread_foreach_unlocked(
	k_thread_user_cb_t user_cb, void *user_data);

/** @} */

/**
 * @defgroup thread_apis Thread APIs
 * @ingroup kernel_apis
 * @{
 */

#endif /* !_ASMLANGUAGE */


/*
 * Thread user options. May be needed by assembly code. Common part uses low
 * bits, arch-specific use high bits.
 */

/**
 * @brief system thread that must not abort
 */
#define K_ESSENTIAL (BIT(0))

#if defined(CONFIG_FPU_SHARING)
/**
 * @brief FPU registers are managed by context switch
 *
 * @details
 * This option indicates that the thread uses the CPU's floating point
 * registers. This instructs the kernel to take additional steps to save
 * and restore the contents of these registers when scheduling the thread.
 * No effect if @kconfig{CONFIG_FPU_SHARING} is not enabled.
 */
#define K_FP_REGS (BIT(1))
#endif

/**
 * @brief user mode thread
 *
 * This thread has dropped from supervisor mode to user mode and consequently
 * has additional restrictions
 */
#define K_USER (BIT(2))

/**
 * @brief Inherit Permissions
 *
 * @details
 * Indicates that the thread being created should inherit all kernel object
 * permissions from the thread that created it. No effect if
 * @kconfig{CONFIG_USERSPACE} is not enabled.
 */
#define K_INHERIT_PERMS (BIT(3))

/**
 * @brief Callback item state
 *
 * @details
 * This is a single bit of state reserved for "callback manager"
 * utilities (p4wq initially) that need to track operations invoked
 * from within a user-provided callback. Effectively it serves as a
 * tiny bit of zero-overhead TLS data.
 */
#define K_CALLBACK_STATE (BIT(4))

#ifdef CONFIG_X86
/* x86 Bitmask definitions for threads user options */

#if defined(CONFIG_FPU_SHARING) && defined(CONFIG_X86_SSE)
/**
 * @brief FP and SSE registers are managed by context switch on x86
 *
 * @details
 * This option indicates that the thread uses the x86 CPU's floating point
 * and SSE registers. This instructs the kernel to take additional steps to
 * save and restore the contents of these registers when scheduling
 * the thread. No effect if @kconfig{CONFIG_X86_SSE} is not enabled.
 */
#define K_SSE_REGS (BIT(7))
#endif
#endif

/* end - thread options */

#if !defined(_ASMLANGUAGE)
/**
 * @brief Create a thread.
 *
 * This routine initializes a thread, then schedules it for execution.
 *
 * The new thread may be scheduled for immediate execution or a delayed start.
 * If the newly spawned thread does not have a delayed start the kernel
 * scheduler may preempt the current thread to allow the new thread to
 * execute.
 *
 * Thread options are architecture-specific, and can include K_ESSENTIAL,
 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
 * them using "|" (the logical OR operator).
 *
 * Stack objects passed to this function must be originally defined with
 * either of these macros in order to be portable:
 *
 * - K_THREAD_STACK_DEFINE() - For stacks that may support either user or
 *   supervisor threads.
 * - K_KERNEL_STACK_DEFINE() - For stacks that may support supervisor
 *   threads only. These stacks use less memory if CONFIG_USERSPACE is
 *   enabled.
 *
 * The stack_size parameter has constraints. It must either be:
 *
 * - The original size value passed to K_THREAD_STACK_DEFINE() or
 *   K_KERNEL_STACK_DEFINE()
 * - The return value of K_THREAD_STACK_SIZEOF(stack) if the stack was
 *   defined with K_THREAD_STACK_DEFINE()
 * - The return value of K_KERNEL_STACK_SIZEOF(stack) if the stack was
 *   defined with K_KERNEL_STACK_DEFINE().
 *
 * Using other values, or sizeof(stack), may produce undefined behavior.
 *
 * @param new_thread Pointer to uninitialized struct k_thread
 * @param stack Pointer to the stack space.
 * @param stack_size Stack size in bytes.
 * @param entry Thread entry function.
 * @param p1 1st entry point parameter.
 * @param p2 2nd entry point parameter.
 * @param p3 3rd entry point parameter.
 * @param prio Thread priority.
 * @param options Thread options.
 * @param delay Scheduling delay, or K_NO_WAIT (for no delay).
 *
 * @return ID of new thread.
 *
 */
__syscall k_tid_t k_thread_create(struct k_thread *new_thread,
				  k_thread_stack_t *stack,
				  size_t stack_size,
				  k_thread_entry_t entry,
				  void *p1, void *p2, void *p3,
				  int prio, uint32_t options, k_timeout_t delay);

290 * @brief Drop a thread's privileges permanently to user mode
291 *
Andrew Boie4d6bc472020-10-24 13:11:35 -0700292 * This allows a supervisor thread to be re-used as a user thread.
293 * This function does not return, but control will transfer to the provided
294 * entry point as if this was a new user thread.
295 *
296 * The implementation ensures that the stack buffer contents are erased.
297 * Any thread-local storage will be reverted to a pristine state.
298 *
299 * Memory domain membership, resource pool assignment, kernel object
300 * permissions, priority, and thread options are preserved.
301 *
302 * A common use of this function is to re-use the main thread as a user thread
303 * once all supervisor mode-only tasks have been completed.
304 *
Andrew Boie3f091b52017-08-30 14:34:14 -0700305 * @param entry Function to start executing from
306 * @param p1 1st entry point parameter
307 * @param p2 2nd entry point parameter
308 * @param p3 3rd entry point parameter
309 */
310extern FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
311 void *p1, void *p2,
312 void *p3);
Andrew Boie3f091b52017-08-30 14:34:14 -0700313
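/*
 * Usage sketch (illustrative; all names are arbitrary): the main thread
 * finishes privileged setup, then drops to user mode and never returns.
 *
 *	void user_main(void *p1, void *p2, void *p3)
 *	{
 *		// runs in user mode
 *	}
 *
 *	void main(void)
 *	{
 *		// privileged initialization here
 *		k_thread_user_mode_enter(user_main, NULL, NULL, NULL);
 *	}
 */
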
/**
 * @brief Grant a thread access to a set of kernel objects
 *
 * This is a convenience function. For the provided thread, grant access to
 * the remaining arguments, which must be pointers to kernel objects.
 *
 * The thread object must be initialized (i.e. running). The objects don't
 * need to be.
 * Note that NULL shouldn't be passed as an argument.
 *
 * @param thread Thread to grant access to objects
 * @param ... list of kernel object pointers
 */
#define k_thread_access_grant(thread, ...) \
	FOR_EACH_FIXED_ARG(k_object_access_grant, (;), thread, __VA_ARGS__)

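/*
 * Usage sketch (illustrative; the objects and user_tid are arbitrary names
 * assumed to be defined elsewhere):
 *
 *	K_SEM_DEFINE(my_sem, 0, 1);
 *	K_MUTEX_DEFINE(my_mutex);
 *
 *	k_thread_access_grant(user_tid, &my_sem, &my_mutex);
 */
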
/**
 * @brief Assign a resource memory pool to a thread
 *
 * By default, threads have no resource pool assigned unless their parent
 * thread has a resource pool, in which case it is inherited. Multiple
 * threads may be assigned to the same memory pool.
 *
 * Changing a thread's resource pool will not migrate allocations from the
 * previous pool.
 *
 * @param thread Target thread to assign a memory pool for resource requests.
 * @param heap Heap object to use for resources,
 *             or NULL if the thread should no longer have a memory pool.
 */
static inline void k_thread_heap_assign(struct k_thread *thread,
					struct k_heap *heap)
{
	thread->resource_pool = heap;
}

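/*
 * Usage sketch (illustrative; heap name, size, and child_tid are arbitrary):
 *
 *	K_HEAP_DEFINE(app_heap, 2048);
 *
 *	k_thread_heap_assign(child_tid, &app_heap);
 */
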
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
/**
 * @brief Obtain stack usage information for the specified thread
 *
 * User threads will need to have permission on the target thread object.
 *
 * Some hardware may prevent inspection of a stack buffer currently in use.
 * If this API is called from supervisor mode, on the currently running thread,
 * on a platform which selects @kconfig{CONFIG_NO_UNUSED_STACK_INSPECTION}, an
 * error will be generated.
 *
 * @param thread Thread to inspect stack information
 * @param unused_ptr Output parameter, filled in with the unused stack space
 *	of the target thread in bytes.
 * @retval 0 on success
 * @retval -EBADF Bad thread object (user mode only)
 * @retval -EPERM No permissions on thread object (user mode only)
 * @retval -ENOTSUP Forbidden by hardware policy
 * @retval -EINVAL Thread is uninitialized or exited (user mode only)
 * @retval -EFAULT Bad memory address for unused_ptr (user mode only)
 */
__syscall int k_thread_stack_space_get(const struct k_thread *thread,
				       size_t *unused_ptr);
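/*
 * Usage sketch (illustrative): report how much of the current thread's
 * stack has never been touched.
 *
 *	size_t unused;
 *
 *	if (k_thread_stack_space_get(k_current_get(), &unused) == 0) {
 *		printk("%zu bytes of stack never used\n", unused);
 *	}
 */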
#endif

#if (CONFIG_HEAP_MEM_POOL_SIZE > 0)
/**
 * @brief Assign the system heap as a thread's resource pool
 *
 * Similar to k_thread_heap_assign(), but the thread will use
 * the kernel heap to draw memory.
 *
 * Use with caution, as a malicious thread could perform DoS attacks on the
 * kernel heap.
 *
 * @param thread Target thread to assign the system heap for resource requests
 *
 */
void k_thread_system_pool_assign(struct k_thread *thread);
#endif /* (CONFIG_HEAP_MEM_POOL_SIZE > 0) */

/**
 * @brief Sleep until a thread exits
 *
 * The caller will be put to sleep until the target thread exits, either due
 * to being aborted, self-exiting, or taking a fatal error. This API returns
 * immediately if the thread isn't running.
 *
 * This API may only be called from ISRs with a K_NO_WAIT timeout,
 * where it can be useful as a predicate to detect when a thread has
 * aborted.
 *
 * @param thread Thread to wait to exit
 * @param timeout upper bound time to wait for the thread to exit.
 * @retval 0 success, target thread has exited or wasn't running
 * @retval -EBUSY returned without waiting
 * @retval -EAGAIN waiting period timed out
 * @retval -EDEADLK target thread is joining on the caller, or target thread
 *	is the caller
 */
__syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);

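/*
 * Usage sketch (illustrative; worker_thread is an arbitrary thread object):
 * wait up to one second for a worker to finish.
 *
 *	int ret = k_thread_join(&worker_thread, K_SECONDS(1));
 *
 *	if (ret == -EAGAIN) {
 *		// worker is still running after the timeout
 *	}
 */
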
/**
 * @brief Put the current thread to sleep.
 *
 * This routine puts the current thread to sleep for @a duration,
 * specified as a k_timeout_t object.
 *
 * @note If @a timeout is set to K_FOREVER then the thread is suspended.
 *
 * @param timeout Desired duration of sleep.
 *
 * @return Zero if the requested time has elapsed or the number of milliseconds
 * left to sleep, if the thread was woken up by a \ref k_wakeup call.
 */
__syscall int32_t k_sleep(k_timeout_t timeout);

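/*
 * Usage sketch (illustrative): sleep for 100 ms, detecting an early wakeup
 * via the return value.
 *
 *	int32_t remaining = k_sleep(K_MSEC(100));
 *
 *	if (remaining > 0) {
 *		// another thread called k_wakeup() on us early
 *	}
 */
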
/**
 * @brief Put the current thread to sleep.
 *
 * This routine puts the current thread to sleep for @a duration milliseconds.
 *
 * @param ms Number of milliseconds to sleep.
 *
 * @return Zero if the requested time has elapsed or the number of milliseconds
 * left to sleep, if the thread was woken up by a \ref k_wakeup call.
 */
static inline int32_t k_msleep(int32_t ms)
{
	return k_sleep(Z_TIMEOUT_MS(ms));
}

/**
 * @brief Put the current thread to sleep with microsecond resolution.
 *
 * This function is unlikely to work as expected without kernel tuning.
 * In particular, because the lower bound on the duration of a sleep is
 * the duration of a tick, @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} must be
 * adjusted to achieve the resolution desired. The implications of doing
 * this must be understood before attempting to use k_usleep(). Use with
 * caution.
 *
 * @param us Number of microseconds to sleep.
 *
 * @return Zero if the requested time has elapsed or the number of microseconds
 * left to sleep, if the thread was woken up by a \ref k_wakeup call.
 */
__syscall int32_t k_usleep(int32_t us);

/**
 * @brief Cause the current thread to busy wait.
 *
 * This routine causes the current thread to execute a "do nothing" loop for
 * @a usec_to_wait microseconds.
 *
 * @note The clock used for the microsecond-resolution delay here may
 * be skewed relative to the clock used for system timeouts like
 * k_sleep(). For example k_busy_wait(1000) may take slightly more or
 * less time than k_sleep(K_MSEC(1)), with the offset dependent on
 * clock tolerances.
 *
 * @return N/A
 */
__syscall void k_busy_wait(uint32_t usec_to_wait);

/**
 * @brief Yield the current thread.
 *
 * This routine causes the current thread to yield execution to another
 * thread of the same or higher priority. If there are no other ready threads
 * of the same or higher priority, the routine returns immediately.
 *
 * @return N/A
 */
__syscall void k_yield(void);

/**
 * @brief Wake up a sleeping thread.
 *
 * This routine prematurely wakes up @a thread from sleeping.
 *
 * If @a thread is not currently sleeping, the routine has no effect.
 *
 * @param thread ID of thread to wake.
 *
 * @return N/A
 */
__syscall void k_wakeup(k_tid_t thread);

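/*
 * Usage sketch (illustrative; sleeper_tid is an arbitrary thread ID): one
 * thread parks itself with k_sleep(), another wakes it early.
 *
 *	// in the sleeping thread
 *	k_sleep(K_FOREVER);
 *
 *	// in another thread, once the sleeper should resume
 *	k_wakeup(sleeper_tid);
 */
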
/**
 * @brief Get thread ID of the current thread.
 *
 * This unconditionally queries the kernel via a system call.
 *
 * @return ID of current thread.
 */
__attribute_const__
__syscall k_tid_t z_current_get(void);

#ifdef CONFIG_THREAD_LOCAL_STORAGE
/* Thread-local cache of current thread ID, set in z_thread_entry() */
extern __thread k_tid_t z_tls_current;
#endif

/**
 * @brief Get thread ID of the current thread.
 *
 * @return ID of current thread.
 *
 */
__attribute_const__
static inline k_tid_t k_current_get(void)
{
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	return z_tls_current;
#else
	return z_current_get();
#endif
}

/**
 * @brief Abort a thread.
 *
 * This routine permanently stops execution of @a thread. The thread is taken
 * off all kernel queues it is part of (i.e. the ready queue, the timeout
 * queue, or a kernel object wait queue). However, any kernel resources the
 * thread might currently own (such as mutexes or memory blocks) are not
 * released. It is the responsibility of the caller of this routine to ensure
 * all necessary cleanup is performed.
 *
 * After k_thread_abort() returns, the thread is guaranteed not to be
 * running or to become runnable anywhere on the system. Normally
 * this is done via blocking the caller (in the same manner as
 * k_thread_join()), but in interrupt context on SMP systems the
 * implementation is required to spin for threads that are running on
 * other CPUs. Note that as specified, this means that on SMP
 * platforms it is possible for application code to create a deadlock
 * condition by simultaneously aborting a cycle of threads using at
 * least one termination from interrupt context. Zephyr cannot detect
 * all such conditions.
 *
 * @param thread ID of thread to abort.
 *
 * @return N/A
 */
__syscall void k_thread_abort(k_tid_t thread);


/**
 * @brief Start an inactive thread
 *
 * If a thread was created with K_FOREVER in the delay parameter, it will
 * not be added to the scheduling queue until this function is called
 * on it.
 *
 * @param thread thread to start
 */
__syscall void k_thread_start(k_tid_t thread);

extern k_ticks_t z_timeout_expires(const struct _timeout *timeout);
extern k_ticks_t z_timeout_remaining(const struct _timeout *timeout);

#ifdef CONFIG_SYS_CLOCK_EXISTS

/**
 * @brief Get time when a thread wakes up, in system ticks
 *
 * This routine computes the system uptime when a waiting thread next
 * executes, in units of system ticks. If the thread is not waiting,
 * it returns current system time.
 */
__syscall k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *t);

static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
						const struct k_thread *t)
{
	return z_timeout_expires(&t->base.timeout);
}

/**
 * @brief Get time remaining before a thread wakes up, in system ticks
 *
 * This routine computes the time remaining before a waiting thread
 * next executes, in units of system ticks. If the thread is not
 * waiting, it returns zero.
 */
__syscall k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *t);

static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
						const struct k_thread *t)
{
	return z_timeout_remaining(&t->base.timeout);
}

#endif /* CONFIG_SYS_CLOCK_EXISTS */

/**
 * @cond INTERNAL_HIDDEN
 */

/* timeout has timed out and is not on _timeout_q anymore */
#define _EXPIRED (-2)

struct _static_thread_data {
	struct k_thread *init_thread;
	k_thread_stack_t *init_stack;
	unsigned int init_stack_size;
	k_thread_entry_t init_entry;
	void *init_p1;
	void *init_p2;
	void *init_p3;
	int init_prio;
	uint32_t init_options;
	int32_t init_delay;
	void (*init_abort)(void);
	const char *init_name;
};

#define Z_THREAD_INITIALIZER(thread, stack, stack_size, \
			     entry, p1, p2, p3, \
			     prio, options, delay, abort, tname) \
	{ \
	.init_thread = (thread), \
	.init_stack = (stack), \
	.init_stack_size = (stack_size), \
	.init_entry = (k_thread_entry_t)entry, \
	.init_p1 = (void *)p1, \
	.init_p2 = (void *)p2, \
	.init_p3 = (void *)p3, \
	.init_prio = (prio), \
	.init_options = (options), \
	.init_delay = (delay), \
	.init_abort = (abort), \
	.init_name = STRINGIFY(tname), \
	}

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @brief Statically define and initialize a thread.
 *
 * The thread may be scheduled for immediate execution or a delayed start.
 *
 * Thread options are architecture-specific, and can include K_ESSENTIAL,
 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
 * them using "|" (the logical OR operator).
 *
 * The ID of the thread can be accessed using:
 *
 * @code extern const k_tid_t <name>; @endcode
 *
 * @param name Name of the thread.
 * @param stack_size Stack size in bytes.
 * @param entry Thread entry function.
 * @param p1 1st entry point parameter.
 * @param p2 2nd entry point parameter.
 * @param p3 3rd entry point parameter.
 * @param prio Thread priority.
 * @param options Thread options.
 * @param delay Scheduling delay (in milliseconds), zero for no delay.
 *
 *
 * @internal It has been observed that the x86 compiler by default aligns
 * these _static_thread_data structures to 32-byte boundaries, thereby
 * wasting space. To work around this, force a 4-byte alignment.
 *
 */
#define K_THREAD_DEFINE(name, stack_size, \
			entry, p1, p2, p3, \
			prio, options, delay) \
	K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size); \
	struct k_thread _k_thread_obj_##name; \
	STRUCT_SECTION_ITERABLE(_static_thread_data, _k_thread_data_##name) = \
		Z_THREAD_INITIALIZER(&_k_thread_obj_##name, \
				     _k_thread_stack_##name, stack_size, \
				     entry, p1, p2, p3, prio, options, delay, \
				     NULL, name); \
	const k_tid_t name = (k_tid_t)&_k_thread_obj_##name

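/*
 * Usage sketch (illustrative; name, size, and priority are arbitrary):
 *
 *	void blink_entry(void *p1, void *p2, void *p3)
 *	{
 *		// thread code goes here
 *	}
 *
 *	K_THREAD_DEFINE(blink, 512, blink_entry, NULL, NULL, NULL, 5, 0, 0);
 *
 *	// elsewhere, the thread ID is available as:
 *	extern const k_tid_t blink;
 */
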
/**
 * @brief Get a thread's priority.
 *
 * This routine gets the priority of @a thread.
 *
 * @param thread ID of thread whose priority is needed.
 *
 * @return Priority of @a thread.
 */
__syscall int k_thread_priority_get(k_tid_t thread);

/**
 * @brief Set a thread's priority.
 *
 * This routine immediately changes the priority of @a thread.
 *
 * Rescheduling can occur immediately depending on the priority @a thread is
 * set to:
 *
 * - If its priority is raised above the priority of the caller of this
 * function, and the caller is preemptible, @a thread will be scheduled in.
 *
 * - If the caller operates on itself and lowers its priority below that of
 * other threads in the system, and the caller is preemptible, the thread of
 * highest priority will be scheduled in.
 *
 * Priority can be assigned in the range of -CONFIG_NUM_COOP_PRIORITIES to
 * CONFIG_NUM_PREEMPT_PRIORITIES-1, where -CONFIG_NUM_COOP_PRIORITIES is the
 * highest priority.
 *
 * @param thread ID of thread whose priority is to be set.
 * @param prio New priority.
 *
 * @warning Changing the priority of a thread currently involved in mutex
 * priority inheritance may result in undefined behavior.
 *
 * @return N/A
 */
__syscall void k_thread_priority_set(k_tid_t thread, int prio);


#ifdef CONFIG_SCHED_DEADLINE
/**
 * @brief Set deadline expiration time for scheduler
 *
 * This sets the "deadline" expiration as a time delta from the
 * current time, in the same units used by k_cycle_get_32(). The
 * scheduler (when deadline scheduling is enabled) will choose the
 * next expiring thread when selecting between threads at the same
 * static priority. Threads at different priorities will be scheduled
 * according to their static priority.
 *
 * @note Deadlines are stored internally using 32 bit unsigned
 * integers. The number of cycles between the "first" deadline in the
 * scheduler queue and the "last" deadline must be less than 2^31 (i.e.
 * a signed non-negative quantity). Failure to adhere to this rule
 * may result in scheduled threads running in an incorrect deadline
 * order.
 *
 * @note Despite the API naming, the scheduler makes no guarantee that
 * the thread WILL be scheduled within that deadline, nor does it take
 * extra metadata (like e.g. the "runtime" and "period" parameters in
 * Linux sched_setattr()) that allows the kernel to validate the
 * scheduling for achievability. Such features could be implemented
 * above this call, which is simply input to the priority selection
 * logic.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_DEADLINE} in your project
 * configuration.
 *
 * @param thread A thread on which to set the deadline
 * @param deadline A time delta, in cycle units
 *
 */
__syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
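/*
 * Usage sketch (illustrative): give the current thread a deadline roughly
 * one millisecond out, in cycle units. Assumes the k_ms_to_cyc_ceil32()
 * time-unit conversion helper is available.
 *
 *	k_thread_deadline_set(k_current_get(), k_ms_to_cyc_ceil32(1));
 */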
#endif

#ifdef CONFIG_SCHED_CPU_MASK
/**
 * @brief Sets all CPU enable masks to zero
 *
 * After this returns, the thread will no longer be schedulable on any
 * CPUs. The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_clear(k_tid_t thread);

/**
 * @brief Sets all CPU enable masks to one
 *
 * After this returns, the thread will be schedulable on any CPU. The
 * thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_enable_all(k_tid_t thread);

/**
 * @brief Enable thread to run on specified CPU
 *
 * The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @param cpu CPU index
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_enable(k_tid_t thread, int cpu);

/**
 * @brief Prevent thread from running on specified CPU
 *
 * The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @param cpu CPU index
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_disable(k_tid_t thread, int cpu);
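/*
 * Usage sketch (illustrative): pin a thread to CPU 0. The thread must not
 * be runnable yet, e.g. it was created with a K_FOREVER delay.
 *
 *	k_thread_cpu_mask_clear(tid);
 *	k_thread_cpu_mask_enable(tid, 0);
 *	k_thread_start(tid);
 */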
#endif

/**
 * @brief Suspend a thread.
 *
 * This routine prevents the kernel scheduler from making @a thread
 * the current thread. All other internal operations on @a thread are
 * still performed; for example, kernel objects it is waiting on are
 * still handed to it. Note that any existing timeouts
 * (e.g. k_sleep(), or a timeout argument to k_sem_take() et. al.)
 * will be canceled. On resume, the thread will begin running
 * immediately and return from the blocked call.
 *
 * If @a thread is already suspended, the routine has no effect.
 *
 * @param thread ID of thread to suspend.
 *
 * @return N/A
 */
__syscall void k_thread_suspend(k_tid_t thread);

/**
 * @brief Resume a suspended thread.
 *
 * This routine allows the kernel scheduler to make @a thread the current
 * thread, when it is next eligible for that role.
 *
 * If @a thread is not currently suspended, the routine has no effect.
 *
 * @param thread ID of thread to resume.
 *
 * @return N/A
 */
__syscall void k_thread_resume(k_tid_t thread);

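/*
 * Usage sketch (illustrative; worker_tid is an arbitrary thread ID):
 * temporarily take a worker thread off the scheduler's radar.
 *
 *	k_thread_suspend(worker_tid);
 *	// ... worker_tid will not be scheduled here ...
 *	k_thread_resume(worker_tid);
 */
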
/**
 * @brief Set time-slicing period and scope.
 *
 * This routine specifies how the scheduler will perform time slicing of
 * preemptible threads.
 *
 * To enable time slicing, @a slice must be non-zero. The scheduler
 * ensures that no thread runs for more than the specified time limit
 * before other threads of that priority are given a chance to execute.
 * Any thread whose priority is higher than @a prio is exempted, and may
 * execute as long as desired without being preempted due to time slicing.
 *
 * Time slicing only limits the maximum amount of time a thread may continuously
 * execute. Once the scheduler selects a thread for execution, there is no
 * minimum guaranteed time the thread will execute before threads of greater or
 * equal priority are scheduled.
 *
 * When the current thread is the only one of that priority eligible
 * for execution, this routine has no effect; the thread is immediately
 * rescheduled after the slice period expires.
 *
 * To disable timeslicing, set both @a slice and @a prio to zero.
 *
 * @param slice Maximum time slice length (in milliseconds).
 * @param prio Highest thread priority level eligible for time slicing.
 *
 * @return N/A
 */
extern void k_sched_time_slice_set(int32_t slice, int prio);

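/*
 * Usage sketch (illustrative): give each preemptible thread at priority
 * level 0 or numerically greater a 10 ms slice; pass (0, 0) to disable
 * slicing again.
 *
 *	k_sched_time_slice_set(10, 0);
 */
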
/** @} */

/**
 * @addtogroup isr_apis
 * @{
 */

/**
 * @brief Determine if code is running at interrupt level.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it is a thread or an ISR.
 *
 * @funcprops \isr_ok
 *
 * @return false if invoked by a thread.
 * @return true if invoked by an ISR.
 */
extern bool k_is_in_isr(void);

/**
 * @brief Determine if code is running in a preemptible thread.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it can be preempted by another thread. The routine returns a 'true'
 * value if all of the following conditions are met:
 *
 * - The code is running in a thread, not at ISR.
 * - The thread's priority is in the preemptible range.
 * - The thread has not locked the scheduler.
 *
 * @funcprops \isr_ok
 *
 * @return 0 if invoked by an ISR or by a cooperative thread.
 * @return Non-zero if invoked by a preemptible thread.
 */
__syscall int k_is_preempt_thread(void);

/**
 * @brief Test whether startup is in the before-main-task phase.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it is being invoked before the kernel is fully active.
 *
 * @funcprops \isr_ok
 *
 * @return true if invoked before post-kernel initialization
 * @return false if invoked during/after post-kernel initialization
 */
static inline bool k_is_pre_kernel(void)
{
	extern bool z_sys_post_kernel; /* in init.c */

	return !z_sys_post_kernel;
}

/**
 * @}
 */

/**
 * @addtogroup thread_apis
 * @{
 */

/**
 * @brief Lock the scheduler.
 *
 * This routine prevents the current thread from being preempted by another
 * thread by instructing the scheduler to treat it as a cooperative thread.
 * If the thread subsequently performs an operation that makes it unready,
 * it will be context switched out in the normal manner. When the thread
 * again becomes the current thread, its non-preemptible status is maintained.
 *
 * This routine can be called recursively.
 *
 * @note k_sched_lock() and k_sched_unlock() should normally be used
 * when the operation being performed can be safely interrupted by ISRs.
 * However, if the amount of processing involved is very small, better
 * performance may be obtained by using irq_lock() and irq_unlock().
 *
 * @return N/A
 */
extern void k_sched_lock(void);

/**
 * @brief Unlock the scheduler.
 *
 * This routine reverses the effect of a previous call to k_sched_lock().
 * A thread must call the routine once for each time it called k_sched_lock()
 * before the thread becomes preemptible.
 *
 * @return N/A
 */
extern void k_sched_unlock(void);

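/*
 * Usage sketch (illustrative): briefly protect a critical section from
 * preemption. Calls may nest; each k_sched_lock() needs a matching
 * k_sched_unlock().
 *
 *	k_sched_lock();
 *	update_shared_state();	// arbitrary non-blocking work
 *	k_sched_unlock();
 */
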
/**
 * @brief Set current thread's custom data.
 *
 * This routine sets the custom data for the current thread to @a value.
 *
 * Custom data is not used by the kernel itself, and is freely available
 * for a thread to use as it sees fit. It can be used as a framework
 * upon which to build thread-local storage.
 *
 * @param value New custom data value.
 *
 * @return N/A
 *
 */
__syscall void k_thread_custom_data_set(void *value);

/**
 * @brief Get current thread's custom data.
 *
 * This routine returns the custom data for the current thread.
 *
 * @return Current custom data value.
 */
__syscall void *k_thread_custom_data_get(void);

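/*
 * Usage sketch (illustrative; struct my_ctx is an arbitrary application
 * type): stash a per-thread context pointer.
 *
 *	static struct my_ctx ctx;
 *
 *	k_thread_custom_data_set(&ctx);
 *	// ... later, in the same thread ...
 *	struct my_ctx *p = k_thread_custom_data_get();
 */
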
/**
 * @brief Set current thread name
 *
 * Set the name of the thread to be used when @kconfig{CONFIG_THREAD_MONITOR}
 * is enabled for tracing and debugging.
 *
 * @param thread Thread to set name, or NULL to set the current thread
 * @param str Name string
 * @retval 0 on success
 * @retval -EFAULT Memory access error with supplied string
 * @retval -ENOSYS Thread name configuration option not enabled
 * @retval -EINVAL Thread name too long
 */
__syscall int k_thread_name_set(k_tid_t thread, const char *str);

/**
 * @brief Get thread name
 *
 * Get the name of a thread
 *
 * @param thread Thread ID
 * @retval Thread name, or NULL if configuration not enabled
 */
const char *k_thread_name_get(k_tid_t thread);

/**
 * @brief Copy the thread name into a supplied buffer
 *
 * @param thread Thread to obtain name information
 * @param buf Destination buffer
 * @param size Destination buffer size
 * @retval -ENOSPC Destination buffer too small
 * @retval -EFAULT Memory access error
 * @retval -ENOSYS Thread name feature not enabled
 * @retval 0 Success
 */
__syscall int k_thread_name_copy(k_tid_t thread, char *buf,
				 size_t size);

/**
 * @brief Get thread state string
 *
 * Get the human-friendly thread state string
 *
 * @param thread_id Thread ID
 * @retval Thread state string, empty if no state flag is set
 */
const char *k_thread_state_str(k_tid_t thread_id);

/**
 * @}
 */

/**
 * @addtogroup clock_apis
 * @{
 */

/**
 * @brief Generate null timeout delay.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * not to wait if the requested operation cannot be performed immediately.
 *
 * @return Timeout delay value.
 */
#define K_NO_WAIT Z_TIMEOUT_NO_WAIT

/**
 * @brief Generate timeout delay from nanoseconds.
 *
 * This macro generates a timeout delay that instructs a kernel API to
 * wait up to @a t nanoseconds to perform the requested operation.
 * Note that timer precision is limited to the tick rate, not the
 * requested value.
 *
 * @param t Duration in nanoseconds.
 *
 * @return Timeout delay value.
 */
#define K_NSEC(t) Z_TIMEOUT_NS(t)

/**
 * @brief Generate timeout delay from microseconds.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a t microseconds to perform the requested operation.
 * Note that timer precision is limited to the tick rate, not the
 * requested value.
 *
 * @param t Duration in microseconds.
 *
 * @return Timeout delay value.
 */
#define K_USEC(t) Z_TIMEOUT_US(t)

/**
 * @brief Generate timeout delay from cycles.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a t cycles to perform the requested operation.
 *
 * @param t Duration in cycles.
 *
 * @return Timeout delay value.
 */
#define K_CYC(t) Z_TIMEOUT_CYC(t)

/**
 * @brief Generate timeout delay from system ticks.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a t ticks to perform the requested operation.
 *
 * @param t Duration in system ticks.
 *
 * @return Timeout delay value.
 */
#define K_TICKS(t) Z_TIMEOUT_TICKS(t)

/**
 * @brief Generate timeout delay from milliseconds.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a ms milliseconds to perform the requested operation.
 *
 * @param ms Duration in milliseconds.
 *
 * @return Timeout delay value.
 */
#define K_MSEC(ms) Z_TIMEOUT_MS(ms)

1142/**
1143 * @brief Generate timeout delay from seconds.
1144 *
Maksim Masalskife1ff2f2019-10-29 16:50:44 +08001145 * This macro generates a timeout delay that instructs a kernel API
Allan Stephensc2f15a42016-11-17 12:24:22 -05001146 * to wait up to @a s seconds to perform the requested operation.
1147 *
1148 * @param s Duration in seconds.
1149 *
1150 * @return Timeout delay value.
1151 */
Johan Hedberg14471692016-11-13 10:52:15 +02001152#define K_SECONDS(s) K_MSEC((s) * MSEC_PER_SEC)
Allan Stephensc2f15a42016-11-17 12:24:22 -05001153
1154/**
1155 * @brief Generate timeout delay from minutes.
Maksim Masalskife1ff2f2019-10-29 16:50:44 +08001156 *
1157 * This macro generates a timeout delay that instructs a kernel API
Allan Stephensc2f15a42016-11-17 12:24:22 -05001158 * to wait up to @a m minutes to perform the requested operation.
1159 *
1160 * @param m Duration in minutes.
1161 *
1162 * @return Timeout delay value.
1163 */
Johan Hedberg14471692016-11-13 10:52:15 +02001164#define K_MINUTES(m) K_SECONDS((m) * 60)
Allan Stephensc2f15a42016-11-17 12:24:22 -05001165
1166/**
1167 * @brief Generate timeout delay from hours.
1168 *
Maksim Masalskife1ff2f2019-10-29 16:50:44 +08001169 * This macro generates a timeout delay that instructs a kernel API
Allan Stephensc2f15a42016-11-17 12:24:22 -05001170 * to wait up to @a h hours to perform the requested operation.
1171 *
1172 * @param h Duration in hours.
1173 *
1174 * @return Timeout delay value.
1175 */
Johan Hedberg14471692016-11-13 10:52:15 +02001176#define K_HOURS(h) K_MINUTES((h) * 60)
1177
Allan Stephensc98da842016-11-11 15:45:03 -05001178/**
Allan Stephensc2f15a42016-11-17 12:24:22 -05001179 * @brief Generate infinite timeout delay.
1180 *
Maksim Masalskife1ff2f2019-10-29 16:50:44 +08001181 * This macro generates a timeout delay that instructs a kernel API
Allan Stephensc2f15a42016-11-17 12:24:22 -05001182 * to wait as long as necessary to perform the requested operation.
1183 *
1184 * @return Timeout delay value.
1185 */
Andy Ross78327382020-03-05 15:18:14 -08001186#define K_FOREVER Z_FOREVER
Allan Stephensc2f15a42016-11-17 12:24:22 -05001187
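/*
 * Example (illustrative sketch): every macro above yields a k_timeout_t,
 * so the result can be passed to any kernel API that accepts a timeout.
 * The semaphore here is a hypothetical object defined elsewhere.
 *
 * @code
 * if (k_sem_take(&my_sem, K_MSEC(50)) != 0) {
 *         // Not obtained within 50 ms.
 * }
 *
 * (void)k_sem_take(&my_sem, K_NO_WAIT);   // poll without blocking
 * (void)k_sem_take(&my_sem, K_FOREVER);   // block until available
 * @endcode
 */
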
Andy Rosse1bc5952020-03-09 12:19:54 -07001188#ifdef CONFIG_TIMEOUT_64BIT
1189
Allan Stephensc2f15a42016-11-17 12:24:22 -05001190/**
Andy Rosse39bf292020-03-19 10:30:33 -07001191 * @brief Generates an absolute/uptime timeout value from system ticks
Andy Ross4c7b77a2020-03-09 09:35:35 -07001192 *
1193 * This macro generates a timeout delay that represents an expiration
Andy Rosse39bf292020-03-19 10:30:33 -07001194 * at the absolute uptime value specified, in system ticks. That is, the
Andy Ross4c7b77a2020-03-09 09:35:35 -07001195 * timeout will expire immediately after the system uptime reaches the
1196 * specified tick count.
1197 *
1198 * @param t Tick uptime value
1199 * @return Timeout delay value
1200 */
Martin Jäger19c2f782020-11-09 10:14:53 +01001201#define K_TIMEOUT_ABS_TICKS(t) \
1202 Z_TIMEOUT_TICKS(Z_TICK_ABS((k_ticks_t)MAX(t, 0)))
Andy Ross4c7b77a2020-03-09 09:35:35 -07001203
1204/**
Andy Rosse39bf292020-03-19 10:30:33 -07001205 * @brief Generates an absolute/uptime timeout value from milliseconds
Andy Ross4c7b77a2020-03-09 09:35:35 -07001206 *
1207 * This macro generates a timeout delay that represents an expiration
1208 * at the absolute uptime value specified, in milliseconds. That is,
1209 * the timeout will expire immediately after the system uptime reaches
 1210 * the specified time.
1211 *
1212 * @param t Millisecond uptime value
1213 * @return Timeout delay value
1214 */
1215#define K_TIMEOUT_ABS_MS(t) K_TIMEOUT_ABS_TICKS(k_ms_to_ticks_ceil64(t))
1216
1217/**
Andy Rosse39bf292020-03-19 10:30:33 -07001218 * @brief Generates an absolute/uptime timeout value from microseconds
Andy Rosse1bc5952020-03-09 12:19:54 -07001219 *
1220 * This macro generates a timeout delay that represents an expiration
1221 * at the absolute uptime value specified, in microseconds. That is,
1222 * the timeout will expire immediately after the system uptime reaches
1223 * the specified time. Note that timer precision is limited by the
1224 * system tick rate and not the requested timeout value.
1225 *
1226 * @param t Microsecond uptime value
1227 * @return Timeout delay value
1228 */
1229#define K_TIMEOUT_ABS_US(t) K_TIMEOUT_ABS_TICKS(k_us_to_ticks_ceil64(t))
1230
1231/**
Andy Rosse39bf292020-03-19 10:30:33 -07001232 * @brief Generates an absolute/uptime timeout value from nanoseconds
Andy Rosse1bc5952020-03-09 12:19:54 -07001233 *
1234 * This macro generates a timeout delay that represents an expiration
1235 * at the absolute uptime value specified, in nanoseconds. That is,
1236 * the timeout will expire immediately after the system uptime reaches
1237 * the specified time. Note that timer precision is limited by the
1238 * system tick rate and not the requested timeout value.
1239 *
1240 * @param t Nanosecond uptime value
1241 * @return Timeout delay value
1242 */
1243#define K_TIMEOUT_ABS_NS(t) K_TIMEOUT_ABS_TICKS(k_ns_to_ticks_ceil64(t))
1244
1245/**
Andy Rosse39bf292020-03-19 10:30:33 -07001246 * @brief Generates an absolute/uptime timeout value from system cycles
Andy Rosse1bc5952020-03-09 12:19:54 -07001247 *
1248 * This macro generates a timeout delay that represents an expiration
1249 * at the absolute uptime value specified, in cycles. That is, the
1250 * timeout will expire immediately after the system uptime reaches the
1251 * specified time. Note that timer precision is limited by the system
1252 * tick rate and not the requested timeout value.
1253 *
1254 * @param t Cycle uptime value
1255 * @return Timeout delay value
1256 */
1257#define K_TIMEOUT_ABS_CYC(t) K_TIMEOUT_ABS_TICKS(k_cyc_to_ticks_ceil64(t))
1258
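/*
 * Example (illustrative sketch): an absolute timeout gives a drift-free
 * periodic loop, because the wakeup point does not accumulate the loop
 * body's execution time. do_periodic_work() and the 100 ms period are
 * assumptions for illustration.
 *
 * @code
 * int64_t next_ms = k_uptime_get();
 *
 * for (;;) {
 *         do_periodic_work();
 *         next_ms += 100;
 *         k_sleep(K_TIMEOUT_ABS_MS(next_ms));
 * }
 * @endcode
 */
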
1259#endif
1260
1261/**
Anas Nashif166f5192018-02-25 08:02:36 -06001262 * @}
Allan Stephensc2f15a42016-11-17 12:24:22 -05001263 */
1264
1265/**
Allan Stephensc98da842016-11-11 15:45:03 -05001266 * @cond INTERNAL_HIDDEN
1267 */
Benjamin Walsha9604bd2016-09-21 11:05:56 -04001268
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001269struct k_timer {
1270 /*
1271 * _timeout structure must be first here if we want to use
 1272 * dynamic timer allocation. timeout.node is used in the doubly-linked
1273 * list of free timers
1274 */
1275 struct _timeout timeout;
1276
Allan Stephens45bfa372016-10-12 12:39:42 -05001277 /* wait queue for the (single) thread waiting on this timer */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001278 _wait_q_t wait_q;
1279
1280 /* runs in ISR context */
Flavio Ceolin4b35dd22018-11-16 19:06:59 -08001281 void (*expiry_fn)(struct k_timer *timer);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001282
1283 /* runs in the context of the thread that calls k_timer_stop() */
Flavio Ceolin4b35dd22018-11-16 19:06:59 -08001284 void (*stop_fn)(struct k_timer *timer);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001285
1286 /* timer period */
Andy Ross78327382020-03-05 15:18:14 -08001287 k_timeout_t period;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001288
Allan Stephens45bfa372016-10-12 12:39:42 -05001289 /* timer status */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001290 uint32_t status;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001291
Benjamin Walshe4e98f92017-01-12 19:38:53 -05001292 /* user-specific data, also used to support legacy features */
1293 void *user_data;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001294
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001295};
1296
Patrik Flykt97b3bd12019-03-12 15:15:42 -06001297#define Z_TIMER_INITIALIZER(obj, expiry, stop) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001298 { \
Krzysztof Chruscinskibe063272019-02-13 11:19:54 +01001299 .timeout = { \
1300 .node = {},\
Peter Bigote37c7852020-07-07 12:34:05 -05001301 .fn = z_timer_expiration_handler, \
Krzysztof Chruscinskibe063272019-02-13 11:19:54 +01001302 .dticks = 0, \
Krzysztof Chruscinskibe063272019-02-13 11:19:54 +01001303 }, \
Patrik Flykt4344e272019-03-08 14:19:05 -07001304 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
Allan Stephens1342adb2016-11-03 13:54:53 -05001305 .expiry_fn = expiry, \
1306 .stop_fn = stop, \
1307 .status = 0, \
Benjamin Walshe4e98f92017-01-12 19:38:53 -05001308 .user_data = 0, \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001309 }
1310
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001311/**
Allan Stephensc98da842016-11-11 15:45:03 -05001312 * INTERNAL_HIDDEN @endcond
1313 */
1314
1315/**
1316 * @defgroup timer_apis Timer APIs
1317 * @ingroup kernel_apis
1318 * @{
1319 */
1320
1321/**
Allan Stephens5eceb852016-11-16 10:16:30 -05001322 * @typedef k_timer_expiry_t
1323 * @brief Timer expiry function type.
1324 *
1325 * A timer's expiry function is executed by the system clock interrupt handler
1326 * each time the timer expires. The expiry function is optional, and is only
1327 * invoked if the timer has been initialized with one.
1328 *
1329 * @param timer Address of timer.
1330 *
1331 * @return N/A
1332 */
1333typedef void (*k_timer_expiry_t)(struct k_timer *timer);
1334
1335/**
1336 * @typedef k_timer_stop_t
1337 * @brief Timer stop function type.
1338 *
1339 * A timer's stop function is executed if the timer is stopped prematurely.
Peter A. Bigot82a98d72020-09-21 05:34:56 -05001340 * The function runs in the context of the call that stops the timer. As
1341 * k_timer_stop() can be invoked from an ISR, the stop function must be
1342 * callable from interrupt context (isr-ok).
1343 *
Allan Stephens5eceb852016-11-16 10:16:30 -05001344 * The stop function is optional, and is only invoked if the timer has been
1345 * initialized with one.
1346 *
1347 * @param timer Address of timer.
1348 *
1349 * @return N/A
1350 */
1351typedef void (*k_timer_stop_t)(struct k_timer *timer);
1352
1353/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001354 * @brief Statically define and initialize a timer.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001355 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001356 * The timer can be accessed outside the module where it is defined using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001357 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05001358 * @code extern struct k_timer <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001359 *
1360 * @param name Name of the timer variable.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001361 * @param expiry_fn Function to invoke each time the timer expires.
1362 * @param stop_fn Function to invoke if the timer is stopped while running.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001363 */
Allan Stephens1342adb2016-11-03 13:54:53 -05001364#define K_TIMER_DEFINE(name, expiry_fn, stop_fn) \
Fabio Baltierif88a4202021-08-04 23:05:54 +01001365 STRUCT_SECTION_ITERABLE(k_timer, name) = \
Patrik Flykt97b3bd12019-03-12 15:15:42 -06001366 Z_TIMER_INITIALIZER(name, expiry_fn, stop_fn)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001367
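/*
 * Example (illustrative sketch): a statically defined timer with an
 * expiry handler and no stop handler. The handler runs in ISR context,
 * so it must not block; the 500 ms period is an arbitrary choice.
 *
 * @code
 * void my_expiry(struct k_timer *timer)
 * {
 *         // Runs in interrupt context: keep it short.
 * }
 *
 * K_TIMER_DEFINE(my_timer, my_expiry, NULL);
 *
 * // Later, from a thread:
 * k_timer_start(&my_timer, K_MSEC(500), K_MSEC(500));
 * @endcode
 */
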
Allan Stephens45bfa372016-10-12 12:39:42 -05001368/**
1369 * @brief Initialize a timer.
1370 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001371 * This routine initializes a timer, prior to its first use.
Allan Stephens45bfa372016-10-12 12:39:42 -05001372 *
1373 * @param timer Address of timer.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001374 * @param expiry_fn Function to invoke each time the timer expires.
1375 * @param stop_fn Function to invoke if the timer is stopped while running.
Allan Stephens45bfa372016-10-12 12:39:42 -05001376 *
1377 * @return N/A
1378 */
1379extern void k_timer_init(struct k_timer *timer,
Allan Stephens5eceb852016-11-16 10:16:30 -05001380 k_timer_expiry_t expiry_fn,
1381 k_timer_stop_t stop_fn);
Andy Ross8d8b2ac2016-09-23 10:08:54 -07001382
Allan Stephens45bfa372016-10-12 12:39:42 -05001383/**
1384 * @brief Start a timer.
1385 *
1386 * This routine starts a timer, and resets its status to zero. The timer
1387 * begins counting down using the specified duration and period values.
1388 *
1389 * Attempting to start a timer that is already running is permitted.
1390 * The timer's status is reset to zero and the timer begins counting down
1391 * using the new duration and period values.
1392 *
1393 * @param timer Address of timer.
Andy Ross78327382020-03-05 15:18:14 -08001394 * @param duration Initial timer duration.
1395 * @param period Timer period.
Allan Stephens45bfa372016-10-12 12:39:42 -05001396 *
1397 * @return N/A
1398 */
Andrew Boiea354d492017-09-29 16:22:28 -07001399__syscall void k_timer_start(struct k_timer *timer,
Andy Ross78327382020-03-05 15:18:14 -08001400 k_timeout_t duration, k_timeout_t period);
Allan Stephens45bfa372016-10-12 12:39:42 -05001401
1402/**
1403 * @brief Stop a timer.
1404 *
1405 * This routine stops a running timer prematurely. The timer's stop function,
1406 * if one exists, is invoked by the caller.
1407 *
1408 * Attempting to stop a timer that is not running is permitted, but has no
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001409 * effect on the timer.
Allan Stephens45bfa372016-10-12 12:39:42 -05001410 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001411 * @note The stop handler has to be callable from ISRs if @a k_timer_stop is to
1412 * be called from ISRs.
1413 *
1414 * @funcprops \isr_ok
Anas Nashif4fb12ae2017-02-01 20:06:55 -05001415 *
Allan Stephens45bfa372016-10-12 12:39:42 -05001416 * @param timer Address of timer.
1417 *
1418 * @return N/A
1419 */
Andrew Boiea354d492017-09-29 16:22:28 -07001420__syscall void k_timer_stop(struct k_timer *timer);
Allan Stephens45bfa372016-10-12 12:39:42 -05001421
1422/**
1423 * @brief Read timer status.
1424 *
1425 * This routine reads the timer's status, which indicates the number of times
1426 * it has expired since its status was last read.
1427 *
1428 * Calling this routine resets the timer's status to zero.
1429 *
1430 * @param timer Address of timer.
1431 *
1432 * @return Timer status.
1433 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001434__syscall uint32_t k_timer_status_get(struct k_timer *timer);
Allan Stephens45bfa372016-10-12 12:39:42 -05001435
1436/**
1437 * @brief Synchronize thread to timer expiration.
1438 *
1439 * This routine blocks the calling thread until the timer's status is non-zero
1440 * (indicating that it has expired at least once since it was last examined)
1441 * or the timer is stopped. If the timer status is already non-zero,
1442 * or the timer is already stopped, the caller continues without waiting.
1443 *
1444 * Calling this routine resets the timer's status to zero.
1445 *
1446 * This routine must not be used by interrupt handlers, since they are not
1447 * allowed to block.
1448 *
1449 * @param timer Address of timer.
1450 *
1451 * @return Timer status.
1452 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001453__syscall uint32_t k_timer_status_sync(struct k_timer *timer);
Allan Stephens45bfa372016-10-12 12:39:42 -05001454
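/*
 * Example (illustrative sketch): pacing a processing loop with a
 * periodic timer. Each k_timer_status_sync() call blocks until the next
 * expiry, so iterations run at most once per period (assuming the work
 * fits within one period). do_work() is hypothetical.
 *
 * @code
 * struct k_timer pacer;
 *
 * k_timer_init(&pacer, NULL, NULL);
 * k_timer_start(&pacer, K_MSEC(10), K_MSEC(10));
 *
 * for (;;) {
 *         k_timer_status_sync(&pacer);
 *         do_work();
 * }
 * @endcode
 */
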
Andy Ross5a5d3da2020-03-09 13:59:15 -07001455#ifdef CONFIG_SYS_CLOCK_EXISTS
1456
1457/**
Andy Rosse39bf292020-03-19 10:30:33 -07001458 * @brief Get next expiration time of a timer, in system ticks
Andy Ross5a5d3da2020-03-09 13:59:15 -07001459 *
1460 * This routine returns the future system uptime reached at the next
1461 * time of expiration of the timer, in units of system ticks. If the
 1462 * timer is not running, the current system uptime is returned.
1463 *
1464 * @param timer The timer object
1465 * @return Uptime of expiration, in ticks
1466 */
Peter Bigot0ab314f2020-11-16 15:28:59 -06001467__syscall k_ticks_t k_timer_expires_ticks(const struct k_timer *timer);
Andy Ross5a5d3da2020-03-09 13:59:15 -07001468
Peter Bigot0ab314f2020-11-16 15:28:59 -06001469static inline k_ticks_t z_impl_k_timer_expires_ticks(
1470 const struct k_timer *timer)
Andy Ross5a5d3da2020-03-09 13:59:15 -07001471{
1472 return z_timeout_expires(&timer->timeout);
1473}
1474
1475/**
Andy Rosse39bf292020-03-19 10:30:33 -07001476 * @brief Get time remaining before a timer next expires, in system ticks
Andy Ross5a5d3da2020-03-09 13:59:15 -07001477 *
1478 * This routine computes the time remaining before a running timer
1479 * next expires, in units of system ticks. If the timer is not
 1480 * running, it returns zero.
 *
 * @param timer The timer object
 * @return Remaining time until expiration, in ticks
 1481 */
Peter Bigot0ab314f2020-11-16 15:28:59 -06001482__syscall k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer);
Andy Ross5a5d3da2020-03-09 13:59:15 -07001483
Peter Bigot0ab314f2020-11-16 15:28:59 -06001484static inline k_ticks_t z_impl_k_timer_remaining_ticks(
1485 const struct k_timer *timer)
Andy Ross5a5d3da2020-03-09 13:59:15 -07001486{
1487 return z_timeout_remaining(&timer->timeout);
1488}
Andy Ross52e444b2018-09-28 09:06:37 -07001489
Allan Stephens45bfa372016-10-12 12:39:42 -05001490/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001491 * @brief Get time remaining before a timer next expires.
Allan Stephens45bfa372016-10-12 12:39:42 -05001492 *
1493 * This routine computes the (approximate) time remaining before a running
1494 * timer next expires. If the timer is not running, it returns zero.
1495 *
1496 * @param timer Address of timer.
1497 *
1498 * @return Remaining time (in milliseconds).
1499 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001500static inline uint32_t k_timer_remaining_get(struct k_timer *timer)
Johan Hedbergf99ad3f2016-12-09 10:39:49 +02001501{
Andy Ross5a5d3da2020-03-09 13:59:15 -07001502 return k_ticks_to_ms_floor32(k_timer_remaining_ticks(timer));
Johan Hedbergf99ad3f2016-12-09 10:39:49 +02001503}
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001504
Andy Ross5a5d3da2020-03-09 13:59:15 -07001505#endif /* CONFIG_SYS_CLOCK_EXISTS */
1506
Allan Stephensc98da842016-11-11 15:45:03 -05001507/**
Benjamin Walshe4e98f92017-01-12 19:38:53 -05001508 * @brief Associate user-specific data with a timer.
1509 *
1510 * This routine records the @a user_data with the @a timer, to be retrieved
1511 * later.
1512 *
1513 * It can be used e.g. in a timer handler shared across multiple subsystems to
1514 * retrieve data specific to the subsystem this timer is associated with.
1515 *
1516 * @param timer Address of timer.
1517 * @param user_data User data to associate with the timer.
1518 *
1519 * @return N/A
1520 */
Andrew Boiea354d492017-09-29 16:22:28 -07001521__syscall void k_timer_user_data_set(struct k_timer *timer, void *user_data);
1522
Anas Nashif954d5502018-02-25 08:37:28 -06001523/**
1524 * @internal
1525 */
Patrik Flykt4344e272019-03-08 14:19:05 -07001526static inline void z_impl_k_timer_user_data_set(struct k_timer *timer,
Andrew Boiea354d492017-09-29 16:22:28 -07001527 void *user_data)
Benjamin Walshe4e98f92017-01-12 19:38:53 -05001528{
1529 timer->user_data = user_data;
1530}
1531
1532/**
1533 * @brief Retrieve the user-specific data from a timer.
1534 *
1535 * @param timer Address of timer.
1536 *
1537 * @return The user data.
1538 */
Peter A. Bigotf1b86ca2020-09-18 16:24:57 -05001539__syscall void *k_timer_user_data_get(const struct k_timer *timer);
Andrew Boiea354d492017-09-29 16:22:28 -07001540
Peter A. Bigotf1b86ca2020-09-18 16:24:57 -05001541static inline void *z_impl_k_timer_user_data_get(const struct k_timer *timer)
Benjamin Walshe4e98f92017-01-12 19:38:53 -05001542{
1543 return timer->user_data;
1544}
1545
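/*
 * Example (illustrative sketch): using the user-data slot to share one
 * expiry handler across subsystems. struct my_ctx, subsys_timer and
 * subsys_ctx are hypothetical names for illustration.
 *
 * @code
 * void shared_expiry(struct k_timer *timer)
 * {
 *         struct my_ctx *ctx = k_timer_user_data_get(timer);
 *
 *         // ...dispatch based on ctx...
 * }
 *
 * k_timer_init(&subsys_timer, shared_expiry, NULL);
 * k_timer_user_data_set(&subsys_timer, &subsys_ctx);
 * @endcode
 */
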
Anas Nashif166f5192018-02-25 08:02:36 -06001546/** @} */
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001547
Allan Stephensc98da842016-11-11 15:45:03 -05001548/**
Allan Stephensc2f15a42016-11-17 12:24:22 -05001549 * @addtogroup clock_apis
Jian Kanga3ec9b02021-07-21 09:52:14 +08001550 * @ingroup kernel_apis
Allan Stephensc98da842016-11-11 15:45:03 -05001551 * @{
1552 */
Allan Stephens45bfa372016-10-12 12:39:42 -05001553
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001554/**
Andy Rosse39bf292020-03-19 10:30:33 -07001555 * @brief Get system uptime, in system ticks.
Andy Ross914205c2020-03-10 15:26:38 -07001556 *
1557 * This routine returns the elapsed time since the system booted, in
Gerard Marull-Paretas72ab6b22021-06-28 17:13:40 +02001558 * ticks (c.f. @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC}), which is the
Andy Ross914205c2020-03-10 15:26:38 -07001559 * fundamental unit of resolution of kernel timekeeping.
1560 *
1561 * @return Current uptime in ticks.
1562 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001563__syscall int64_t k_uptime_ticks(void);
Andy Ross914205c2020-03-10 15:26:38 -07001564
1565/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001566 * @brief Get system uptime.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001567 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001568 * This routine returns the elapsed time since the system booted,
1569 * in milliseconds.
1570 *
David B. Kinder00c41ea2019-06-10 11:13:33 -07001571 * @note
David B. Kinder00c41ea2019-06-10 11:13:33 -07001572 * While this function returns time in milliseconds, it does
1573 * not mean it has millisecond resolution. The actual resolution depends on
Gerard Marull-Paretas72ab6b22021-06-28 17:13:40 +02001574 * @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
Paul Sokolovsky65d51fd2019-02-04 22:44:50 +03001575 *
1576 * @return Current uptime in milliseconds.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001577 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001578static inline int64_t k_uptime_get(void)
Andy Ross914205c2020-03-10 15:26:38 -07001579{
1580 return k_ticks_to_ms_floor64(k_uptime_ticks());
1581}
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001582
Ramesh Thomas89ffd442017-02-05 19:37:19 -08001583/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001584 * @brief Get system uptime (32-bit version).
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001585 *
Peter Bigota6067a32019-08-28 08:19:26 -05001586 * This routine returns the lower 32 bits of the system uptime in
1587 * milliseconds.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001588 *
Peter Bigota6067a32019-08-28 08:19:26 -05001589 * Because correct conversion requires full precision of the system
1590 * clock there is no benefit to using this over k_uptime_get() unless
1591 * you know the application will never run long enough for the system
1592 * clock to approach 2^32 ticks. Calls to this function may involve
1593 * interrupt blocking and 64-bit math.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001594 *
David B. Kinder00c41ea2019-06-10 11:13:33 -07001595 * @note
David B. Kinder00c41ea2019-06-10 11:13:33 -07001596 * While this function returns time in milliseconds, it does
1597 * not mean it has millisecond resolution. The actual resolution depends on
Gerard Marull-Paretas72ab6b22021-06-28 17:13:40 +02001598 * @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
Paul Sokolovsky65d51fd2019-02-04 22:44:50 +03001599 *
Peter Bigota6067a32019-08-28 08:19:26 -05001600 * @return The low 32 bits of the current uptime, in milliseconds.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001601 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001602static inline uint32_t k_uptime_get_32(void)
Peter Bigota6067a32019-08-28 08:19:26 -05001603{
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001604 return (uint32_t)k_uptime_get();
Peter Bigota6067a32019-08-28 08:19:26 -05001605}
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001606
1607/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001608 * @brief Get elapsed time.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001609 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001610 * This routine computes the elapsed time between the current system uptime
1611 * and an earlier reference time, in milliseconds.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001612 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001613 * @param reftime Pointer to a reference time, which is updated to the current
1614 * uptime upon return.
1615 *
1616 * @return Elapsed time.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001617 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001618static inline int64_t k_uptime_delta(int64_t *reftime)
Andy Ross987c0e52018-09-27 16:50:00 -07001619{
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001620 int64_t uptime, delta;
Andy Ross987c0e52018-09-27 16:50:00 -07001621
1622 uptime = k_uptime_get();
1623 delta = uptime - *reftime;
1624 *reftime = uptime;
1625
1626 return delta;
1627}
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001628
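/*
 * Example (illustrative sketch): timing an operation in milliseconds.
 * do_operation() is hypothetical; resolution remains bounded by the
 * tick rate.
 *
 * @code
 * int64_t ref = k_uptime_get();
 *
 * do_operation();
 *
 * int64_t elapsed_ms = k_uptime_delta(&ref);
 * // ref now holds the current uptime, ready for the next measurement.
 * @endcode
 */
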
1629/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001630 * @brief Read the hardware clock.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001631 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001632 * This routine returns the current time, as measured by the system's hardware
1633 * clock.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001634 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001635 * @return Current hardware clock up-counter (in cycles).
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001636 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001637static inline uint32_t k_cycle_get_32(void)
Andrew Boie979b17f2019-10-03 15:20:41 -07001638{
Andrew Boie4f77c2a2019-11-07 12:43:29 -08001639 return arch_k_cycle_get_32();
Andrew Boie979b17f2019-10-03 15:20:41 -07001640}
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001641
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001642/**
Anas Nashif166f5192018-02-25 08:02:36 -06001643 * @}
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001644 */
1645
Allan Stephensc98da842016-11-11 15:45:03 -05001646/**
1647 * @cond INTERNAL_HIDDEN
1648 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001649
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001650struct k_queue {
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001651 sys_sflist_t data_q;
Andy Ross603ea422018-07-25 13:01:54 -07001652 struct k_spinlock lock;
Andy Ross99c2d2d2020-06-02 08:34:12 -07001653 _wait_q_t wait_q;
Luiz Augusto von Dentz84db6412017-07-13 12:43:59 +03001654
Andy Ross99c2d2d2020-06-02 08:34:12 -07001655 _POLL_EVENT;
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001656};
1657
Anas Nashif45a1d8a2020-04-24 11:29:17 -04001658#define Z_QUEUE_INITIALIZER(obj) \
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001659 { \
Toby Firth680ec0b2020-10-05 13:45:47 +01001660 .data_q = SYS_SFLIST_STATIC_INIT(&obj.data_q), \
Stephanos Ioannidisf628dcd2019-09-11 18:09:49 +09001661 .lock = { }, \
Andy Ross99c2d2d2020-06-02 08:34:12 -07001662 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
1663 _POLL_EVENT_OBJ_INIT(obj) \
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001664 }
1665
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001666extern void *z_queue_node_peek(sys_sfnode_t *node, bool needs_free);
1667
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001668/**
1669 * INTERNAL_HIDDEN @endcond
1670 */
1671
1672/**
1673 * @defgroup queue_apis Queue APIs
1674 * @ingroup kernel_apis
1675 * @{
1676 */
1677
1678/**
1679 * @brief Initialize a queue.
1680 *
1681 * This routine initializes a queue object, prior to its first use.
1682 *
1683 * @param queue Address of the queue.
1684 *
1685 * @return N/A
1686 */
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001687__syscall void k_queue_init(struct k_queue *queue);
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001688
1689/**
Paul Sokolovsky3f507072017-04-25 17:54:31 +03001690 * @brief Cancel waiting on a queue.
1691 *
 1692 * This routine causes the first thread pending on @a queue, if any, to
 1693 * return from its k_queue_get() call with NULL (as if the timeout expired).
Paul Sokolovsky45c0b202018-08-21 23:29:11 +03001694 * If the queue is being waited on by k_poll(), it will return with
1695 * -EINTR and K_POLL_STATE_CANCELLED state (and per above, subsequent
1696 * k_queue_get() will return NULL).
Paul Sokolovsky3f507072017-04-25 17:54:31 +03001697 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001698 * @funcprops \isr_ok
Paul Sokolovsky3f507072017-04-25 17:54:31 +03001699 *
1700 * @param queue Address of the queue.
1701 *
1702 * @return N/A
1703 */
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001704__syscall void k_queue_cancel_wait(struct k_queue *queue);
Paul Sokolovsky3f507072017-04-25 17:54:31 +03001705
1706/**
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001707 * @brief Append an element to the end of a queue.
1708 *
1709 * This routine appends a data item to @a queue. A queue data item must be
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001710 * aligned on a word boundary, and the first word of the item is reserved
1711 * for the kernel's use.
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001712 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001713 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001714 *
1715 * @param queue Address of the queue.
1716 * @param data Address of the data item.
1717 *
1718 * @return N/A
1719 */
1720extern void k_queue_append(struct k_queue *queue, void *data);
1721
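/*
 * Example (illustrative sketch): the "first word reserved" rule is
 * usually satisfied by making a pointer the first member of the queued
 * item. struct my_item and my_queue are hypothetical names.
 *
 * @code
 * struct my_item {
 *         void *reserved;   // first word, used by the kernel
 *         int payload;
 * };
 *
 * static struct my_item item = { .payload = 42 };
 *
 * k_queue_append(&my_queue, &item);
 *
 * struct my_item *got = k_queue_get(&my_queue, K_FOREVER);
 * @endcode
 */
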
1722/**
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001723 * @brief Append an element to a queue.
1724 *
Andrew Boieac3dcc12019-04-01 12:28:03 -07001725 * This routine appends a data item to @a queue. There is an implicit memory
1726 * allocation to create an additional temporary bookkeeping data structure from
1727 * the calling thread's resource pool, which is automatically freed when the
1728 * item is removed. The data itself is not copied.
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001729 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001730 * @funcprops \isr_ok
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001731 *
1732 * @param queue Address of the queue.
1733 * @param data Address of the data item.
1734 *
1735 * @retval 0 on success
1736 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
1737 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001738__syscall int32_t k_queue_alloc_append(struct k_queue *queue, void *data);
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001739
1740/**
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001741 * @brief Prepend an element to a queue.
1742 *
1743 * This routine prepends a data item to @a queue. A queue data item must be
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001744 * aligned on a word boundary, and the first word of the item is reserved
1745 * for the kernel's use.
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001746 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001747 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001748 *
1749 * @param queue Address of the queue.
1750 * @param data Address of the data item.
1751 *
1752 * @return N/A
1753 */
1754extern void k_queue_prepend(struct k_queue *queue, void *data);
1755
1756/**
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001757 * @brief Prepend an element to a queue.
1758 *
Andrew Boieac3dcc12019-04-01 12:28:03 -07001759 * This routine prepends a data item to @a queue. There is an implicit memory
1760 * allocation to create an additional temporary bookkeeping data structure from
1761 * the calling thread's resource pool, which is automatically freed when the
1762 * item is removed. The data itself is not copied.
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001763 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001764 * @funcprops \isr_ok
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001765 *
1766 * @param queue Address of the queue.
1767 * @param data Address of the data item.
1768 *
1769 * @retval 0 on success
1770 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
1771 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001772__syscall int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data);
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001773
1774/**
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001775 * @brief Inserts an element to a queue.
1776 *
 1777 * This routine inserts a data item into @a queue after the previous item. A queue
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001778 * data item must be aligned on a word boundary, and the first word of
1779 * the item is reserved for the kernel's use.
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001780 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001781 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001782 *
1783 * @param queue Address of the queue.
1784 * @param prev Address of the previous data item.
1785 * @param data Address of the data item.
1786 *
1787 * @return N/A
1788 */
1789extern void k_queue_insert(struct k_queue *queue, void *prev, void *data);
1790
1791/**
1792 * @brief Atomically append a list of elements to a queue.
1793 *
1794 * This routine adds a list of data items to @a queue in one operation.
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001795 * The data items must be in a singly-linked list, with the first word
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001796 * in each data item pointing to the next data item; the list must be
1797 * NULL-terminated.
1798 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001799 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001800 *
1801 * @param queue Address of the queue.
1802 * @param head Pointer to first node in singly-linked list.
1803 * @param tail Pointer to last node in singly-linked list.
1804 *
Anas Nashif756d8b02019-06-16 09:53:55 -04001805 * @retval 0 on success
1806 * @retval -EINVAL on invalid supplied data
1807 *
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001808 */
Anas Nashif756d8b02019-06-16 09:53:55 -04001809extern int k_queue_append_list(struct k_queue *queue, void *head, void *tail);
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001810
1811/**
1812 * @brief Atomically add a list of elements to a queue.
1813 *
1814 * This routine adds a list of data items to @a queue in one operation.
1815 * The data items must be in a singly-linked list implemented using a
1816 * sys_slist_t object. Upon completion, the original list is empty.
1817 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001818 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001819 *
1820 * @param queue Address of the queue.
1821 * @param list Pointer to sys_slist_t object.
1822 *
Anas Nashif756d8b02019-06-16 09:53:55 -04001823 * @retval 0 on success
1824 * @retval -EINVAL on invalid data
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001825 */
Anas Nashif756d8b02019-06-16 09:53:55 -04001826extern int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001827
1828/**
1829 * @brief Get an element from a queue.
1830 *
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001831 * This routine removes the first data item from @a queue. The first word of the
1832 * data item is reserved for the kernel's use.
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001833 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001834 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
1835 *
1836 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001837 *
1838 * @param queue Address of the queue.
Andy Ross78327382020-03-05 15:18:14 -08001839 * @param timeout Non-negative waiting period to obtain a data item
1840 * or one of the special values K_NO_WAIT and
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01001841 * K_FOREVER.
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001842 *
1843 * @return Address of the data item if successful; NULL if returned
1844 * without waiting, or waiting period timed out.
1845 */
Andy Ross78327382020-03-05 15:18:14 -08001846__syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout);
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001847
1848/**
Luiz Augusto von Dentz50b93772017-07-03 16:52:45 +03001849 * @brief Remove an element from a queue.
1850 *
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001851 * This routine removes a data item from @a queue. The first word of the
 1852 * data item is reserved for the kernel's use. Removing elements from a k_queue
Luiz Augusto von Dentz50b93772017-07-03 16:52:45 +03001853 * relies on sys_slist_find_and_remove(), which is not a constant-time operation.
1854 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001855 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
1856 *
1857 * @funcprops \isr_ok
Luiz Augusto von Dentz50b93772017-07-03 16:52:45 +03001858 *
1859 * @param queue Address of the queue.
1860 * @param data Address of the data item.
1861 *
1862 * @return true if data item was removed
1863 */
Torbjörn Leksellf9848232021-03-26 11:19:35 +01001864bool k_queue_remove(struct k_queue *queue, void *data);
Luiz Augusto von Dentz50b93772017-07-03 16:52:45 +03001865
1866/**
Dhananjay Gundapu Jayakrishnan24bfa402018-08-22 12:33:00 +02001867 * @brief Append an element to a queue only if it's not present already.
1868 *
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001869 * This routine appends a data item to @a queue. The first word of the data
 1870 * item is reserved for the kernel's use. Appending elements to a k_queue
Dhananjay Gundapu Jayakrishnan24bfa402018-08-22 12:33:00 +02001871 * relies on sys_slist_is_node_in_list(), which is not a constant-time operation.
1872 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001873 * @funcprops \isr_ok
Dhananjay Gundapu Jayakrishnan24bfa402018-08-22 12:33:00 +02001874 *
1875 * @param queue Address of the queue.
1876 * @param data Address of the data item.
1877 *
1878 * @return true if data item was added, false if not
1879 */
Torbjörn Leksellf9848232021-03-26 11:19:35 +01001880bool k_queue_unique_append(struct k_queue *queue, void *data);
Dhananjay Gundapu Jayakrishnan24bfa402018-08-22 12:33:00 +02001881
1882/**
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001883 * @brief Query a queue to see if it has data available.
1884 *
 1885 * Note that the data might already be gone by the time this function returns
1886 * if other threads are also trying to read from the queue.
1887 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001888 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001889 *
1890 * @param queue Address of the queue.
1891 *
1892 * @return Non-zero if the queue is empty.
1893 * @return 0 if data is available.
1894 */
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001895__syscall int k_queue_is_empty(struct k_queue *queue);
1896
Patrik Flykt4344e272019-03-08 14:19:05 -07001897static inline int z_impl_k_queue_is_empty(struct k_queue *queue)
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001898{
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001899 return (int)sys_sflist_is_empty(&queue->data_q);
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001900}
1901
1902/**
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03001903 * @brief Peek element at the head of queue.
1904 *
1905 * Return element from the head of queue without removing it.
1906 *
1907 * @param queue Address of the queue.
1908 *
1909 * @return Head element, or NULL if queue is empty.
1910 */
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001911__syscall void *k_queue_peek_head(struct k_queue *queue);
1912
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03001913/**
1914 * @brief Peek element at the tail of queue.
1915 *
1916 * Return element from the tail of queue without removing it.
1917 *
1918 * @param queue Address of the queue.
1919 *
1920 * @return Tail element, or NULL if queue is empty.
1921 */
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001922__syscall void *k_queue_peek_tail(struct k_queue *queue);
1923
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03001924/**
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001925 * @brief Statically define and initialize a queue.
1926 *
1927 * The queue can be accessed outside the module where it is defined using:
1928 *
1929 * @code extern struct k_queue <name>; @endcode
1930 *
1931 * @param name Name of the queue.
1932 */
1933#define K_QUEUE_DEFINE(name) \
Fabio Baltierif88a4202021-08-04 23:05:54 +01001934 STRUCT_SECTION_ITERABLE(k_queue, name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04001935 Z_QUEUE_INITIALIZER(name)
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001936
Anas Nashif166f5192018-02-25 08:02:36 -06001937/** @} */
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001938
Wentong Wu5611e922019-06-20 23:51:27 +08001939#ifdef CONFIG_USERSPACE
1940/**
1941 * @brief futex structure
1942 *
1943 * A k_futex is a lightweight mutual exclusion primitive designed
1944 * to minimize kernel involvement. Uncontended operation relies
 1945 * only on atomic access to shared memory. k_futex objects are tracked as
Lauren Murphyd922fed2021-02-01 21:24:47 -06001946 * kernel objects and can live in user memory so that any access
1947 * bypasses the kernel object permission management mechanism.
Wentong Wu5611e922019-06-20 23:51:27 +08001948 */
1949struct k_futex {
1950 atomic_t val;
1951};
1952
1953/**
1954 * @brief futex kernel data structure
1955 *
1956 * z_futex_data are the helper data structure for k_futex to complete
1957 * futex contended operation on kernel side, structure z_futex_data
1958 * of every futex object is invisible in user mode.
1959 */
1960struct z_futex_data {
1961 _wait_q_t wait_q;
1962 struct k_spinlock lock;
1963};
1964
1965#define Z_FUTEX_DATA_INITIALIZER(obj) \
1966 { \
1967 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q) \
1968 }
1969
1970/**
1971 * @defgroup futex_apis FUTEX APIs
1972 * @ingroup kernel_apis
1973 * @{
1974 */
1975
1976/**
Wentong Wu5611e922019-06-20 23:51:27 +08001977 * @brief Pend the current thread on a futex
1978 *
1979 * Tests that the supplied futex contains the expected value, and if so,
1980 * goes to sleep until some other thread calls k_futex_wake() on it.
1981 *
1982 * @param futex Address of the futex.
1983 * @param expected Expected value of the futex, if it is different the caller
1984 * will not wait on it.
Andy Ross78327382020-03-05 15:18:14 -08001985 * @param timeout Non-negative waiting period on the futex, or
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01001986 * one of the special values K_NO_WAIT or K_FOREVER.
Wentong Wu5611e922019-06-20 23:51:27 +08001987 * @retval -EACCES Caller does not have read access to futex address.
1988 * @retval -EAGAIN If the futex value did not match the expected parameter.
1989 * @retval -EINVAL Futex parameter address not recognized by the kernel.
1990 * @retval -ETIMEDOUT Thread woke up due to timeout and not a futex wakeup.
1991 * @retval 0 if the caller went to sleep and was woken up. The caller
1992 * should check the futex's value on wakeup to determine if it needs
1993 * to block again.
1994 */
Andy Ross78327382020-03-05 15:18:14 -08001995__syscall int k_futex_wait(struct k_futex *futex, int expected,
1996 k_timeout_t timeout);
Wentong Wu5611e922019-06-20 23:51:27 +08001997
1998/**
1999 * @brief Wake one/all threads pending on a futex
2000 *
2001 * Wake up the highest priority thread pending on the supplied futex, or
2002 * wakeup all the threads pending on the supplied futex, and the behavior
2003 * depends on wake_all.
2004 *
2005 * @param futex Futex to wake up pending threads.
2006 * @param wake_all If true, wake up all pending threads; If false,
2007 * wakeup the highest priority thread.
2008 * @retval -EACCES Caller does not have access to the futex address.
2009 * @retval -EINVAL Futex parameter address not recognized by the kernel.
2010 * @retval Number of threads that were woken up.
2011 */
2012__syscall int k_futex_wake(struct k_futex *futex, bool wake_all);
2013
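/*
 * Example (illustrative sketch, assuming atomic_cas() and atomic_set()
 * from the atomic API): a minimal lock built on a futex, where val 0
 * means unlocked and 1 means locked. A production lock would also track
 * contention to avoid unnecessary wake-ups.
 *
 * @code
 * static struct k_futex my_futex;   // val starts at 0 (unlocked)
 *
 * void lock(void)
 * {
 *         while (!atomic_cas(&my_futex.val, 0, 1)) {
 *                 (void)k_futex_wait(&my_futex, 1, K_FOREVER);
 *         }
 * }
 *
 * void unlock(void)
 * {
 *         atomic_set(&my_futex.val, 0);
 *         (void)k_futex_wake(&my_futex, false);
 * }
 * @endcode
 */
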
2014/** @} */
2015#endif
2016
Peter Mitsisae394bf2021-09-20 14:14:32 -04002017/**
2018 * @defgroup event_apis Event APIs
2019 * @ingroup kernel_apis
2020 * @{
2021 */
2022
2023/**
 2024 * @brief Event structure
 2025 * @ingroup event_apis
 2026 */
2028struct k_event {
2029 _wait_q_t wait_q;
2030 uint32_t events;
2031 struct k_spinlock lock;
2032};
2033
2034#define Z_EVENT_INITIALIZER(obj) \
2035 { \
2036 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2037 .events = 0 \
2038 }
2039/**
2040 * @brief Initialize an event object
2041 *
2042 * This routine initializes an event object, prior to its first use.
2043 *
2044 * @param event Address of the event object.
2045 *
2046 * @return N/A
2047 */
2049__syscall void k_event_init(struct k_event *event);
2050
2051/**
2052 * @brief Post one or more events to an event object
2053 *
2054 * This routine posts one or more events to an event object. All tasks waiting
2055 * on the event object @a event whose waiting conditions become met by this
2056 * posting immediately unpend.
2057 *
2058 * Posting differs from setting in that posted events are merged together with
2059 * the current set of events tracked by the event object.
2060 *
2061 * @param event Address of the event object
2062 * @param events Set of events to post to @a event
2063 *
2064 * @return N/A
2065 */
2066
2068
2069/**
2070 * @brief Set the events in an event object
2071 *
2072 * This routine sets the events stored in event object to the specified value.
2073 * All tasks waiting on the event object @a event whose waiting conditions
2074 * become met by this immediately unpend.
2075 *
2076 * Setting differs from posting in that set events replace the current set of
2077 * events tracked by the event object.
2078 *
2079 * @param event Address of the event object
 2080 * @param events Set of events to set in @a event
2081 *
2082 * @return N/A
2083 */
2085__syscall void k_event_set(struct k_event *event, uint32_t events);
2086
2087/**
2088 * @brief Wait for any of the specified events
2089 *
2090 * This routine waits on event object @a event until any of the specified
2091 * events have been delivered to the event object, or the maximum wait time
2092 * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
2093 * events that are expressed as bits in a single 32-bit word.
2094 *
2095 * @note The caller must be careful when resetting if there are multiple threads
2096 * waiting for the event object @a event.
2097 *
2098 * @param event Address of the event object
2099 * @param events Set of desired events on which to wait
2100 * @param reset If true, clear the set of events tracked by the event object
2101 * before waiting. If false, do not clear the events.
2102 * @param timeout Waiting period for the desired set of events or one of the
2103 * special values K_NO_WAIT and K_FOREVER.
2104 *
2105 * @retval set of matching events upon success
2106 * @retval 0 if matching events were not received within the specified time
2107 */
2109__syscall uint32_t k_event_wait(struct k_event *event, uint32_t events,
2110 bool reset, k_timeout_t timeout);
2111
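/*
 * Example (illustrative sketch): one thread waits for either of two
 * application-defined event bits posted elsewhere. EV_RX_DONE and
 * EV_TX_DONE are hypothetical names; BIT() comes from sys/util.h.
 *
 * @code
 * #define EV_RX_DONE BIT(0)
 * #define EV_TX_DONE BIT(1)
 *
 * K_EVENT_DEFINE(io_events);
 *
 * // Waiter: returns the matching bits, or 0 on timeout.
 * uint32_t ev = k_event_wait(&io_events, EV_RX_DONE | EV_TX_DONE,
 *                            false, K_MSEC(100));
 *
 * // Poster, e.g. from an ISR or another thread:
 * k_event_post(&io_events, EV_RX_DONE);
 * @endcode
 */
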
2112/**
 2113 * @brief Wait for all of the specified events
2114 *
2115 * This routine waits on event object @a event until all of the specified
2116 * events have been delivered to the event object, or the maximum wait time
2117 * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
2118 * events that are expressed as bits in a single 32-bit word.
2119 *
2120 * @note The caller must be careful when resetting if there are multiple threads
2121 * waiting for the event object @a event.
2122 *
2123 * @param event Address of the event object
2124 * @param events Set of desired events on which to wait
2125 * @param reset If true, clear the set of events tracked by the event object
2126 * before waiting. If false, do not clear the events.
2127 * @param timeout Waiting period for the desired set of events or one of the
2128 * special values K_NO_WAIT and K_FOREVER.
2129 *
2130 * @retval set of matching events upon success
2131 * @retval 0 if matching events were not received within the specified time
2132 */
2134__syscall uint32_t k_event_wait_all(struct k_event *event, uint32_t events,
2135 bool reset, k_timeout_t timeout);
2136
2137/**
2138 * @brief Statically define and initialize an event object
2139 *
2140 * The event can be accessed outside the module where it is defined using:
2141 *
2142 * @code extern struct k_event <name>; @endcode
2143 *
2144 * @param name Name of the event object.
2145 */
2146
2148 STRUCT_SECTION_ITERABLE(k_event, name) = \
2149 Z_EVENT_INITIALIZER(name);
2150
2151/** @} */
2152
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002153struct k_fifo {
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002154 struct k_queue _queue;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002155};
2156
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04002157/**
2158 * @cond INTERNAL_HIDDEN
2159 */
Patrik Flykt97b3bd12019-03-12 15:15:42 -06002160#define Z_FIFO_INITIALIZER(obj) \
Allan Stephensc98da842016-11-11 15:45:03 -05002161 { \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002162 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
Allan Stephensc98da842016-11-11 15:45:03 -05002163 }
2164
2165/**
2166 * INTERNAL_HIDDEN @endcond
2167 */
2168
2169/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002170 * @defgroup fifo_apis FIFO APIs
Allan Stephensc98da842016-11-11 15:45:03 -05002171 * @ingroup kernel_apis
2172 * @{
2173 */
2174
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002175/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002176 * @brief Initialize a FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002177 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002178 * This routine initializes a FIFO queue, prior to its first use.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002179 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002180 * @param fifo Address of the FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002181 *
2182 * @return N/A
2183 */
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002184#define k_fifo_init(fifo) \
Torbjörn Leksell83ae27b2021-03-26 11:42:16 +01002185 ({ \
2186 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, init, fifo); \
2187 k_queue_init(&(fifo)->_queue); \
2188 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, init, fifo); \
2189 })
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002190
2191/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002192 * @brief Cancel waiting on a FIFO queue.
Paul Sokolovsky3f507072017-04-25 17:54:31 +03002193 *
 2194 * This routine causes the first thread pending on @a fifo, if any, to
 2195 * return from its k_fifo_get() call with NULL (as if the timeout
2196 * expired).
2197 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002198 * @funcprops \isr_ok
Paul Sokolovsky3f507072017-04-25 17:54:31 +03002199 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002200 * @param fifo Address of the FIFO queue.
Paul Sokolovsky3f507072017-04-25 17:54:31 +03002201 *
2202 * @return N/A
2203 */
2204#define k_fifo_cancel_wait(fifo) \
Torbjörn Leksell83ae27b2021-03-26 11:42:16 +01002205 ({ \
2206 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, cancel_wait, fifo); \
2207 k_queue_cancel_wait(&(fifo)->_queue); \
2208 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, cancel_wait, fifo); \
2209 })
Paul Sokolovsky3f507072017-04-25 17:54:31 +03002210
2211/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002212 * @brief Add an element to a FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002213 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002214 * This routine adds a data item to @a fifo. A FIFO data item must be
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04002215 * aligned on a word boundary, and the first word of the item is reserved
2216 * for the kernel's use.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002217 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002218 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002219 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002220 * @param fifo Address of the FIFO.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002221 * @param data Address of the data item.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002222 *
2223 * @return N/A
2224 */
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002225#define k_fifo_put(fifo, data) \
Torbjörn Leksell83ae27b2021-03-26 11:42:16 +01002226 ({ \
2227 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put, fifo, data); \
2228 k_queue_append(&(fifo)->_queue, data); \
2229 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put, fifo, data); \
2230 })
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002231
2232/**
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002233 * @brief Add an element to a FIFO queue.
2234 *
Andrew Boieac3dcc12019-04-01 12:28:03 -07002235 * This routine adds a data item to @a fifo. There is an implicit memory
2236 * allocation to create an additional temporary bookkeeping data structure from
2237 * the calling thread's resource pool, which is automatically freed when the
2238 * item is removed. The data itself is not copied.
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002239 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002240 * @funcprops \isr_ok
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002241 *
2242 * @param fifo Address of the FIFO.
2243 * @param data Address of the data item.
2244 *
2245 * @retval 0 on success
2246 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2247 */
2248#define k_fifo_alloc_put(fifo, data) \
Torbjörn Leksell83ae27b2021-03-26 11:42:16 +01002249 ({ \
2250 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, alloc_put, fifo, data); \
2251 int ret = k_queue_alloc_append(&(fifo)->_queue, data); \
2252 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, alloc_put, fifo, data, ret); \
2253 ret; \
2254 })
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002255
2256/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002257 * @brief Atomically add a list of elements to a FIFO.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002258 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002259 * This routine adds a list of data items to @a fifo in one operation.
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04002260 * The data items must be in a singly-linked list, with the first word of
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002261 * each data item pointing to the next data item; the list must be
2262 * NULL-terminated.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002263 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002264 * @funcprops \isr_ok
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002265 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002266 * @param fifo Address of the FIFO queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002267 * @param head Pointer to first node in singly-linked list.
2268 * @param tail Pointer to last node in singly-linked list.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002269 *
2270 * @return N/A
2271 */
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002272#define k_fifo_put_list(fifo, head, tail) \
Torbjörn Leksell83ae27b2021-03-26 11:42:16 +01002273 ({ \
2274 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_list, fifo, head, tail); \
2275 k_queue_append_list(&(fifo)->_queue, head, tail); \
2276 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_list, fifo, head, tail); \
2277 })
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002278
2279/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002280 * @brief Atomically add a list of elements to a FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002281 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002282 * This routine adds a list of data items to @a fifo in one operation.
2283 * The data items must be in a singly-linked list implemented using a
2284 * sys_slist_t object. Upon completion, the sys_slist_t object is invalid
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002285 * and must be re-initialized via sys_slist_init().
2286 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002287 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002288 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002289 * @param fifo Address of the FIFO queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002290 * @param list Pointer to sys_slist_t object.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002291 *
2292 * @return N/A
2293 */
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002294#define k_fifo_put_slist(fifo, list) \
Torbjörn Leksell83ae27b2021-03-26 11:42:16 +01002295 ({ \
2296 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_slist, fifo, list); \
2297 k_queue_merge_slist(&(fifo)->_queue, list); \
2298 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_slist, fifo, list); \
2299 })
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002300
2301/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002302 * @brief Get an element from a FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002303 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002304 * This routine removes a data item from @a fifo in a "first in, first out"
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04002305 * manner. The first word of the data item is reserved for the kernel's use.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002306 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002307 * @note @a timeout must be set to K_NO_WAIT if called from an ISR.
2308 *
2309 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002310 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002311 * @param fifo Address of the FIFO queue.
Andy Ross78327382020-03-05 15:18:14 -08002312 * @param timeout Waiting period to obtain a data item,
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002313 * or one of the special values K_NO_WAIT and K_FOREVER.
2314 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05002315 * @return Address of the data item if successful; NULL if returned
2316 * without waiting, or waiting period timed out.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002317 */
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002318#define k_fifo_get(fifo, timeout) \
Torbjörn Leksell83ae27b2021-03-26 11:42:16 +01002319 ({ \
2320 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, get, fifo, timeout); \
2321 void *ret = k_queue_get(&(fifo)->_queue, timeout); \
2322 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, get, fifo, timeout, ret); \
2323 ret; \
2324 })
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002325
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002326/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002327 * @brief Query a FIFO queue to see if it has data available.
Benjamin Walsh39b80d82017-01-28 10:06:07 -05002328 *
 2329 * Note that the data might already be gone by the time this function returns
Anas Nashif585fd1f2018-02-25 08:04:59 -06002330 * if other threads are also trying to read from the FIFO.
Benjamin Walsh39b80d82017-01-28 10:06:07 -05002331 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002332 * @funcprops \isr_ok
Benjamin Walsh39b80d82017-01-28 10:06:07 -05002333 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002334 * @param fifo Address of the FIFO queue.
Benjamin Walsh39b80d82017-01-28 10:06:07 -05002335 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002336 * @return Non-zero if the FIFO queue is empty.
Benjamin Walsh39b80d82017-01-28 10:06:07 -05002337 * @return 0 if data is available.
2338 */
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002339#define k_fifo_is_empty(fifo) \
Nicolas Pitrea04a2ca2019-05-20 23:02:39 -04002340 k_queue_is_empty(&(fifo)->_queue)
Benjamin Walsh39b80d82017-01-28 10:06:07 -05002341
2342/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002343 * @brief Peek element at the head of a FIFO queue.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002344 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002345 * Return the element at the head of a FIFO queue without removing it. One
Ramakrishna Pallala92489ea2018-03-29 22:44:23 +05302346 * use case for this is when the elements of the FIFO queue are themselves
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002347 * containers. Then, on each iteration of processing, the head container is
 2348 * peeked and some data is processed out of it; only when the container is
Anas Nashif585fd1f2018-02-25 08:04:59 -06002349 * empty is it completely removed from the FIFO queue.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002350 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002351 * @param fifo Address of the FIFO queue.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002352 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002353 * @return Head element, or NULL if the FIFO queue is empty.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002354 */
2355#define k_fifo_peek_head(fifo) \
Torbjörn Leksell83ae27b2021-03-26 11:42:16 +01002356 ({ \
2357 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_head, fifo); \
2358 void *ret = k_queue_peek_head(&(fifo)->_queue); \
2359 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_head, fifo, ret); \
2360 ret; \
2361 })
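
/*
 * Example (illustrative sketch): draining containers queued on a FIFO, as
 * described above. The container type and the consume_one()/is_empty()
 * helpers are hypothetical.
 *
 * @code
 * void process_head(struct k_fifo *fifo)
 * {
 *     struct my_container *c = k_fifo_peek_head(fifo);
 *
 *     if (c == NULL) {
 *         return;                       // nothing queued
 *     }
 *     consume_one(c);                   // process some data out of it
 *     if (is_empty(c)) {
 *         // only now remove the (now empty) container from the queue
 *         (void)k_fifo_get(fifo, K_NO_WAIT);
 *     }
 * }
 * @endcode
 */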
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002362
2363/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002364 * @brief Peek element at the tail of a FIFO queue.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002365 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002366 * Return the element at the tail of a FIFO queue without removing it. One
 2367 * use case for this is when the elements of the FIFO queue are themselves
 2368 * containers; it may then be useful to add more data to the last container.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002369 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002370 * @param fifo Address of the FIFO queue.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002371 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002372 * @return Tail element, or NULL if the FIFO queue is empty.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002373 */
2374#define k_fifo_peek_tail(fifo) \
Torbjörn Leksell83ae27b2021-03-26 11:42:16 +01002375 ({ \
2376 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_tail, fifo); \
2377 void *ret = k_queue_peek_tail(&(fifo)->_queue); \
2378 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_tail, fifo, ret); \
2379 ret; \
2380 })
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002381
2382/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002383 * @brief Statically define and initialize a FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002384 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002385 * The FIFO queue can be accessed outside the module where it is defined using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002386 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05002387 * @code extern struct k_fifo <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002388 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002389 * @param name Name of the FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002390 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002391#define K_FIFO_DEFINE(name) \
Fabio Baltierif88a4202021-08-04 23:05:54 +01002392 STRUCT_SECTION_ITERABLE_ALTERNATE(k_queue, k_fifo, name) = \
Patrik Flykt97b3bd12019-03-12 15:15:42 -06002393 Z_FIFO_INITIALIZER(name)
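
/*
 * Example (illustrative sketch): a statically defined FIFO queue shared by a
 * producer and a consumer. The message layout is hypothetical; only the
 * reserved first word is required by the kernel.
 *
 * @code
 * struct my_msg {
 *     void *fifo_reserved;   // first word, reserved for the kernel
 *     uint32_t payload;
 * };
 *
 * K_FIFO_DEFINE(my_fifo);
 *
 * void producer(struct my_msg *msg)
 * {
 *     k_fifo_put(&my_fifo, msg);
 * }
 *
 * void consumer(void)
 * {
 *     struct my_msg *msg = k_fifo_get(&my_fifo, K_FOREVER);
 *
 *     // msg is non-NULL here, since K_FOREVER waits indefinitely
 *     handle(msg->payload);             // handle() is hypothetical
 * }
 * @endcode
 */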
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002394
Anas Nashif166f5192018-02-25 08:02:36 -06002395/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05002396
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002397struct k_lifo {
Luiz Augusto von Dentz0dc4dd42017-02-21 15:49:52 +02002398 struct k_queue _queue;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002399};
2400
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04002401/**
2402 * @cond INTERNAL_HIDDEN
2403 */
2404
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002405#define Z_LIFO_INITIALIZER(obj) \
Allan Stephensc98da842016-11-11 15:45:03 -05002406 { \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002407 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
Allan Stephensc98da842016-11-11 15:45:03 -05002408 }
2409
2410/**
2411 * INTERNAL_HIDDEN @endcond
2412 */
2413
2414/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002415 * @defgroup lifo_apis LIFO APIs
Allan Stephensc98da842016-11-11 15:45:03 -05002416 * @ingroup kernel_apis
2417 * @{
2418 */
2419
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002420/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002421 * @brief Initialize a LIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002422 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002423 * This routine initializes a LIFO queue object, prior to its first use.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002424 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002425 * @param lifo Address of the LIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002426 *
2427 * @return N/A
2428 */
Luiz Augusto von Dentz0dc4dd42017-02-21 15:49:52 +02002429#define k_lifo_init(lifo) \
Torbjörn Lekselld7654452021-03-26 12:21:24 +01002430 ({ \
2431 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, init, lifo); \
2432 k_queue_init(&(lifo)->_queue); \
2433 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, init, lifo); \
2434 })
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002435
2436/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002437 * @brief Add an element to a LIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002438 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002439 * This routine adds a data item to @a lifo. A LIFO queue data item must be
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04002440 * aligned on a word boundary, and the first word of the item is
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002441 * reserved for the kernel's use.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002442 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002443 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002444 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002445 * @param lifo Address of the LIFO queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002446 * @param data Address of the data item.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002447 *
2448 * @return N/A
2449 */
Luiz Augusto von Dentz0dc4dd42017-02-21 15:49:52 +02002450#define k_lifo_put(lifo, data) \
Torbjörn Lekselld7654452021-03-26 12:21:24 +01002451 ({ \
2452 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, put, lifo, data); \
2453 k_queue_prepend(&(lifo)->_queue, data); \
2454 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, put, lifo, data); \
2455 })
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002456
2457/**
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002458 * @brief Add an element to a LIFO queue.
2459 *
Andrew Boieac3dcc12019-04-01 12:28:03 -07002460 * This routine adds a data item to @a lifo. There is an implicit memory
2461 * allocation to create an additional temporary bookkeeping data structure from
2462 * the calling thread's resource pool, which is automatically freed when the
2463 * item is removed. The data itself is not copied.
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002464 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002465 * @funcprops \isr_ok
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002466 *
2467 * @param lifo Address of the LIFO.
2468 * @param data Address of the data item.
2469 *
2470 * @retval 0 on success
2471 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2472 */
2473#define k_lifo_alloc_put(lifo, data) \
Torbjörn Lekselld7654452021-03-26 12:21:24 +01002474 ({ \
2475 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, alloc_put, lifo, data); \
2476 int ret = k_queue_alloc_prepend(&(lifo)->_queue, data); \
2477 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, alloc_put, lifo, data, ret); \
2478 ret; \
2479 })
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002480
2481/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002482 * @brief Get an element from a LIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002483 *
Anas Nashif56821172020-07-08 14:14:25 -04002484 * This routine removes a data item from @a lifo in a "last in, first out"
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04002485 * manner. The first word of the data item is reserved for the kernel's use.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002486 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002487 * @note @a timeout must be set to K_NO_WAIT if called from an ISR.
2488 *
2489 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002490 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002491 * @param lifo Address of the LIFO queue.
Andy Ross78327382020-03-05 15:18:14 -08002492 * @param timeout Waiting period to obtain a data item,
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002493 * or one of the special values K_NO_WAIT and K_FOREVER.
2494 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05002495 * @return Address of the data item if successful; NULL if returned
2496 * without waiting, or waiting period timed out.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002497 */
Luiz Augusto von Dentz0dc4dd42017-02-21 15:49:52 +02002498#define k_lifo_get(lifo, timeout) \
Torbjörn Lekselld7654452021-03-26 12:21:24 +01002499 ({ \
2500 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, get, lifo, timeout); \
2501 void *ret = k_queue_get(&(lifo)->_queue, timeout); \
2502 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, get, lifo, timeout, ret); \
2503 ret; \
2504 })
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002505
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002506/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002507 * @brief Statically define and initialize a LIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002508 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002509 * The LIFO queue can be accessed outside the module where it is defined using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002510 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05002511 * @code extern struct k_lifo <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002512 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002513 * @param name Name of the LIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002514 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002515#define K_LIFO_DEFINE(name) \
Fabio Baltierif88a4202021-08-04 23:05:54 +01002516 STRUCT_SECTION_ITERABLE_ALTERNATE(k_queue, k_lifo, name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002517 Z_LIFO_INITIALIZER(name)
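
/*
 * Example (illustrative sketch): last-in, first-out recycling of buffers
 * with a statically defined LIFO queue. The buffer layout is hypothetical.
 *
 * @code
 * struct my_buf {
 *     void *lifo_reserved;   // first word, reserved for the kernel
 *     uint8_t data[64];
 * };
 *
 * K_LIFO_DEFINE(buf_pool);
 *
 * void buf_free(struct my_buf *buf)
 * {
 *     k_lifo_put(&buf_pool, buf);       // most recently freed buffer...
 * }
 *
 * struct my_buf *buf_alloc(void)
 * {
 *     return k_lifo_get(&buf_pool, K_FOREVER);  // ...is handed out first
 * }
 * @endcode
 */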
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002518
Anas Nashif166f5192018-02-25 08:02:36 -06002519/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05002520
2521/**
2522 * @cond INTERNAL_HIDDEN
2523 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05002524#define K_STACK_FLAG_ALLOC ((uint8_t)1) /* Buffer was allocated */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002525
Nicolas Pitre3d51f7c2019-05-17 22:48:26 -04002526typedef uintptr_t stack_data_t;
2527
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002528struct k_stack {
2529 _wait_q_t wait_q;
Andy Rossf0933d02018-07-26 10:23:02 -07002530 struct k_spinlock lock;
Nicolas Pitre3d51f7c2019-05-17 22:48:26 -04002531 stack_data_t *base, *next, *top;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002532
Kumar Galaa1b77fd2020-05-27 11:26:57 -05002533 uint8_t flags;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002534};
2535
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002536#define Z_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
Allan Stephensc98da842016-11-11 15:45:03 -05002537 { \
Patrik Flykt4344e272019-03-08 14:19:05 -07002538 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
Allan Stephensc98da842016-11-11 15:45:03 -05002539 .base = stack_buffer, \
2540 .next = stack_buffer, \
2541 .top = stack_buffer + stack_num_entries, \
Allan Stephensc98da842016-11-11 15:45:03 -05002542 }
2543
2544/**
2545 * INTERNAL_HIDDEN @endcond
2546 */
2547
2548/**
2549 * @defgroup stack_apis Stack APIs
2550 * @ingroup kernel_apis
2551 * @{
2552 */
2553
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002554/**
2555 * @brief Initialize a stack.
2556 *
2557 * This routine initializes a stack object, prior to its first use.
2558 *
2559 * @param stack Address of the stack.
2560 * @param buffer Address of array used to hold stacked values.
2561 * @param num_entries Maximum number of values that can be stacked.
2562 *
2563 * @return N/A
2564 */
Andrew Boief3bee952018-05-02 17:44:39 -07002565void k_stack_init(struct k_stack *stack,
Kumar Galaa1b77fd2020-05-27 11:26:57 -05002566 stack_data_t *buffer, uint32_t num_entries);
Andrew Boief3bee952018-05-02 17:44:39 -07002567
2568
2569/**
2570 * @brief Initialize a stack.
2571 *
2572 * This routine initializes a stack object, prior to its first use. Internal
2573 * buffers will be allocated from the calling thread's resource pool.
2574 * This memory will be released if k_stack_cleanup() is called, or
2575 * userspace is enabled and the stack object loses all references to it.
2576 *
2577 * @param stack Address of the stack.
2578 * @param num_entries Maximum number of values that can be stacked.
2579 *
 2580 * @return 0 on success, -ENOMEM if memory couldn't be allocated
2581 */
2582
Kumar Galaa1b77fd2020-05-27 11:26:57 -05002583__syscall int32_t k_stack_alloc_init(struct k_stack *stack,
2584 uint32_t num_entries);
Andrew Boief3bee952018-05-02 17:44:39 -07002585
2586/**
2587 * @brief Release a stack's allocated buffer
2588 *
2589 * If a stack object was given a dynamically allocated buffer via
2590 * k_stack_alloc_init(), this will free it. This function does nothing
2591 * if the buffer wasn't dynamically allocated.
2592 *
2593 * @param stack Address of the stack.
Anas Nashif1ed67d12019-06-16 08:58:10 -04002594 * @retval 0 on success
2595 * @retval -EAGAIN when object is still in use
Andrew Boief3bee952018-05-02 17:44:39 -07002596 */
Anas Nashif1ed67d12019-06-16 08:58:10 -04002597int k_stack_cleanup(struct k_stack *stack);
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002598
2599/**
2600 * @brief Push an element onto a stack.
2601 *
Nicolas Pitre3d51f7c2019-05-17 22:48:26 -04002602 * This routine adds a stack_data_t value @a data to @a stack.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002603 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002604 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002605 *
2606 * @param stack Address of the stack.
2607 * @param data Value to push onto the stack.
2608 *
Anas Nashif1ed67d12019-06-16 08:58:10 -04002609 * @retval 0 on success
2610 * @retval -ENOMEM if stack is full
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002611 */
Anas Nashif1ed67d12019-06-16 08:58:10 -04002612__syscall int k_stack_push(struct k_stack *stack, stack_data_t data);
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002613
2614/**
2615 * @brief Pop an element from a stack.
2616 *
Nicolas Pitre3d51f7c2019-05-17 22:48:26 -04002617 * This routine removes a stack_data_t value from @a stack in a "last in,
2618 * first out" manner and stores the value in @a data.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002619 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002620 * @note @a timeout must be set to K_NO_WAIT if called from an ISR.
2621 *
2622 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002623 *
2624 * @param stack Address of the stack.
2625 * @param data Address of area to hold the value popped from the stack.
Andy Ross78327382020-03-05 15:18:14 -08002626 * @param timeout Waiting period to obtain a value,
2627 * or one of the special values K_NO_WAIT and
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01002628 * K_FOREVER.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002629 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05002630 * @retval 0 Element popped from stack.
2631 * @retval -EBUSY Returned without waiting.
2632 * @retval -EAGAIN Waiting period timed out.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002633 */
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01002634__syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
Andy Ross78327382020-03-05 15:18:14 -08002635 k_timeout_t timeout);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002636
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002637/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002638 * @brief Statically define and initialize a stack
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002639 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002640 * The stack can be accessed outside the module where it is defined using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002641 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05002642 * @code extern struct k_stack <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002643 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002644 * @param name Name of the stack.
2645 * @param stack_num_entries Maximum number of values that can be stacked.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002646 */
Peter Mitsis602e6a82016-10-17 11:48:43 -04002647#define K_STACK_DEFINE(name, stack_num_entries) \
Nicolas Pitre3d51f7c2019-05-17 22:48:26 -04002648 stack_data_t __noinit \
Peter Mitsis602e6a82016-10-17 11:48:43 -04002649 _k_stack_buf_##name[stack_num_entries]; \
Fabio Baltierif88a4202021-08-04 23:05:54 +01002650 STRUCT_SECTION_ITERABLE(k_stack, name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002651 Z_STACK_INITIALIZER(name, _k_stack_buf_##name, \
Peter Mitsis602e6a82016-10-17 11:48:43 -04002652 stack_num_entries)
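
/*
 * Example (illustrative sketch): using a statically defined stack as a pool
 * of word-sized tokens. Values are stack_data_t (uintptr_t), so pointers or
 * small integers can be pushed directly.
 *
 * @code
 * K_STACK_DEFINE(token_stack, 8);
 *
 * void token_release(uintptr_t token)
 * {
 *     (void)k_stack_push(&token_stack, token);  // -ENOMEM if already full
 * }
 *
 * int token_acquire(uintptr_t *token)
 * {
 *     // blocks until a value is available
 *     return k_stack_pop(&token_stack, token, K_FOREVER);
 * }
 * @endcode
 */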
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002653
Anas Nashif166f5192018-02-25 08:02:36 -06002654/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05002655
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002656/**
2657 * @cond INTERNAL_HIDDEN
2658 */
Peter Bigot44539ed2020-11-21 06:58:58 -06002659
Allan Stephens6bba9b02016-11-16 14:56:54 -05002660struct k_work;
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002661struct k_work_q;
2662struct k_work_queue_config;
2663struct k_delayed_work;
2664extern struct k_work_q k_sys_work_q;
2665
2666/**
2667 * INTERNAL_HIDDEN @endcond
2668 */
2669
Allan Stephensc98da842016-11-11 15:45:03 -05002670/**
Anas Nashifce78d162018-05-24 12:43:11 -05002671 * @defgroup mutex_apis Mutex APIs
2672 * @ingroup kernel_apis
2673 * @{
Allan Stephensc98da842016-11-11 15:45:03 -05002674 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002675
Anas Nashifce78d162018-05-24 12:43:11 -05002676/**
2677 * Mutex Structure
2678 * @ingroup mutex_apis
2679 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002680struct k_mutex {
Anas Nashife71293e2019-12-04 20:00:14 -05002681 /** Mutex wait queue */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002682 _wait_q_t wait_q;
Anas Nashifce78d162018-05-24 12:43:11 -05002683 /** Mutex owner */
Benjamin Walshb7ef0cb2016-10-05 17:32:01 -04002684 struct k_thread *owner;
Anas Nashife71293e2019-12-04 20:00:14 -05002685
2686 /** Current lock count */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05002687 uint32_t lock_count;
Anas Nashife71293e2019-12-04 20:00:14 -05002688
2689 /** Original thread priority */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002690 int owner_orig_prio;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002691};
2692
Anas Nashifce78d162018-05-24 12:43:11 -05002693/**
2694 * @cond INTERNAL_HIDDEN
2695 */
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002696#define Z_MUTEX_INITIALIZER(obj) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002697 { \
Patrik Flykt4344e272019-03-08 14:19:05 -07002698 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002699 .owner = NULL, \
2700 .lock_count = 0, \
Andy Ross851d14a2021-05-13 15:46:43 -07002701 .owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002702 }
2703
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002704/**
Allan Stephensc98da842016-11-11 15:45:03 -05002705 * INTERNAL_HIDDEN @endcond
2706 */
2707
2708/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002709 * @brief Statically define and initialize a mutex.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002710 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002711 * The mutex can be accessed outside the module where it is defined using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002712 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05002713 * @code extern struct k_mutex <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002714 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002715 * @param name Name of the mutex.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002716 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002717#define K_MUTEX_DEFINE(name) \
Fabio Baltierif88a4202021-08-04 23:05:54 +01002718 STRUCT_SECTION_ITERABLE(k_mutex, name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002719 Z_MUTEX_INITIALIZER(name)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002720
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002721/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002722 * @brief Initialize a mutex.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002723 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002724 * This routine initializes a mutex object, prior to its first use.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002725 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002726 * Upon completion, the mutex is available and does not have an owner.
2727 *
2728 * @param mutex Address of the mutex.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002729 *
Anas Nashif86bb2d02019-05-04 10:18:13 -04002730 * @retval 0 Mutex object created
2731 *
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002732 */
Anas Nashif86bb2d02019-05-04 10:18:13 -04002733__syscall int k_mutex_init(struct k_mutex *mutex);
2734
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002735
2736/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002737 * @brief Lock a mutex.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002738 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002739 * This routine locks @a mutex. If the mutex is locked by another thread,
2740 * the calling thread waits until the mutex becomes available or until
2741 * a timeout occurs.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002742 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002743 * A thread is permitted to lock a mutex it has already locked. The operation
2744 * completes immediately and the lock count is increased by 1.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002745 *
Andrew Boie6af97932020-05-27 11:48:30 -07002746 * Mutexes may not be locked in ISRs.
2747 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002748 * @param mutex Address of the mutex.
Andy Ross78327382020-03-05 15:18:14 -08002749 * @param timeout Waiting period to lock the mutex,
2750 * or one of the special values K_NO_WAIT and
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01002751 * K_FOREVER.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002752 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05002753 * @retval 0 Mutex locked.
2754 * @retval -EBUSY Returned without waiting.
2755 * @retval -EAGAIN Waiting period timed out.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002756 */
Andy Ross78327382020-03-05 15:18:14 -08002757__syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002758
2759/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002760 * @brief Unlock a mutex.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002761 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002762 * This routine unlocks @a mutex. The mutex must already be locked by the
2763 * calling thread.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002764 *
2765 * The mutex cannot be claimed by another thread until it has been unlocked by
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002766 * the calling thread as many times as it was previously locked by that
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002767 * thread.
2768 *
Andrew Boie6af97932020-05-27 11:48:30 -07002769 * Mutexes may not be unlocked in ISRs, as mutexes must only be manipulated
2770 * in thread context due to ownership and priority inheritance semantics.
2771 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002772 * @param mutex Address of the mutex.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002773 *
Anas Nashif86bb2d02019-05-04 10:18:13 -04002774 * @retval 0 Mutex unlocked.
2775 * @retval -EPERM The current thread does not own the mutex
2776 * @retval -EINVAL The mutex is not locked
2777 *
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002778 */
Anas Nashif86bb2d02019-05-04 10:18:13 -04002779__syscall int k_mutex_unlock(struct k_mutex *mutex);
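
/*
 * Example (illustrative sketch): protecting shared state with a statically
 * defined mutex. The shared counter is hypothetical, and the K_MSEC()
 * timeout helper comes from the timeout API outside this excerpt.
 *
 * @code
 * K_MUTEX_DEFINE(my_mutex);
 * static int shared_count;
 *
 * void bump(void)
 * {
 *     if (k_mutex_lock(&my_mutex, K_MSEC(100)) == 0) {
 *         shared_count++;               // critical section
 *         k_mutex_unlock(&my_mutex);
 *     } else {
 *         // -EAGAIN: could not obtain the lock within 100 ms
 *     }
 * }
 * @endcode
 */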
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002780
Allan Stephensc98da842016-11-11 15:45:03 -05002781/**
Anas Nashif166f5192018-02-25 08:02:36 -06002782 * @}
Allan Stephensc98da842016-11-11 15:45:03 -05002783 */
2784
Anas Nashif06eb4892020-08-23 12:39:09 -04002785
2786struct k_condvar {
2787 _wait_q_t wait_q;
2788};
2789
2790#define Z_CONDVAR_INITIALIZER(obj) \
2791 { \
2792 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2793 }
2794
2795/**
2796 * @defgroup condvar_apis Condition Variables APIs
2797 * @ingroup kernel_apis
2798 * @{
2799 */
2800
2801/**
2802 * @brief Initialize a condition variable
2803 *
2804 * @param condvar pointer to a @p k_condvar structure
2805 * @retval 0 Condition variable created successfully
2806 */
2807__syscall int k_condvar_init(struct k_condvar *condvar);
2808
2809/**
2810 * @brief Signals one thread that is pending on the condition variable
2811 *
2812 * @param condvar pointer to a @p k_condvar structure
2813 * @retval 0 On success
2814 */
2815__syscall int k_condvar_signal(struct k_condvar *condvar);
2816
2817/**
2818 * @brief Unblock all threads that are pending on the condition
2819 * variable
2820 *
2821 * @param condvar pointer to a @p k_condvar structure
 2822 * @return The number of woken threads on success
2823 */
2824__syscall int k_condvar_broadcast(struct k_condvar *condvar);
2825
2826/**
2827 * @brief Waits on the condition variable releasing the mutex lock
2828 *
 2829 * Atomically releases the currently owned mutex, blocks the current thread
2830 * waiting on the condition variable specified by @a condvar,
2831 * and finally acquires the mutex again.
2832 *
2833 * The waiting thread unblocks only after another thread calls
2834 * k_condvar_signal, or k_condvar_broadcast with the same condition variable.
2835 *
2836 * @param condvar pointer to a @p k_condvar structure
2837 * @param mutex Address of the mutex.
2838 * @param timeout Waiting period for the condition variable
2839 * or one of the special values K_NO_WAIT and K_FOREVER.
2840 * @retval 0 On success
2841 * @retval -EAGAIN Waiting period timed out.
2842 */
2843__syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
2844 k_timeout_t timeout);
2845
2846/**
2847 * @brief Statically define and initialize a condition variable.
2848 *
2849 * The condition variable can be accessed outside the module where it is
2850 * defined using:
2851 *
2852 * @code extern struct k_condvar <name>; @endcode
2853 *
2854 * @param name Name of the condition variable.
2855 */
2856#define K_CONDVAR_DEFINE(name) \
Fabio Baltierif88a4202021-08-04 23:05:54 +01002857 STRUCT_SECTION_ITERABLE(k_condvar, name) = \
Anas Nashif06eb4892020-08-23 12:39:09 -04002858 Z_CONDVAR_INITIALIZER(name)
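
/*
 * Example (illustrative sketch): the canonical wait-in-a-loop pattern. The
 * data_ready predicate is hypothetical; the mutex must be held around both
 * the predicate check and the wait.
 *
 * @code
 * K_MUTEX_DEFINE(cv_mutex);
 * K_CONDVAR_DEFINE(cv);
 * static bool data_ready;
 *
 * void consumer(void)
 * {
 *     k_mutex_lock(&cv_mutex, K_FOREVER);
 *     while (!data_ready) {
 *         // releases cv_mutex while blocked, reacquires before returning
 *         k_condvar_wait(&cv, &cv_mutex, K_FOREVER);
 *     }
 *     data_ready = false;               // consume the event
 *     k_mutex_unlock(&cv_mutex);
 * }
 *
 * void producer(void)
 * {
 *     k_mutex_lock(&cv_mutex, K_FOREVER);
 *     data_ready = true;
 *     k_condvar_signal(&cv);
 *     k_mutex_unlock(&cv_mutex);
 * }
 * @endcode
 */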
2859/**
2860 * @}
2861 */
2862
Allan Stephensc98da842016-11-11 15:45:03 -05002863/**
2864 * @cond INTERNAL_HIDDEN
2865 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002866
2867struct k_sem {
2868 _wait_q_t wait_q;
James Harrisb1042812021-03-03 12:02:05 -08002869 unsigned int count;
2870 unsigned int limit;
Peter Bigot7aefa3d2021-03-02 06:18:29 -06002871
Benjamin Walshacc68c12017-01-29 18:57:45 -05002872 _POLL_EVENT;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002873
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002874};
2875
Patrik Flykt97b3bd12019-03-12 15:15:42 -06002876#define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \
Allan Stephensc98da842016-11-11 15:45:03 -05002877 { \
Patrik Flykt4344e272019-03-08 14:19:05 -07002878 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
Allan Stephensc98da842016-11-11 15:45:03 -05002879 .count = initial_count, \
2880 .limit = count_limit, \
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +03002881 _POLL_EVENT_OBJ_INIT(obj) \
Allan Stephensc98da842016-11-11 15:45:03 -05002882 }
2883
2884/**
2885 * INTERNAL_HIDDEN @endcond
2886 */
2887
2888/**
2889 * @defgroup semaphore_apis Semaphore APIs
2890 * @ingroup kernel_apis
2891 * @{
2892 */
2893
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002894/**
James Harrisb1042812021-03-03 12:02:05 -08002895 * @brief Maximum limit value allowed for a semaphore.
2896 *
2897 * This is intended for use when a semaphore does not have
2898 * an explicit maximum limit, and instead is just used for
2899 * counting purposes.
2900 *
2901 */
2902#define K_SEM_MAX_LIMIT UINT_MAX
2903
2904/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002905 * @brief Initialize a semaphore.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002906 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002907 * This routine initializes a semaphore object, prior to its first use.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002908 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002909 * @param sem Address of the semaphore.
2910 * @param initial_count Initial semaphore count.
2911 * @param limit Maximum permitted semaphore count.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002912 *
James Harrisb1042812021-03-03 12:02:05 -08002913 * @see K_SEM_MAX_LIMIT
2914 *
Anas Nashif928af3c2019-05-04 10:36:14 -04002915 * @retval 0 Semaphore created successfully
2916 * @retval -EINVAL Invalid values
2917 *
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002918 */
Anas Nashif928af3c2019-05-04 10:36:14 -04002919__syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
Andrew Boie99280232017-09-29 14:17:47 -07002920 unsigned int limit);
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002921
2922/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002923 * @brief Take a semaphore.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002924 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002925 * This routine takes @a sem.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002926 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002927 * @note @a timeout must be set to K_NO_WAIT if called from an ISR.
2928 *
2929 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002930 *
2931 * @param sem Address of the semaphore.
Andy Ross78327382020-03-05 15:18:14 -08002932 * @param timeout Waiting period to take the semaphore,
2933 * or one of the special values K_NO_WAIT and K_FOREVER.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002934 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05002935 * @retval 0 Semaphore taken.
2936 * @retval -EBUSY Returned without waiting.
James Harris53b81792021-03-04 15:47:27 -08002937 * @retval -EAGAIN Waiting period timed out,
2938 * or the semaphore was reset during the waiting period.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002939 */
Andy Ross78327382020-03-05 15:18:14 -08002940__syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002941
2942/**
2943 * @brief Give a semaphore.
2944 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002945 * This routine gives @a sem, unless the semaphore is already at its maximum
2946 * permitted count.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002947 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002948 * @funcprops \isr_ok
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002949 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002950 * @param sem Address of the semaphore.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002951 *
2952 * @return N/A
2953 */
Andrew Boie99280232017-09-29 14:17:47 -07002954__syscall void k_sem_give(struct k_sem *sem);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002955
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002956/**
James Harris53b81792021-03-04 15:47:27 -08002957 * @brief Resets a semaphore's count to zero.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002958 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002959 * This routine sets the count of @a sem to zero.
James Harris53b81792021-03-04 15:47:27 -08002960 * Any outstanding semaphore takes will be aborted
2961 * with -EAGAIN.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002962 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002963 * @param sem Address of the semaphore.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002964 *
2965 * @return N/A
2966 */
Andrew Boie990bf162017-10-03 12:36:49 -07002967__syscall void k_sem_reset(struct k_sem *sem);
Andrew Boiefc273c02017-09-23 12:51:23 -07002968
Anas Nashif954d5502018-02-25 08:37:28 -06002969/**
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002970 * @brief Get a semaphore's count.
2971 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002972 * This routine returns the current count of @a sem.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002973 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002974 * @param sem Address of the semaphore.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002975 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002976 * @return Current semaphore count.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002977 */
Andrew Boie990bf162017-10-03 12:36:49 -07002978__syscall unsigned int k_sem_count_get(struct k_sem *sem);
Andrew Boiefc273c02017-09-23 12:51:23 -07002979
Anas Nashif954d5502018-02-25 08:37:28 -06002980/**
2981 * @internal
2982 */
Patrik Flykt4344e272019-03-08 14:19:05 -07002983static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002984{
2985 return sem->count;
2986}
2987
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002988/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002989 * @brief Statically define and initialize a semaphore.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002990 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002991 * The semaphore can be accessed outside the module where it is defined using:
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002992 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05002993 * @code extern struct k_sem <name>; @endcode
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002994 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002995 * @param name Name of the semaphore.
2996 * @param initial_count Initial semaphore count.
2997 * @param count_limit Maximum permitted semaphore count.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002998 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002999#define K_SEM_DEFINE(name, initial_count, count_limit) \
Fabio Baltierif88a4202021-08-04 23:05:54 +01003000 STRUCT_SECTION_ITERABLE(k_sem, name) = \
Patrik Flykt97b3bd12019-03-12 15:15:42 -06003001 Z_SEM_INITIALIZER(name, initial_count, count_limit); \
Rajavardhan Gundi68040c82018-04-27 10:15:15 +05303002 BUILD_ASSERT(((count_limit) != 0) && \
James Harrisb1042812021-03-03 12:02:05 -08003003 ((initial_count) <= (count_limit)) && \
3004 ((count_limit) <= K_SEM_MAX_LIMIT));
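
/*
 * Example (illustrative sketch): signalling a thread from an ISR with a
 * statically defined binary semaphore. The ISR registration and the K_MSEC()
 * timeout helper are outside this excerpt.
 *
 * @code
 * K_SEM_DEFINE(my_sem, 0, 1);           // initial count 0, limit 1
 *
 * void my_isr(const void *arg)
 * {
 *     k_sem_give(&my_sem);              // safe to call from an ISR
 * }
 *
 * void waiter(void)
 * {
 *     if (k_sem_take(&my_sem, K_MSEC(50)) != 0) {
 *         // -EAGAIN: timed out, or the semaphore was reset while waiting
 *     }
 * }
 * @endcode
 */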
Benjamin Walsh456c6da2016-09-02 18:55:39 -04003005
Anas Nashif166f5192018-02-25 08:02:36 -06003006/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05003007
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003008/**
3009 * @cond INTERNAL_HIDDEN
3010 */
3011
3012struct k_work_delayable;
3013struct k_work_sync;
3014
3015/**
3016 * INTERNAL_HIDDEN @endcond
3017 */
3018
3019/**
Anas Nashifc355b7e2021-04-14 08:49:05 -04003020 * @defgroup workqueue_apis Work Queue APIs
3021 * @ingroup kernel_apis
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003022 * @{
3023 */
3024
3025/** @brief The signature for a work item handler function.
3026 *
3027 * The function will be invoked by the thread animating a work queue.
3028 *
3029 * @param work the work item that provided the handler.
3030 */
3031typedef void (*k_work_handler_t)(struct k_work *work);
3032
3033/** @brief Initialize a (non-delayable) work structure.
3034 *
3035 * This must be invoked before submitting a work structure for the first time.
3036 * It need not be invoked again on the same work structure. It can be
3037 * re-invoked to change the associated handler, but this must be done when the
3038 * work item is idle.
3039 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003040 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003041 *
3042 * @param work the work structure to be initialized.
3043 *
3044 * @param handler the handler to be invoked by the work item.
3045 */
3046void k_work_init(struct k_work *work,
3047 k_work_handler_t handler);
3048
3049/** @brief Busy state flags from the work item.
3050 *
3051 * A zero return value indicates the work item appears to be idle.
3052 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003053 * @note This is a live snapshot of state, which may change before the result
3054 * is checked. Use locks where appropriate.
3055 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003056 * @funcprops \isr_ok
3057 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003058 * @param work pointer to the work item.
3059 *
3060 * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED,
3061 * K_WORK_RUNNING, and K_WORK_CANCELING.
3062 */
3063int k_work_busy_get(const struct k_work *work);
3064
3065/** @brief Test whether a work item is currently pending.
3066 *
 3067 * Wrapper to determine whether a work item is in a non-idle state.
3068 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003069 * @note This is a live snapshot of state, which may change before the result
3070 * is checked. Use locks where appropriate.
3071 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003072 * @funcprops \isr_ok
3073 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003074 * @param work pointer to the work item.
3075 *
3076 * @return true if and only if k_work_busy_get() returns a non-zero value.
3077 */
3078static inline bool k_work_is_pending(const struct k_work *work);
3079
3080/** @brief Submit a work item to a queue.
3081 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003085 * @funcprops \isr_ok
 3084 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003082 * @param queue pointer to the work queue on which the item should run. If
 3083 * NULL the queue from the most recent submission will be used.
 3086 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003087 * @param work pointer to the work item.
3088 *
3089 * @retval 0 if work was already submitted to a queue
3090 * @retval 1 if work was not submitted and has been queued to @p queue
3091 * @retval 2 if work was running and has been queued to the queue that was
3092 * running it
3093 * @retval -EBUSY
3094 * * if work submission was rejected because the work item is cancelling; or
3095 * * @p queue is draining; or
3096 * * @p queue is plugged.
3097 * @retval -EINVAL if @p queue is null and the work item has never been run.
Peter Bigot47435902021-05-17 06:36:04 -05003098 * @retval -ENODEV if @p queue has not been started.
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003099 */
3100int k_work_submit_to_queue(struct k_work_q *queue,
3101 struct k_work *work);
3102
3103/** @brief Submit a work item to the system queue.
3104 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003105 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003106 *
3107 * @param work pointer to the work item.
3108 *
3109 * @return as with k_work_submit_to_queue().
3110 */
Torbjörn Leksell7a646b32021-03-26 14:41:18 +01003111extern int k_work_submit(struct k_work *work);
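
/*
 * Example (illustrative sketch): a work item embedded in a user structure so
 * the handler can recover its context. Assumes the CONTAINER_OF() utility
 * macro; the device structure and process() helper are hypothetical.
 *
 * @code
 * struct my_device {
 *     struct k_work work;
 *     uint32_t pending;
 * };
 *
 * static void my_work_handler(struct k_work *work)
 * {
 *     struct my_device *dev = CONTAINER_OF(work, struct my_device, work);
 *
 *     process(dev->pending);            // runs in the work queue thread
 * }
 *
 * void my_device_init(struct my_device *dev)
 * {
 *     k_work_init(&dev->work, my_work_handler);
 * }
 *
 * void my_device_isr(struct my_device *dev)
 * {
 *     (void)k_work_submit(&dev->work);  // defer processing out of the ISR
 * }
 * @endcode
 */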
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003112
3113/** @brief Wait for last-submitted instance to complete.
3114 *
3115 * Resubmissions may occur while waiting, including chained submissions (from
3116 * within the handler).
3117 *
3118 * @note Be careful of caller and work queue thread relative priority. If
3119 * this function sleeps it will not return until the work queue thread
3120 * completes the tasks that allow this thread to resume.
3121 *
3122 * @note Behavior is undefined if this function is invoked on @p work from a
3123 * work queue running @p work.
3124 *
3125 * @param work pointer to the work item.
3126 *
3127 * @param sync pointer to an opaque item containing state related to the
3128 * pending cancellation. The object must persist until the call returns, and
3129 * be accessible from both the caller thread and the work queue thread. The
3130 * object must not be used for any other flush or cancel operation until this
3131 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3132 * must be allocated in coherent memory.
3133 *
3134 * @retval true if call had to wait for completion
3135 * @retval false if work was already idle
3136 */
3137bool k_work_flush(struct k_work *work,
3138 struct k_work_sync *sync);
3139
3140/** @brief Cancel a work item.
3141 *
3142 * This attempts to prevent a pending (non-delayable) work item from being
3143 * processed by removing it from the work queue. If the item is being
3144 * processed, the work item will continue to be processed, but resubmissions
3145 * are rejected until cancellation completes.
3146 *
3147 * If this returns zero cancellation is complete, otherwise something
3148 * (probably a work queue thread) is still referencing the item.
3149 *
3150 * See also k_work_cancel_sync().
3151 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003152 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003153 *
3154 * @param work pointer to the work item.
3155 *
3156 * @return the k_work_busy_get() status indicating the state of the item after all
3157 * cancellation steps performed by this call are completed.
3158 */
3159int k_work_cancel(struct k_work *work);
3160
3161/** @brief Cancel a work item and wait for it to complete.
3162 *
3163 * Same as k_work_cancel() but does not return until cancellation is complete.
3164 * This can be invoked by a thread after k_work_cancel() to synchronize with a
3165 * previous cancellation.
3166 *
3167 * On return the work structure will be idle unless something submits it after
3168 * the cancellation was complete.
3169 *
3170 * @note Be careful of caller and work queue thread relative priority. If
3171 * this function sleeps it will not return until the work queue thread
3172 * completes the tasks that allow this thread to resume.
3173 *
3174 * @note Behavior is undefined if this function is invoked on @p work from a
3175 * work queue running @p work.
3176 *
3177 * @param work pointer to the work item.
3178 *
3179 * @param sync pointer to an opaque item containing state related to the
3180 * pending cancellation. The object must persist until the call returns, and
3181 * be accessible from both the caller thread and the work queue thread. The
3182 * object must not be used for any other flush or cancel operation until this
3183 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3184 * must be allocated in coherent memory.
3185 *
Peter Bigot707dc222021-04-16 11:48:50 -05003186 * @retval true if work was pending (call had to wait for cancellation of a
3187 * running handler to complete, or scheduled or submitted operations were
3188 * cancelled);
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003189 * @retval false otherwise
3190 */
3191bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync);
3192
Flavio Ceolind9aa4142021-08-23 14:33:40 -07003193/** @brief Initialize a work queue structure.
3194 *
3195 * This must be invoked before starting a work queue structure for the first time.
3196 * It need not be invoked again on the same work queue structure.
3197 *
3198 * @funcprops \isr_ok
3199 *
3200 * @param queue the queue structure to be initialized.
3201 */
3202void k_work_queue_init(struct k_work_q *queue);
3203
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003204/** @brief Initialize a work queue.
3205 *
3206 * This configures the work queue thread and starts it running. The function
3207 * should not be re-invoked on a queue.
3208 *
Flavio Ceolinc42cde52021-08-23 15:04:58 -07003209 * @param queue pointer to the queue structure. It must be initialized
3210 * in zeroed/bss memory or with @ref k_work_queue_init before
3211 * use.
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003212 *
3213 * @param stack pointer to the work thread stack area.
3214 *
 3215 * @param stack_size size of the work thread stack area, in bytes.
3216 *
3217 * @param prio initial thread priority
3218 *
3219 * @param cfg optional additional configuration parameters. Pass @c
3220 * NULL if not required, to use the defaults documented in
3221 * k_work_queue_config.
3222 */
3223void k_work_queue_start(struct k_work_q *queue,
3224 k_thread_stack_t *stack, size_t stack_size,
3225 int prio, const struct k_work_queue_config *cfg);
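
/*
 * Example (illustrative sketch): bringing up a dedicated work queue. Assumes
 * the K_THREAD_STACK_DEFINE()/K_THREAD_STACK_SIZEOF() thread stack macros;
 * the stack size and priority are arbitrary.
 *
 * @code
 * K_THREAD_STACK_DEFINE(my_wq_stack, 1024);
 * static struct k_work_q my_wq;
 *
 * void start_my_wq(void)
 * {
 *     k_work_queue_init(&my_wq);        // optional here: my_wq is zeroed bss
 *     k_work_queue_start(&my_wq, my_wq_stack,
 *                        K_THREAD_STACK_SIZEOF(my_wq_stack),
 *                        K_PRIO_PREEMPT(4), NULL);
 * }
 * @endcode
 */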
3226
3227/** @brief Access the thread that animates a work queue.
3228 *
3229 * This is necessary to grant a work queue thread access to things the work
3230 * items it will process are expected to use.
3231 *
3232 * @param queue pointer to the queue structure.
3233 *
3234 * @return the thread associated with the work queue.
3235 */
3236static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue);
3237
3238/** @brief Wait until the work queue has drained, optionally plugging it.
3239 *
3240 * This blocks submission to the work queue except when coming from queue
3241 * thread, and blocks the caller until no more work items are available in the
3242 * queue.
3243 *
3244 * If @p plug is true then submission will continue to be blocked after the
3245 * drain operation completes until k_work_queue_unplug() is invoked.
3246 *
3247 * Note that work items that are delayed are not yet associated with their
3248 * work queue. They must be cancelled externally if a goal is to ensure the
3249 * work queue remains empty. The @p plug feature can be used to prevent
3250 * delayed items from being submitted after the drain completes.
3251 *
3252 * @param queue pointer to the queue structure.
3253 *
3254 * @param plug if true the work queue will continue to block new submissions
3255 * after all items have drained.
3256 *
3257 * @retval 1 if call had to wait for the drain to complete
3258 * @retval 0 if call did not have to wait
3259 * @retval negative if wait was interrupted or failed
3260 */
3261int k_work_queue_drain(struct k_work_q *queue, bool plug);
3262
3263/** @brief Release a work queue to accept new submissions.
3264 *
3265 * This releases the block on new submissions placed when k_work_queue_drain()
3266 * is invoked with the @p plug option enabled. If this is invoked before the
3267 * drain completes new items may be submitted as soon as the drain completes.
3268 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003269 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003270 *
3271 * @param queue pointer to the queue structure.
3272 *
3273 * @retval 0 if successfully unplugged
3274 * @retval -EALREADY if the work queue was not plugged.
3275 */
3276int k_work_queue_unplug(struct k_work_q *queue);
3277
3278/** @brief Initialize a delayable work structure.
3279 *
3280 * This must be invoked before scheduling a delayable work structure for the
3281 * first time. It need not be invoked again on the same work structure. It
3282 * can be re-invoked to change the associated handler, but this must be done
3283 * when the work item is idle.
3284 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003285 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003286 *
3287 * @param dwork the delayable work structure to be initialized.
3288 *
3289 * @param handler the handler to be invoked by the work item.
3290 */
3291void k_work_init_delayable(struct k_work_delayable *dwork,
3292 k_work_handler_t handler);
3293
3294/**
3295 * @brief Get the parent delayable work structure from a work pointer.
3296 *
3297 * This function is necessary when a @c k_work_handler_t function is passed to
 3298 * k_work_schedule_for_queue() and the handler needs to access data in the
 3299 * structure that contains the `k_work_delayable`.
3300 *
3301 * @param work Address passed to the work handler
3302 *
3303 * @return Address of the containing @c k_work_delayable structure.
3304 */
3305static inline struct k_work_delayable *
3306k_work_delayable_from_work(struct k_work *work);
3307
3308/** @brief Busy state flags from the delayable work item.
3309 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003310 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003311 *
3312 * @note This is a live snapshot of state, which may change before the result
3313 * can be inspected. Use locks where appropriate.
3314 *
3315 * @param dwork pointer to the delayable work item.
3316 *
3317 * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED, K_WORK_RUNNING, and
3318 * K_WORK_CANCELING. A zero return value indicates the work item appears to
3319 * be idle.
3320 */
3321int k_work_delayable_busy_get(const struct k_work_delayable *dwork);
3322
3323/** @brief Test whether a delayed work item is currently pending.
3324 *
3325 * Wrapper to determine whether a delayed work item is in a non-idle state.
3326 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003327 * @note This is a live snapshot of state, which may change before the result
3328 * can be inspected. Use locks where appropriate.
3329 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003330 * @funcprops \isr_ok
3331 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003332 * @param dwork pointer to the delayable work item.
3333 *
3334 * @return true if and only if k_work_delayable_busy_get() returns a non-zero
3335 * value.
3336 */
3337static inline bool k_work_delayable_is_pending(
3338 const struct k_work_delayable *dwork);
3339
3340/** @brief Get the absolute tick count at which a scheduled delayable work
3341 * will be submitted.
3342 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003343 * @note This is a live snapshot of state, which may change before the result
3344 * can be inspected. Use locks where appropriate.
3345 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003346 * @funcprops \isr_ok
3347 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003348 * @param dwork pointer to the delayable work item.
3349 *
3350 * @return the tick count when the timer that will schedule the work item will
3351 * expire, or the current tick count if the work is not scheduled.
3352 */
3353static inline k_ticks_t k_work_delayable_expires_get(
3354 const struct k_work_delayable *dwork);
3355
3356/** @brief Get the number of ticks until a scheduled delayable work will be
3357 * submitted.
3358 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003359 * @note This is a live snapshot of state, which may change before the result
3360 * can be inspected. Use locks where appropriate.
3361 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003362 * @funcprops \isr_ok
3363 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003364 * @param dwork pointer to the delayable work item.
3365 *
3366 * @return the number of ticks until the timer that will schedule the work
3367 * item will expire, or zero if the item is not scheduled.
3368 */
3369static inline k_ticks_t k_work_delayable_remaining_get(
3370 const struct k_work_delayable *dwork);
3371
3372/** @brief Submit an idle work item to a queue after a delay.
3373 *
3374 * Unlike k_work_reschedule_for_queue() this is a no-op if the work item is
3375 * already scheduled or submitted, even if @p delay is @c K_NO_WAIT.
3376 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003377 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003378 *
3379 * @param queue the queue on which the work item should be submitted after the
3380 * delay.
3381 *
3382 * @param dwork pointer to the delayable work item.
3383 *
3384 * @param delay the time to wait before submitting the work item. If @c
3385 * K_NO_WAIT and the work is not pending this is equivalent to
3386 * k_work_submit_to_queue().
3387 *
3388 * @retval 0 if work was already scheduled or submitted.
3389 * @retval 1 if work has been scheduled.
Peter Bigot47435902021-05-17 06:36:04 -05003390 * @retval -EBUSY if @p delay is @c K_NO_WAIT and
3391 * k_work_submit_to_queue() fails with this code.
3392 * @retval -EINVAL if @p delay is @c K_NO_WAIT and
3393 * k_work_submit_to_queue() fails with this code.
3394 * @retval -ENODEV if @p delay is @c K_NO_WAIT and
3395 * k_work_submit_to_queue() fails with this code.
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003396 */
3397int k_work_schedule_for_queue(struct k_work_q *queue,
3398 struct k_work_delayable *dwork,
3399 k_timeout_t delay);
3400
3401/** @brief Submit an idle work item to the system work queue after a
3402 * delay.
3403 *
3404 * This is a thin wrapper around k_work_schedule_for_queue(), with all the API
 3405 * characteristics of that function.
3406 *
3407 * @param dwork pointer to the delayable work item.
3408 *
3409 * @param delay the time to wait before submitting the work item. If @c
3410 * K_NO_WAIT this is equivalent to k_work_submit_to_queue().
3411 *
3412 * @return as with k_work_schedule_for_queue().
3413 */
Torbjörn Leksell7a646b32021-03-26 14:41:18 +01003414extern int k_work_schedule(struct k_work_delayable *dwork,
3415 k_timeout_t delay);
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003416
3417/** @brief Reschedule a work item to a queue after a delay.
3418 *
3419 * Unlike k_work_schedule_for_queue() this function can change the deadline of
3420 * a scheduled work item, and will schedule a work item that isn't idle
3421 * (e.g. is submitted or running). This function does not affect ("unsubmit")
3422 * a work item that has been submitted to a queue.
3423 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003424 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003425 *
3426 * @param queue the queue on which the work item should be submitted after the
3427 * delay.
3428 *
3429 * @param dwork pointer to the delayable work item.
3430 *
3431 * @param delay the time to wait before submitting the work item. If @c
3432 * K_NO_WAIT this is equivalent to k_work_submit_to_queue() after canceling
3433 * any previous scheduled submission.
3434 *
3435 * @note If delay is @c K_NO_WAIT ("no delay") the return values are as with
3436 * k_work_submit_to_queue().
3437 *
3438 * @retval 0 if delay is @c K_NO_WAIT and work was already on a queue
3439 * @retval 1 if
3440 * * delay is @c K_NO_WAIT and work was not submitted but has now been queued
3441 * to @p queue; or
3442 * * delay not @c K_NO_WAIT and work has been scheduled
3443 * @retval 2 if delay is @c K_NO_WAIT and work was running and has been queued
3444 * to the queue that was running it
Peter Bigot47435902021-05-17 06:36:04 -05003445 * @retval -EBUSY if @p delay is @c K_NO_WAIT and
3446 * k_work_submit_to_queue() fails with this code.
3447 * @retval -EINVAL if @p delay is @c K_NO_WAIT and
3448 * k_work_submit_to_queue() fails with this code.
3449 * @retval -ENODEV if @p delay is @c K_NO_WAIT and
3450 * k_work_submit_to_queue() fails with this code.
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003451 */
3452int k_work_reschedule_for_queue(struct k_work_q *queue,
3453 struct k_work_delayable *dwork,
3454 k_timeout_t delay);
3455
3456/** @brief Reschedule a work item to the system work queue after a
3457 * delay.
3458 *
3459 * This is a thin wrapper around k_work_reschedule_for_queue(), with all the
3460 * API characteristics of that function.
3461 *
3462 * @param dwork pointer to the delayable work item.
3463 *
3464 * @param delay the time to wait before submitting the work item.
3465 *
3466 * @return as with k_work_reschedule_for_queue().
3467 */
Torbjörn Leksell7a646b32021-03-26 14:41:18 +01003468extern int k_work_reschedule(struct k_work_delayable *dwork,
3469 k_timeout_t delay);
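
/* Illustrative sketch: k_work_reschedule() used to debounce an event source,
 * so the handler runs only once events stop arriving for 50 ms. The names
 * and period are hypothetical.
 *
 * @code
 * static void debounce_handler(struct k_work *work)
 * {
 *         // Invoked once the burst of events has settled.
 * }
 *
 * static K_WORK_DELAYABLE_DEFINE(debounce_dwork, debounce_handler);
 *
 * void on_event(void) // may be called from an ISR; see @funcprops above
 * {
 *         // Each call pushes the deadline back out to 50 ms from now.
 *         (void)k_work_reschedule(&debounce_dwork, K_MSEC(50));
 * }
 * @endcode
 */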
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003470
3471/** @brief Flush delayable work.
3472 *
3473 * If the work is scheduled, it is immediately submitted. Then the caller
3474 * blocks until the work completes, as with k_work_flush().
3475 *
3476 * @note Be careful of caller and work queue thread relative priority. If
3477 * this function sleeps it will not return until the work queue thread
3478 * completes the tasks that allow this thread to resume.
3479 *
3480 * @note Behavior is undefined if this function is invoked on @p dwork from a
3481 * work queue running @p dwork.
3482 *
3483 * @param dwork pointer to the delayable work item.
3484 *
3485 * @param sync pointer to an opaque item containing state related to the
3486 * pending flush. The object must persist until the call returns, and
3487 * be accessible from both the caller thread and the work queue thread. The
3488 * object must not be used for any other flush or cancel operation until this
3489 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3490 * must be allocated in coherent memory.
3491 *
3492 * @retval true if call had to wait for completion
3493 * @retval false if work was already idle
3494 */
3495bool k_work_flush_delayable(struct k_work_delayable *dwork,
3496 struct k_work_sync *sync);
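
/* Illustrative sketch: forcing scheduled work to run and complete before a
 * teardown proceeds. Assumes thread context; on CONFIG_KERNEL_COHERENCE
 * platforms the stack-allocated sync object below would be invalid and must
 * be placed in coherent memory instead.
 *
 * @code
 * void sample_teardown(struct k_work_delayable *dwork)
 * {
 *         struct k_work_sync sync;
 *
 *         if (k_work_flush_delayable(dwork, &sync)) {
 *                 // Had to wait: the handler has now run to completion.
 *         }
 * }
 * @endcode
 */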
3497
3498/** @brief Cancel delayable work.
3499 *
3500 * Similar to k_work_cancel() but for delayable work. If the work is
3501 * scheduled or submitted it is canceled. This function does not wait for the
3502 * cancellation to complete.
3503 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003504 * @note The work may still be running when this returns. Use
3505 * k_work_flush_delayable() or k_work_cancel_delayable_sync() to ensure it is
3506 * not running.
3507 *
3508 * @note Canceling delayable work does not prevent rescheduling it. It does
3509 * prevent submitting it until the cancellation completes.
3510 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003511 * @funcprops \isr_ok
3512 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003513 * @param dwork pointer to the delayable work item.
3514 *
3515 * @return the k_work_delayable_busy_get() status indicating the state of the
3516 * item after all cancellation steps performed by this call are completed.
3517 */
3518int k_work_cancel_delayable(struct k_work_delayable *dwork);
3519
3520/** @brief Cancel delayable work and wait.
3521 *
3522 * Like k_work_cancel_delayable() but waits until the work becomes idle.
3523 *
3524 * @note Canceling delayable work does not prevent rescheduling it. It does
3525 * prevent submitting it until the cancellation completes.
3526 *
3527 * @note Be careful of caller and work queue thread relative priority. If
3528 * this function sleeps it will not return until the work queue thread
3529 * completes the tasks that allow this thread to resume.
3530 *
3531 * @note Behavior is undefined if this function is invoked on @p dwork from a
3532 * work queue running @p dwork.
3533 *
3534 * @param dwork pointer to the delayable work item.
3535 *
3536 * @param sync pointer to an opaque item containing state related to the
3537 * pending cancellation. The object must persist until the call returns, and
3538 * be accessible from both the caller thread and the work queue thread. The
3539 * object must not be used for any other flush or cancel operation until this
3540 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3541 * must be allocated in coherent memory.
3542 *
Peter Bigot707dc222021-04-16 11:48:50 -05003543 * @retval true if work was not idle (call had to wait for cancellation of a
3544 * running handler to complete, or scheduled or submitted operations were
3545 * cancelled);
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003546 * @retval false otherwise
3547 */
3548bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
3549 struct k_work_sync *sync);
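
/* Illustrative sketch: stopping a delayable work item and guaranteeing its
 * handler is no longer running before releasing resources it uses. The same
 * CONFIG_KERNEL_COHERENCE caveat about stack-allocated sync objects applies.
 *
 * @code
 * void sample_stop(struct k_work_delayable *dwork)
 * {
 *         struct k_work_sync sync;
 *
 *         (void)k_work_cancel_delayable_sync(dwork, &sync);
 *         // Safe to free resources referenced by the handler from here on.
 * }
 * @endcode
 */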
3550
3551enum {
3552/**
3553 * @cond INTERNAL_HIDDEN
3554 */
3555
3556 /* The atomic API is used for all work and queue flags fields to
3557 * enforce sequential consistency in SMP environments.
3558 */
3559
3560 /* Bits that represent the work item states. At least nine of the
3561 * combinations are distinct valid stable states.
3562 */
3563 K_WORK_RUNNING_BIT = 0,
3564 K_WORK_CANCELING_BIT = 1,
3565 K_WORK_QUEUED_BIT = 2,
3566 K_WORK_DELAYED_BIT = 3,
3567
3568 K_WORK_MASK = BIT(K_WORK_DELAYED_BIT) | BIT(K_WORK_QUEUED_BIT)
3569 | BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT),
3570
3571 /* Static work flags */
3572 K_WORK_DELAYABLE_BIT = 8,
3573 K_WORK_DELAYABLE = BIT(K_WORK_DELAYABLE_BIT),
3574
3575 /* Dynamic work queue flags */
3576 K_WORK_QUEUE_STARTED_BIT = 0,
3577 K_WORK_QUEUE_STARTED = BIT(K_WORK_QUEUE_STARTED_BIT),
3578 K_WORK_QUEUE_BUSY_BIT = 1,
3579 K_WORK_QUEUE_BUSY = BIT(K_WORK_QUEUE_BUSY_BIT),
3580 K_WORK_QUEUE_DRAIN_BIT = 2,
3581 K_WORK_QUEUE_DRAIN = BIT(K_WORK_QUEUE_DRAIN_BIT),
3582 K_WORK_QUEUE_PLUGGED_BIT = 3,
3583 K_WORK_QUEUE_PLUGGED = BIT(K_WORK_QUEUE_PLUGGED_BIT),
3584
3585 /* Static work queue flags */
3586 K_WORK_QUEUE_NO_YIELD_BIT = 8,
3587 K_WORK_QUEUE_NO_YIELD = BIT(K_WORK_QUEUE_NO_YIELD_BIT),
3588
3589/**
3590 * INTERNAL_HIDDEN @endcond
3591 */
3592 /* Transient work flags */
3593
3594 /** @brief Flag indicating a work item that is running under a work
3595 * queue thread.
3596 *
3597 * Accessed via k_work_busy_get(). May co-occur with other flags.
3598 */
3599 K_WORK_RUNNING = BIT(K_WORK_RUNNING_BIT),
3600
3601 /** @brief Flag indicating a work item that is being canceled.
3602 *
3603 * Accessed via k_work_busy_get(). May co-occur with other flags.
3604 */
3605 K_WORK_CANCELING = BIT(K_WORK_CANCELING_BIT),
3606
3607 /** @brief Flag indicating a work item that has been submitted to a
3608 * queue but has not started running.
3609 *
3610 * Accessed via k_work_busy_get(). May co-occur with other flags.
3611 */
3612 K_WORK_QUEUED = BIT(K_WORK_QUEUED_BIT),
3613
3614 /** @brief Flag indicating a delayed work item that is scheduled for
3615 * submission to a queue.
3616 *
3617 * Accessed via k_work_busy_get(). May co-occur with other flags.
3618 */
3619 K_WORK_DELAYED = BIT(K_WORK_DELAYED_BIT),
3620};
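
/* Illustrative sketch: interpreting the transient state flags returned by
 * k_work_delayable_busy_get(). A zero value means the item is idle.
 *
 * @code
 * void sample_inspect(const struct k_work_delayable *dwork)
 * {
 *         int busy = k_work_delayable_busy_get(dwork);
 *
 *         if (busy == 0) {
 *                 // Idle: not delayed, queued, running, or canceling.
 *         } else if ((busy & K_WORK_DELAYED) != 0) {
 *                 // Countdown to submission still in progress.
 *         } else if ((busy & (K_WORK_QUEUED | K_WORK_RUNNING)) != 0) {
 *                 // On a queue, or its handler is currently executing.
 *         }
 * }
 * @endcode
 */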
3621
3622/** @brief A structure used to submit work. */
3623struct k_work {
3624 /* All fields are protected by the work module spinlock. No fields
3625 * are to be accessed except through kernel API.
3626 */
3627
3628 /* Node to link into k_work_q pending list. */
3629 sys_snode_t node;
3630
3631 /* The function to be invoked by the work queue thread. */
3632 k_work_handler_t handler;
3633
3634 /* The queue on which the work item was last submitted. */
3635 struct k_work_q *queue;
3636
3637 /* State of the work item.
3638 *
3639 * The item can be DELAYED, QUEUED, and RUNNING simultaneously.
3640 *
3641 * It can be RUNNING and CANCELING simultaneously.
3642 */
3643 uint32_t flags;
3644};
3645
3646#define Z_WORK_INITIALIZER(work_handler) { \
3647 .handler = work_handler, \
3648}
3649
3650/** @brief A structure used to submit work after a delay. */
3651struct k_work_delayable {
3652 /* The work item. */
3653 struct k_work work;
3654
3655 /* Timeout used to submit work after a delay. */
3656 struct _timeout timeout;
3657
3658 /* The queue to which the work should be submitted. */
3659 struct k_work_q *queue;
3660};
3661
3662#define Z_WORK_DELAYABLE_INITIALIZER(work_handler) { \
3663 .work = { \
3664 .handler = work_handler, \
3665 .flags = K_WORK_DELAYABLE, \
3666 }, \
3667}
3668
3669/**
3670 * @brief Initialize a statically-defined delayable work item.
3671 *
3672 * This macro can be used to initialize a statically-defined delayable
3673 * work item, prior to its first use. For example,
3674 *
3675 * @code static K_WORK_DELAYABLE_DEFINE(<dwork>, <work_handler>); @endcode
3676 *
3677 * Note that if the runtime dependencies allow initialization with
3678 * k_work_init_delayable(), using that instead will eliminate the
3679 * initialized object in ROM that is produced by this macro and copied
3680 * in at system startup.
3681 *
3682 * @param work Symbol name for delayable work item object
3683 * @param work_handler Function to invoke each time work item is processed.
3684 */
3685#define K_WORK_DELAYABLE_DEFINE(work, work_handler) \
3686 struct k_work_delayable work \
3687 = Z_WORK_DELAYABLE_INITIALIZER(work_handler)
3688
3689/**
3690 * @cond INTERNAL_HIDDEN
3691 */
3692
3693/* Record used to wait for work to flush.
3694 *
3695 * The work item is inserted into the queue that will process (or is
3696 * processing) the item, and will be processed as soon as the item
3697 * completes. When the flusher is processed the semaphore will be
3698 * signaled, releasing the thread waiting for the flush.
3699 */
3700struct z_work_flusher {
3701 struct k_work work;
3702 struct k_sem sem;
3703};
3704
3705/* Record used to wait for work to complete a cancellation.
3706 *
3707 * The work item is inserted into a global queue of pending cancels.
3708 * When a cancelling work item goes idle any matching waiters are
3709 * removed from pending_cancels and are woken.
3710 */
3711struct z_work_canceller {
3712 sys_snode_t node;
3713 struct k_work *work;
3714 struct k_sem sem;
3715};
3716
3717/**
3718 * INTERNAL_HIDDEN @endcond
3719 */
3720
3721/** @brief A structure holding internal state for a pending synchronous
3722 * operation on a work item or queue.
3723 *
3724 * Instances of this type are provided by the caller for invocation of
3725 * k_work_flush(), k_work_cancel_sync() and sibling flush and cancel APIs. A
3726 * referenced object must persist until the call returns, and be accessible
3727 * from both the caller thread and the work queue thread.
3728 *
3729 * @note If CONFIG_KERNEL_COHERENCE is enabled the object must be allocated in
3730 * coherent memory; see arch_mem_coherent(). The stack on these architectures
3731 * is generally not coherent, so the object must not be stack-allocated.
3732 * Violations are detected by runtime assertion.
3733 */
3734struct k_work_sync {
3735 union {
3736 struct z_work_flusher flusher;
3737 struct z_work_canceller canceller;
3738 };
3739};
3740
3741/** @brief A structure holding optional configuration items for a work
3742 * queue.
3743 *
3744 * This structure, and values it references, are not retained by
3745 * k_work_queue_start().
3746 */
3747struct k_work_queue_config {
3748 /** The name to be given to the work queue thread.
3749 *
3750 * If left null the thread will not have a name.
3751 */
3752 const char *name;
3753
3754 /** Control whether the work queue thread should yield between
3755 * items.
3756 *
3757 * Yielding between items helps guarantee the work queue
3758 * thread does not starve other threads, including cooperative
3759 * ones released by a work item. This is the default behavior.
3760 *
3761 * Set this to @c true to prevent the work queue thread from
3762 * yielding between items. This may be appropriate when a
3763 * sequence of items should complete without yielding
3764 * control.
3765 */
3766 bool no_yield;
3767};
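
/* Illustrative sketch: supplying the optional configuration when starting a
 * work queue. The name and no_yield choice are hypothetical; per the text
 * above, the structure need not outlive the k_work_queue_start() call.
 *
 * @code
 * static const struct k_work_queue_config sample_cfg = {
 *         .name = "sampleq",
 *         .no_yield = true, // run items back-to-back without yielding
 * };
 *
 * // k_work_queue_start(&queue, stack, stack_size, prio, &sample_cfg);
 * @endcode
 */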
3768
3769/** @brief A structure used to hold work until it can be processed. */
3770struct k_work_q {
3771 /* The thread that animates the work. */
3772 struct k_thread thread;
3773
3774 /* All the following fields must be accessed only while the
3775 * work module spinlock is held.
3776 */
3777
3778 /* List of k_work items to be worked. */
3779 sys_slist_t pending;
3780
3781 /* Wait queue for idle work thread. */
3782 _wait_q_t notifyq;
3783
3784 /* Wait queue for threads waiting for the queue to drain. */
3785 _wait_q_t drainq;
3786
3787 /* Flags describing queue state. */
3788 uint32_t flags;
3789};
3790
3791/* Provide the implementation for inline functions declared above */
3792
3793static inline bool k_work_is_pending(const struct k_work *work)
3794{
3795 return k_work_busy_get(work) != 0;
3796}
3797
3798static inline struct k_work_delayable *
3799k_work_delayable_from_work(struct k_work *work)
3800{
3801 return CONTAINER_OF(work, struct k_work_delayable, work);
3802}
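
/* Illustrative sketch: a handler that recovers its containing delayable item
 * with k_work_delayable_from_work() and reschedules itself, yielding periodic
 * work. The 1 s period and names are hypothetical.
 *
 * @code
 * static void periodic_handler(struct k_work *work)
 * {
 *         struct k_work_delayable *dwork = k_work_delayable_from_work(work);
 *
 *         // ... do the periodic processing ...
 *
 *         (void)k_work_schedule(dwork, K_SECONDS(1));
 * }
 * @endcode
 */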
3803
3804static inline bool k_work_delayable_is_pending(
3805 const struct k_work_delayable *dwork)
3806{
3807 return k_work_delayable_busy_get(dwork) != 0;
3808}
3809
3810static inline k_ticks_t k_work_delayable_expires_get(
3811 const struct k_work_delayable *dwork)
3812{
3813 return z_timeout_expires(&dwork->timeout);
3814}
3815
3816static inline k_ticks_t k_work_delayable_remaining_get(
3817 const struct k_work_delayable *dwork)
3818{
3819 return z_timeout_remaining(&dwork->timeout);
3820}
3821
3822static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
3823{
3824 return &queue->thread;
3825}
3826
3827/* Legacy wrappers */
3828
Peter Bigot09a31ce2021-03-04 11:21:46 -06003829__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003830static inline bool k_work_pending(const struct k_work *work)
3831{
3832 return k_work_is_pending(work);
3833}
3834
Peter Bigot09a31ce2021-03-04 11:21:46 -06003835__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003836static inline void k_work_q_start(struct k_work_q *work_q,
3837 k_thread_stack_t *stack,
3838 size_t stack_size, int prio)
3839{
3840 k_work_queue_start(work_q, stack, stack_size, prio, NULL);
3841}
3842
Peter Bigot09a31ce2021-03-04 11:21:46 -06003843/* deprecated, remove when corresponding deprecated API is removed. */
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003844struct k_delayed_work {
3845 struct k_work_delayable work;
3846};
3847
Peter Bigot09a31ce2021-03-04 11:21:46 -06003848#define Z_DELAYED_WORK_INITIALIZER(work_handler) __DEPRECATED_MACRO { \
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003849 .work = Z_WORK_DELAYABLE_INITIALIZER(work_handler), \
3850}
3851
Peter Bigot09a31ce2021-03-04 11:21:46 -06003852__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003853static inline void k_delayed_work_init(struct k_delayed_work *work,
3854 k_work_handler_t handler)
3855{
3856 k_work_init_delayable(&work->work, handler);
3857}
3858
Peter Bigot09a31ce2021-03-04 11:21:46 -06003859__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003860static inline int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
3861 struct k_delayed_work *work,
3862 k_timeout_t delay)
3863{
3864 int rc = k_work_reschedule_for_queue(work_q, &work->work, delay);
3865
3866 /* Legacy API doesn't distinguish success cases. */
3867 return (rc >= 0) ? 0 : rc;
3868}
3869
Peter Bigot09a31ce2021-03-04 11:21:46 -06003870__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003871static inline int k_delayed_work_submit(struct k_delayed_work *work,
3872 k_timeout_t delay)
3873{
3874 int rc = k_work_reschedule(&work->work, delay);
3875
3876 /* Legacy API doesn't distinguish success cases. */
3877 return (rc >= 0) ? 0 : rc;
3878}
3879
Peter Bigot09a31ce2021-03-04 11:21:46 -06003880__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003881static inline int k_delayed_work_cancel(struct k_delayed_work *work)
3882{
3883 bool pending = k_work_delayable_is_pending(&work->work);
3884 int rc = k_work_cancel_delayable(&work->work);
3885
3886 /* Old return value rules:
3887 *
3888 * 0 if:
3889 * * Work item countdown cancelled before the item was submitted to
3890 * its queue; or
3891 * * Work item was removed from its queue before it was processed.
3892 *
3893 * -EINVAL if:
3894 * * Work item has never been submitted; or
3895 * * Work item has been successfully cancelled; or
3896 * * Timeout handler is in the process of submitting the work item to
3897 * its queue; or
3898 * * Work queue thread has removed the work item from the queue but
3899 * has not called its handler.
3900 *
3901 * -EALREADY if:
3902 * * Work queue thread has removed the work item from the queue and
3903 * cleared its pending flag; or
3904 * * Work queue thread is invoking the item handler; or
3905 * * Work item handler has completed.
3906 *
3907 *
3908 * We can't reconstruct those states, so call it successful only when
3909 * a pending item is no longer pending, -EINVAL if it was pending and
3910 * still is, and -EALREADY if it wasn't pending (so
3911 * presumably cancellation should have had no effect, assuming we
3912 * didn't hit a race condition).
3913 */
3914 if (pending) {
3915 return (rc == 0) ? 0 : -EINVAL;
3916 }
3917
3918 return -EALREADY;
3919}
3920
Peter Bigot09a31ce2021-03-04 11:21:46 -06003921__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003922static inline bool k_delayed_work_pending(struct k_delayed_work *work)
3923{
3924 return k_work_delayable_is_pending(&work->work);
3925}
3926
Peter Bigot09a31ce2021-03-04 11:21:46 -06003927__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003928static inline int32_t k_delayed_work_remaining_get(struct k_delayed_work *work)
3929{
3930 k_ticks_t rem = k_work_delayable_remaining_get(&work->work);
3931
3932 /* Probably should be ceil32, but was floor32 */
3933 return k_ticks_to_ms_floor32(rem);
3934}
3935
Peter Bigot09a31ce2021-03-04 11:21:46 -06003936__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003937static inline k_ticks_t k_delayed_work_expires_ticks(
3938 struct k_delayed_work *work)
3939{
3940 return k_work_delayable_expires_get(&work->work);
3941}
3942
Peter Bigot09a31ce2021-03-04 11:21:46 -06003943__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003944static inline k_ticks_t k_delayed_work_remaining_ticks(
3945 struct k_delayed_work *work)
3946{
3947 return k_work_delayable_remaining_get(&work->work);
3948}
3949
3950/** @} */
3951
Peter Bigot4e3b9262021-01-15 10:52:38 -06003952struct k_work_user;
3953
3954/**
Anas Nashifc355b7e2021-04-14 08:49:05 -04003955 * @addtogroup workqueue_apis
Peter Bigot4e3b9262021-01-15 10:52:38 -06003956 * @{
3957 */
3958
3959/**
3960 * @typedef k_work_user_handler_t
3961 * @brief Work item handler function type for user work queues.
3962 *
3963 * A work item's handler function is executed by a user workqueue's thread
3964 * when the work item is processed by the workqueue.
3965 *
3966 * @param work Address of the work item.
3967 *
3968 * @return N/A
3969 */
3970typedef void (*k_work_user_handler_t)(struct k_work_user *work);
3971
3972/**
3973 * @cond INTERNAL_HIDDEN
3974 */
3975
3976struct k_work_user_q {
3977 struct k_queue queue;
3978 struct k_thread thread;
3979};
3980
3981enum {
3982 K_WORK_USER_STATE_PENDING, /* Work item pending state */
3983};
3984
3985struct k_work_user {
3986 void *_reserved; /* Used by k_queue implementation. */
3987 k_work_user_handler_t handler;
3988 atomic_t flags;
3989};
3990
3991/**
3992 * INTERNAL_HIDDEN @endcond
3993 */
3994
3995#define Z_WORK_USER_INITIALIZER(work_handler) \
3996 { \
Fredrik Gihl67295be2021-06-11 12:31:58 +02003997 ._reserved = NULL, \
Peter Bigot4e3b9262021-01-15 10:52:38 -06003998 .handler = work_handler, \
Fredrik Gihl67295be2021-06-11 12:31:58 +02003999 .flags = 0 \
Peter Bigot4e3b9262021-01-15 10:52:38 -06004000 }
4001
4002/**
4003 * @brief Initialize a statically-defined user work item.
4004 *
4005 * This macro can be used to initialize a statically-defined user work
4006 * item, prior to its first use. For example,
4007 *
4008 * @code static K_WORK_USER_DEFINE(<work>, <work_handler>); @endcode
4009 *
4010 * @param work Symbol name for work item object
4011 * @param work_handler Function to invoke each time work item is processed.
4012 */
4013#define K_WORK_USER_DEFINE(work, work_handler) \
4014 struct k_work_user work = Z_WORK_USER_INITIALIZER(work_handler)
4015
4016/**
4017 * @brief Initialize a userspace work item.
4018 *
4019 * This routine initializes a user workqueue work item, prior to its
4020 * first use.
4021 *
4022 * @param work Address of work item.
4023 * @param handler Function to invoke each time work item is processed.
4024 *
4025 * @return N/A
4026 */
4027static inline void k_work_user_init(struct k_work_user *work,
4028 k_work_user_handler_t handler)
4029{
4030 *work = (struct k_work_user)Z_WORK_USER_INITIALIZER(handler);
4031}
4032
4033/**
4034 * @brief Check if a userspace work item is pending.
4035 *
4036 * This routine indicates if user work item @a work is pending in a workqueue's
4037 * queue.
4038 *
4039 * @note Checking if the work is pending gives no guarantee that the
4040 * work will still be pending when this information is used. It is up to
4041 * the caller to make sure that this information is used in a safe manner.
4042 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004043 * @funcprops \isr_ok
Peter Bigot4e3b9262021-01-15 10:52:38 -06004044 *
4045 * @param work Address of work item.
4046 *
4047 * @return true if work item is pending, or false if it is not pending.
4048 */
4049static inline bool k_work_user_is_pending(struct k_work_user *work)
4050{
4051 return atomic_test_bit(&work->flags, K_WORK_USER_STATE_PENDING);
4052}
4053
4054/**
4055 * @brief Submit a work item to a user mode workqueue
4056 *
4057 * Submits a work item to a workqueue that runs in user mode. A temporary
4058 * memory allocation is made from the caller's resource pool which is freed
4059 * once the worker thread consumes the k_work item. The workqueue
4060 * thread must have memory access to the k_work item being submitted. The caller
4061 * must have permission granted on the work_q parameter's queue object.
4062 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004063 * @funcprops \isr_ok
Peter Bigot4e3b9262021-01-15 10:52:38 -06004064 *
4065 * @param work_q Address of workqueue.
4066 * @param work Address of work item.
4067 *
4068 * @retval -EBUSY if the work item was already in some workqueue
4069 * @retval -ENOMEM if no memory for thread resource pool allocation
4070 * @retval 0 Success
4071 */
4072static inline int k_work_user_submit_to_queue(struct k_work_user_q *work_q,
4073 struct k_work_user *work)
4074{
4075 int ret = -EBUSY;
4076
4077 if (!atomic_test_and_set_bit(&work->flags,
4078 K_WORK_USER_STATE_PENDING)) {
4079 ret = k_queue_alloc_append(&work_q->queue, work);
4080
4081 /* Couldn't insert into the queue. Clear the pending bit
4082 * so the work item can be submitted again
4083 */
4084 if (ret != 0) {
4085 atomic_clear_bit(&work->flags,
4086 K_WORK_USER_STATE_PENDING);
4087 }
4088 }
4089
4090 return ret;
4091}
4092
4093/**
4094 * @brief Start a workqueue in user mode
4095 *
4096 * This works identically to k_work_queue_start() except it is callable from
4097 * user mode, and the worker thread created will run in user mode. The caller
4098 * must have permissions granted on both the work_q parameter's thread and
4099 * queue objects, and the same restrictions on priority apply as
4100 * k_thread_create().
4101 *
4102 * @param work_q Address of workqueue.
4103 * @param stack Pointer to work queue thread's stack space, as defined by
4104 * K_THREAD_STACK_DEFINE()
4105 * @param stack_size Size of the work queue thread's stack (in bytes), which
4106 * should either be the same constant passed to
4107 * K_THREAD_STACK_DEFINE() or the value of K_THREAD_STACK_SIZEOF().
4108 * @param prio Priority of the work queue's thread.
4109 * @param name optional thread name. If not null a copy is made into the
4110 * thread's name buffer.
4111 *
4112 * @return N/A
4113 */
4114extern void k_work_user_queue_start(struct k_work_user_q *work_q,
4115 k_thread_stack_t *stack,
4116 size_t stack_size, int prio,
4117 const char *name);
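
/* Illustrative sketch: defining, starting, and submitting to a user-mode work
 * queue. Stack size, priority, and names are hypothetical; the caller needs
 * the object permissions described above.
 *
 * @code
 * static void user_handler(struct k_work_user *work)
 * {
 *         // Runs in the user queue's thread, in user mode.
 * }
 *
 * static K_WORK_USER_DEFINE(user_item, user_handler);
 * static K_THREAD_STACK_DEFINE(user_q_stack, 1024);
 * static struct k_work_user_q user_q;
 *
 * void sample_user_setup(void)
 * {
 *         k_work_user_queue_start(&user_q, user_q_stack,
 *                                 K_THREAD_STACK_SIZEOF(user_q_stack),
 *                                 10, "user_q");
 *
 *         if (k_work_user_submit_to_queue(&user_q, &user_item) != 0) {
 *                 // -EBUSY (already pending) or -ENOMEM (no pool memory).
 *         }
 * }
 * @endcode
 */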
4118
4119/** @} */
4120
Allan Stephensc98da842016-11-11 15:45:03 -05004121/**
Peter Bigot3d583982020-11-18 08:55:32 -06004122 * @cond INTERNAL_HIDDEN
4123 */
4124
4125struct k_work_poll {
4126 struct k_work work;
4127 struct k_work_q *workq;
4128 struct z_poller poller;
4129 struct k_poll_event *events;
4130 int num_events;
4131 k_work_handler_t real_handler;
4132 struct _timeout timeout;
4133 int poll_result;
4134};
4135
4136/**
4137 * INTERNAL_HIDDEN @endcond
4138 */
4139
4140/**
Anas Nashifc355b7e2021-04-14 08:49:05 -04004141 * @addtogroup workqueue_apis
Peter Bigot3d583982020-11-18 08:55:32 -06004142 * @{
4143 */
4144
4145/**
Peter Bigotdc34e7c2020-10-28 11:24:05 -05004146 * @brief Initialize a statically-defined work item.
4147 *
4148 * This macro can be used to initialize a statically-defined workqueue work
4149 * item, prior to its first use. For example,
4150 *
4151 * @code static K_WORK_DEFINE(<work>, <work_handler>); @endcode
4152 *
4153 * @param work Symbol name for work item object
4154 * @param work_handler Function to invoke each time work item is processed.
4155 */
4156#define K_WORK_DEFINE(work, work_handler) \
4157 struct k_work work = Z_WORK_INITIALIZER(work_handler)
4158
4159/**
4160 * @brief Initialize a statically-defined delayed work item.
4161 *
4162 * This macro can be used to initialize a statically-defined workqueue
4163 * delayed work item, prior to its first use. For example,
4164 *
4165 * @code static K_DELAYED_WORK_DEFINE(<work>, <work_handler>); @endcode
4166 *
4167 * @param work Symbol name for delayed work item object
4168 * @param work_handler Function to invoke each time work item is processed.
4169 */
Peter Bigot09a31ce2021-03-04 11:21:46 -06004170#define K_DELAYED_WORK_DEFINE(work, work_handler) __DEPRECATED_MACRO \
Peter Bigotdc34e7c2020-10-28 11:24:05 -05004171 struct k_delayed_work work = Z_DELAYED_WORK_INITIALIZER(work_handler)
4172
4173/**
Peter Bigot3d583982020-11-18 08:55:32 -06004174 * @brief Initialize a triggered work item.
4175 *
4176 * This routine initializes a workqueue triggered work item, prior to
4177 * its first use.
4178 *
4179 * @param work Address of triggered work item.
4180 * @param handler Function to invoke each time work item is processed.
4181 *
4182 * @return N/A
4183 */
4184extern void k_work_poll_init(struct k_work_poll *work,
4185 k_work_handler_t handler);
4186
4187/**
4188 * @brief Submit a triggered work item.
4189 *
4190 * This routine schedules work item @a work to be processed by workqueue
4191 * @a work_q when one of the given @a events is signaled. The routine
4192 * initiates an internal poller for the work item and then returns to the
4193 * caller. The work item is actually submitted to the workqueue, and
4194 * becomes pending, only when one of the watched events happens.
4195 *
4196 * Submitting a previously submitted triggered work item that is still
4197 * waiting for the event cancels the existing submission and reschedules it
4198 * using the new event list. Note that this behavior is inherently subject
4199 * to race conditions with the pre-existing triggered work item and work queue,
4200 * so care must be taken to synchronize such resubmissions externally.
4201 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004202 * @funcprops \isr_ok
Peter Bigot3d583982020-11-18 08:55:32 -06004203 *
4204 * @warning
4205 * Provided array of events as well as a triggered work item must be placed
4206 * in persistent memory (valid until work handler execution or work
4207 * cancellation) and cannot be modified after submission.
4208 *
4209 * @param work_q Address of workqueue.
4210 * @param work Address of triggered work item.
4211 * @param events An array of events which trigger the work.
4212 * @param num_events The number of events in the array.
4213 * @param timeout Timeout after which the work will be scheduled
4214 * for execution even if not triggered.
4215 *
4216 *
4217 * @retval 0 Work item started watching for events.
4218 * @retval -EINVAL Work item is being processed or has completed its work.
4219 * @retval -EADDRINUSE Work item is pending on a different workqueue.
4220 */
4221extern int k_work_poll_submit_to_queue(struct k_work_q *work_q,
4222 struct k_work_poll *work,
4223 struct k_poll_event *events,
4224 int num_events,
4225 k_timeout_t timeout);
4226
4227/**
4228 * @brief Submit a triggered work item to the system workqueue.
4229 *
4230 * This routine schedules work item @a work to be processed by system
4231 * workqueue when one of the given @a events is signaled. The routine
4232 * initiates an internal poller for the work item and then returns to the
4233 * caller. The work item is actually submitted to the workqueue, and
4234 * becomes pending, only when one of the watched events happens.
4235 *
4236 * Submitting a previously submitted triggered work item that is still
4237 * waiting for the event cancels the existing submission and reschedules it
4238 * using the new event list. Note that this behavior is inherently subject
4239 * to race conditions with the pre-existing triggered work item and work queue,
4240 * so care must be taken to synchronize such resubmissions externally.
4241 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004242 * @funcprops \isr_ok
Peter Bigot3d583982020-11-18 08:55:32 -06004243 *
4244 * @warning
4245 * Provided array of events as well as a triggered work item must not be
4246 * modified until the item has been processed by the workqueue.
4247 *
4248 * @param work Address of triggered work item.
4249 * @param events An array of events which trigger the work.
4250 * @param num_events The number of events in the array.
4251 * @param timeout Timeout after which the work will be scheduled
4252 * for execution even if not triggered.
4253 *
4254 * @retval 0 Work item started watching for events.
4255 * @retval -EINVAL Work item is being processed or has completed its work.
4256 * @retval -EADDRINUSE Work item is pending on a different workqueue.
4257 */
Torbjörn Leksellcae9a902021-03-26 14:20:05 +01004258extern int k_work_poll_submit(struct k_work_poll *work,
Peter Bigot3d583982020-11-18 08:55:32 -06004259 struct k_poll_event *events,
4260 int num_events,
Torbjörn Leksellcae9a902021-03-26 14:20:05 +01004261 k_timeout_t timeout);
Peter Bigot3d583982020-11-18 08:55:32 -06004262
4263/**
4264 * @brief Cancel a triggered work item.
4265 *
4266 * This routine cancels the submission of triggered work item @a work.
4267 * A triggered work item can only be canceled while no watched event has
4268 * yet triggered submission of the work item to a workqueue.
4269 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004270 * @funcprops \isr_ok
Peter Bigot3d583982020-11-18 08:55:32 -06004271 *
4272 * @param work Address of triggered work item.
4273 *
4274 * @retval 0 Work item canceled.
4275 * @retval -EINVAL Work item is being processed or has completed its work.
4276 */
4277extern int k_work_poll_cancel(struct k_work_poll *work);
4278
4279/** @} */
4280
4281/**
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004282 * @defgroup msgq_apis Message Queue APIs
4283 * @ingroup kernel_apis
4284 * @{
Allan Stephensc98da842016-11-11 15:45:03 -05004285 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004286
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004287/**
4288 * @brief Message Queue Structure
4289 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004290struct k_msgq {
Anas Nashife71293e2019-12-04 20:00:14 -05004291 /** Message queue wait queue */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004292 _wait_q_t wait_q;
Anas Nashife71293e2019-12-04 20:00:14 -05004293 /** Lock */
Andy Rossbe03dbd2018-07-26 10:23:02 -07004294 struct k_spinlock lock;
Anas Nashife71293e2019-12-04 20:00:14 -05004295 /** Message size */
Peter Mitsis026b4ed2016-10-13 11:41:45 -04004296 size_t msg_size;
Anas Nashife71293e2019-12-04 20:00:14 -05004297 /** Maximal number of messages */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004298 uint32_t max_msgs;
Anas Nashife71293e2019-12-04 20:00:14 -05004299 /** Start of message buffer */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004300 char *buffer_start;
Anas Nashife71293e2019-12-04 20:00:14 -05004301 /** End of message buffer */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004302 char *buffer_end;
Anas Nashife71293e2019-12-04 20:00:14 -05004303 /** Read pointer */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004304 char *read_ptr;
Anas Nashife71293e2019-12-04 20:00:14 -05004305 /** Write pointer */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004306 char *write_ptr;
Anas Nashife71293e2019-12-04 20:00:14 -05004307 /** Number of used messages */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004308 uint32_t used_msgs;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004309
Nick Gravesb445f132021-04-12 12:35:18 -07004310 _POLL_EVENT;
4311
Anas Nashife71293e2019-12-04 20:00:14 -05004312 /** Message queue */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004313 uint8_t flags;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004314};
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004315/**
4316 * @cond INTERNAL_HIDDEN
4317 */
4318
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004319
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004320#define Z_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004321 { \
Patrik Flykt4344e272019-03-08 14:19:05 -07004322 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
Peter Mitsis1da807e2016-10-06 11:36:59 -04004323 .msg_size = q_msg_size, \
Charles E. Youse6d01f672019-03-18 10:27:34 -07004324 .max_msgs = q_max_msgs, \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004325 .buffer_start = q_buffer, \
Peter Mitsis1da807e2016-10-06 11:36:59 -04004326 .buffer_end = q_buffer + (q_max_msgs * q_msg_size), \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004327 .read_ptr = q_buffer, \
4328 .write_ptr = q_buffer, \
4329 .used_msgs = 0, \
Nick Gravesb445f132021-04-12 12:35:18 -07004330 _POLL_EVENT_OBJ_INIT(obj) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004331 }
Kumar Galac8b94f42020-09-29 09:52:23 -05004332
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004333/**
4334 * INTERNAL_HIDDEN @endcond
4335 */
4336
Andrew Boie65a9d2a2017-06-27 10:51:23 -07004337
Andrew Boie0fe789f2018-04-12 18:35:56 -07004338#define K_MSGQ_FLAG_ALLOC BIT(0)
4339
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004340/**
4341 * @brief Message Queue Attributes
4342 */
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304343struct k_msgq_attrs {
Anas Nashife71293e2019-12-04 20:00:14 -05004344 /** Message Size */
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304345 size_t msg_size;
Anas Nashife71293e2019-12-04 20:00:14 -05004346 /** Maximal number of messages */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004347 uint32_t max_msgs;
Anas Nashife71293e2019-12-04 20:00:14 -05004348 /** Used messages */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004349 uint32_t used_msgs;
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304350};
4351
Allan Stephensc98da842016-11-11 15:45:03 -05004352
4353/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004354 * @brief Statically define and initialize a message queue.
Peter Mitsis1da807e2016-10-06 11:36:59 -04004355 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004356 * The message queue's ring buffer contains space for @a q_max_msgs messages,
4357 * each of which is @a q_msg_size bytes long. The buffer is aligned to a
Allan Stephensda827222016-11-09 14:23:58 -06004358 * @a q_align -byte boundary, which must be a power of 2. To ensure that each
4359 * message is similarly aligned to this boundary, @a q_msg_size must also be
4360 * a multiple of @a q_align.
Peter Mitsis1da807e2016-10-06 11:36:59 -04004361 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004362 * The message queue can be accessed outside the module where it is defined
4363 * using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004364 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05004365 * @code extern struct k_msgq <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004366 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004367 * @param q_name Name of the message queue.
4368 * @param q_msg_size Message size (in bytes).
4369 * @param q_max_msgs Maximum number of messages that can be queued.
Allan Stephensda827222016-11-09 14:23:58 -06004370 * @param q_align Alignment of the message queue's ring buffer.
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004371 *
Peter Mitsis1da807e2016-10-06 11:36:59 -04004372 */
Nicolas Pitreb1d37422019-06-03 10:51:32 -04004373#define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align) \
4374 static char __noinit __aligned(q_align) \
4375 _k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)]; \
Fabio Baltierif88a4202021-08-04 23:05:54 +01004376 STRUCT_SECTION_ITERABLE(k_msgq, q_name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004377 Z_MSGQ_INITIALIZER(q_name, _k_fifo_buf_##q_name, \
Peter Mitsis1da807e2016-10-06 11:36:59 -04004378 q_msg_size, q_max_msgs)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004379
Peter Mitsisd7a37502016-10-13 11:37:40 -04004380/**
4381 * @brief Initialize a message queue.
4382 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004383 * This routine initializes a message queue object, prior to its first use.
4384 *
Allan Stephensda827222016-11-09 14:23:58 -06004385 * The message queue's ring buffer must contain space for @a max_msgs messages,
4386 * each of which is @a msg_size bytes long. The buffer must be aligned to an
4387 * N-byte boundary, where N is a power of 2 (i.e. 1, 2, 4, ...). To ensure
4388 * that each message is similarly aligned to this boundary, @a msg_size
4389 * must also be a multiple of N.
4390 *
Anas Nashif25c87db2021-03-29 10:54:23 -04004391 * @param msgq Address of the message queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004392 * @param buffer Pointer to ring buffer that holds queued messages.
4393 * @param msg_size Message size (in bytes).
Peter Mitsisd7a37502016-10-13 11:37:40 -04004394 * @param max_msgs Maximum number of messages that can be queued.
4395 *
4396 * @return N/A
4397 */
Anas Nashif25c87db2021-03-29 10:54:23 -04004398void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004399 uint32_t max_msgs);
Andrew Boie0fe789f2018-04-12 18:35:56 -07004400
4401/**
4402 * @brief Initialize a message queue.
4403 *
4404 * This routine initializes a message queue object, prior to its first use,
4405 * allocating its internal ring buffer from the calling thread's resource
4406 * pool.
4407 *
4408 * Memory allocated for the ring buffer can be released by calling
4409 * k_msgq_cleanup(), or if userspace is enabled and the msgq object loses
4410 * all of its references.
4411 *
Anas Nashif4b386592019-11-25 09:30:47 -05004412 * @param msgq Address of the message queue.
Andrew Boie0fe789f2018-04-12 18:35:56 -07004413 * @param msg_size Message size (in bytes).
4414 * @param max_msgs Maximum number of messages that can be queued.
4415 *
4416 * @return 0 on success, -ENOMEM if there was insufficient memory in the
4417 * thread's resource pool, or -EINVAL if the size parameters cause
4418 * an integer overflow.
4419 */
Anas Nashif4b386592019-11-25 09:30:47 -05004420__syscall int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004421 uint32_t max_msgs);
Andrew Boie0fe789f2018-04-12 18:35:56 -07004422
Anas Nashife71293e2019-12-04 20:00:14 -05004423/**
Anas Nashif4b386592019-11-25 09:30:47 -05004424 * @brief Release allocated buffer for a queue
Anas Nashife71293e2019-12-04 20:00:14 -05004425 *
4426 * Releases memory allocated for the ring buffer.
Anas Nashif4b386592019-11-25 09:30:47 -05004427 *
4428 * @param msgq message queue to cleanup
4429 *
Anas Nashif11b93652019-06-16 08:43:48 -04004430 * @retval 0 on success
4431 * @retval -EBUSY Queue not empty
Anas Nashife71293e2019-12-04 20:00:14 -05004432 */
Anas Nashif11b93652019-06-16 08:43:48 -04004433int k_msgq_cleanup(struct k_msgq *msgq);
Peter Mitsisd7a37502016-10-13 11:37:40 -04004434
4435/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004436 * @brief Send a message to a message queue.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004437 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004438 * This routine sends a message to message queue @a msgq.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004439 *
Lauren Murphyf29a2d12020-09-16 21:13:40 -05004440 * @note The message content is copied from @a data into @a msgq and the @a data
4441 * pointer is not retained, so the message content will not be modified
4442 * by this function.
Benjamin Walsh8215ce12016-11-09 19:45:19 -05004443 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004444 * @funcprops \isr_ok
4445 *
Anas Nashif4b386592019-11-25 09:30:47 -05004446 * @param msgq Address of the message queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004447 * @param data Pointer to the message.
Andy Ross78327382020-03-05 15:18:14 -08004448 * @param timeout Non-negative waiting period to add the message,
4449 * or one of the special values K_NO_WAIT and
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01004450 * K_FOREVER.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004451 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004452 * @retval 0 Message sent.
4453 * @retval -ENOMSG Returned without waiting or queue purged.
4454 * @retval -EAGAIN Waiting period timed out.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004455 */
Lauren Murphyf29a2d12020-09-16 21:13:40 -05004456__syscall int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
Peter Mitsisd7a37502016-10-13 11:37:40 -04004457
4458/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004459 * @brief Receive a message from a message queue.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004460 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004461 * This routine receives a message from message queue @a msgq in a "first in,
4462 * first out" manner.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004463 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004464 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
4465 *
4466 * @funcprops \isr_ok
Benjamin Walsh8215ce12016-11-09 19:45:19 -05004467 *
Anas Nashif4b386592019-11-25 09:30:47 -05004468 * @param msgq Address of the message queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004469 * @param data Address of area to hold the received message.
Andy Ross78327382020-03-05 15:18:14 -08004470 * @param timeout Waiting period to receive the message,
4471 * or one of the special values K_NO_WAIT and
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01004472 * K_FOREVER.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004473 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004474 * @retval 0 Message received.
4475 * @retval -ENOMSG Returned without waiting.
4476 * @retval -EAGAIN Waiting period timed out.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004477 */
Andy Ross78327382020-03-05 15:18:14 -08004478__syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout);
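
/* Illustrative sketch: a bounded producer/consumer pair over a queue of ten
 * 32-bit messages. Names and sizes are hypothetical.
 *
 * @code
 * K_MSGQ_DEFINE(sample_msgq, sizeof(uint32_t), 10, 4);
 *
 * void producer(uint32_t value)
 * {
 *         // Drop rather than block when full: returns -ENOMSG with K_NO_WAIT.
 *         (void)k_msgq_put(&sample_msgq, &value, K_NO_WAIT);
 * }
 *
 * void consumer(void)
 * {
 *         uint32_t value;
 *
 *         while (k_msgq_get(&sample_msgq, &value, K_FOREVER) == 0) {
 *                 // Process value.
 *         }
 * }
 * @endcode
 */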
Peter Mitsisd7a37502016-10-13 11:37:40 -04004479
4480/**
Sathish Kuttan3efd8e12018-11-09 21:03:10 -08004481 * @brief Peek/read a message from a message queue.
4482 *
4483 * This routine reads a message from message queue @a msgq in a "first in,
4484 * first out" manner and leaves the message in the queue.
4485 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004486 * @funcprops \isr_ok
Sathish Kuttan3efd8e12018-11-09 21:03:10 -08004487 *
Anas Nashif4b386592019-11-25 09:30:47 -05004488 * @param msgq Address of the message queue.
Sathish Kuttan3efd8e12018-11-09 21:03:10 -08004489 * @param data Address of area to hold the message read from the queue.
4490 *
4491 * @retval 0 Message read.
4492 * @retval -ENOMSG Returned when the queue has no message.
Sathish Kuttan3efd8e12018-11-09 21:03:10 -08004493 */
Anas Nashif4b386592019-11-25 09:30:47 -05004494__syscall int k_msgq_peek(struct k_msgq *msgq, void *data);
Sathish Kuttan3efd8e12018-11-09 21:03:10 -08004495
4496/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004497 * @brief Purge a message queue.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004498 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004499 * This routine discards all unreceived messages in a message queue's ring
4500 * buffer. Any threads that are blocked waiting to send a message to the
4501 * message queue are unblocked and see an -ENOMSG error code.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004502 *
Anas Nashif4b386592019-11-25 09:30:47 -05004503 * @param msgq Address of the message queue.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004504 *
4505 * @return N/A
4506 */
Anas Nashif4b386592019-11-25 09:30:47 -05004507__syscall void k_msgq_purge(struct k_msgq *msgq);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004508
Peter Mitsis67be2492016-10-07 11:44:34 -04004509/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004510 * @brief Get the amount of free space in a message queue.
Peter Mitsis67be2492016-10-07 11:44:34 -04004511 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004512 * This routine returns the number of unused entries in a message queue's
4513 * ring buffer.
Peter Mitsis67be2492016-10-07 11:44:34 -04004514 *
Anas Nashif4b386592019-11-25 09:30:47 -05004515 * @param msgq Address of the message queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004516 *
4517 * @return Number of unused ring buffer entries.
Peter Mitsis67be2492016-10-07 11:44:34 -04004518 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004519__syscall uint32_t k_msgq_num_free_get(struct k_msgq *msgq);
Andrew Boie82edb6e2017-10-02 10:53:06 -07004520
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304521/**
4522 * @brief Get basic attributes of a message queue.
4523 *
4524 * This routine fetches basic attributes of message queue into attr argument.
4525 *
Anas Nashif4b386592019-11-25 09:30:47 -05004526 * @param msgq Address of the message queue.
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304527 * @param attrs pointer to message queue attribute structure.
4528 *
4529 * @return N/A
4530 */
Anas Nashif4b386592019-11-25 09:30:47 -05004531__syscall void k_msgq_get_attrs(struct k_msgq *msgq,
4532 struct k_msgq_attrs *attrs);
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304533
4534
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004535static inline uint32_t z_impl_k_msgq_num_free_get(struct k_msgq *msgq)
Peter Mitsis67be2492016-10-07 11:44:34 -04004536{
Anas Nashif4b386592019-11-25 09:30:47 -05004537 return msgq->max_msgs - msgq->used_msgs;
Peter Mitsis67be2492016-10-07 11:44:34 -04004538}
4539
Peter Mitsisd7a37502016-10-13 11:37:40 -04004540/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004541 * @brief Get the number of messages in a message queue.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004542 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004543 * This routine returns the number of messages in a message queue's ring buffer.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004544 *
Anas Nashif4b386592019-11-25 09:30:47 -05004545 * @param msgq Address of the message queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004546 *
4547 * @return Number of messages.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004548 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004549__syscall uint32_t k_msgq_num_used_get(struct k_msgq *msgq);
Andrew Boie82edb6e2017-10-02 10:53:06 -07004550
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004551static inline uint32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004552{
Anas Nashif4b386592019-11-25 09:30:47 -05004553 return msgq->used_msgs;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004554}
4555
Anas Nashif166f5192018-02-25 08:02:36 -06004556/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05004557
4558/**
Allan Stephensc98da842016-11-11 15:45:03 -05004559 * @defgroup mailbox_apis Mailbox APIs
4560 * @ingroup kernel_apis
4561 * @{
4562 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004563
Anas Nashife71293e2019-12-04 20:00:14 -05004564/**
4565 * @brief Mailbox Message Structure
4566 *
4567 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004568struct k_mbox_msg {
4569 /** internal use only - needed for legacy API support */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004570 uint32_t _mailbox;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004571 /** size of message (in bytes) */
Peter Mitsisd93078c2016-10-14 12:59:37 -04004572 size_t size;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004573 /** application-defined information value */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004574 uint32_t info;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004575 /** sender's message data buffer */
4576 void *tx_data;
4577 /** internal use only - needed for legacy API support */
4578 void *_rx_data;
4579 /** message data block descriptor */
4580 struct k_mem_block tx_block;
4581 /** source thread id */
4582 k_tid_t rx_source_thread;
4583 /** target thread id */
4584 k_tid_t tx_target_thread;
4585 /** internal use only - thread waiting on send (may be a dummy) */
4586 k_tid_t _syncing_thread;
4587#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
4588 /** internal use only - semaphore used during asynchronous send */
4589 struct k_sem *_async_sem;
4590#endif
4591};
Anas Nashife71293e2019-12-04 20:00:14 -05004592/**
4593 * @brief Mailbox Structure
4594 *
4595 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004596struct k_mbox {
Anas Nashife71293e2019-12-04 20:00:14 -05004597 /** Transmit messages queue */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004598 _wait_q_t tx_msg_queue;
Anas Nashife71293e2019-12-04 20:00:14 -05004599 /** Receive message queue */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004600 _wait_q_t rx_msg_queue;
Andy Ross9eeb6b82018-07-25 15:06:24 -07004601 struct k_spinlock lock;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004602
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004603};
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004604/**
4605 * @cond INTERNAL_HIDDEN
4606 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004607
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004608#define Z_MBOX_INITIALIZER(obj) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004609 { \
Patrik Flykt4344e272019-03-08 14:19:05 -07004610 .tx_msg_queue = Z_WAIT_Q_INIT(&obj.tx_msg_queue), \
4611 .rx_msg_queue = Z_WAIT_Q_INIT(&obj.rx_msg_queue), \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004612 }
4613
Peter Mitsis12092702016-10-14 12:57:23 -04004614/**
Allan Stephensc98da842016-11-11 15:45:03 -05004615 * INTERNAL_HIDDEN @endcond
4616 */
4617
4618/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004619 * @brief Statically define and initialize a mailbox.
Peter Mitsis12092702016-10-14 12:57:23 -04004620 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004621 * The mailbox is to be accessed outside the module where it is defined using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004622 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05004623 * @code extern struct k_mbox <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004624 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004625 * @param name Name of the mailbox.
Peter Mitsis12092702016-10-14 12:57:23 -04004626 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004627#define K_MBOX_DEFINE(name) \
Fabio Baltierif88a4202021-08-04 23:05:54 +01004628 STRUCT_SECTION_ITERABLE(k_mbox, name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004629 Z_MBOX_INITIALIZER(name) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004630
Peter Mitsis12092702016-10-14 12:57:23 -04004631/**
4632 * @brief Initialize a mailbox.
4633 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004634 * This routine initializes a mailbox object, prior to its first use.
4635 *
4636 * @param mbox Address of the mailbox.
Peter Mitsis12092702016-10-14 12:57:23 -04004637 *
4638 * @return N/A
4639 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004640extern void k_mbox_init(struct k_mbox *mbox);
4641
Peter Mitsis12092702016-10-14 12:57:23 -04004642/**
4643 * @brief Send a mailbox message in a synchronous manner.
4644 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004645 * This routine sends a message to @a mbox and waits for a receiver to both
4646 * receive and process it. The message data may be in a buffer, in a memory
4647 * pool block, or non-existent (i.e. an empty message).
Peter Mitsis12092702016-10-14 12:57:23 -04004648 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004649 * @param mbox Address of the mailbox.
4650 * @param tx_msg Address of the transmit message descriptor.
Andy Ross78327382020-03-05 15:18:14 -08004651 * @param timeout Waiting period for the message to be received,
4652 * or one of the special values K_NO_WAIT
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004653 * and K_FOREVER. Once the message has been received,
4654 * this routine waits as long as necessary for the message
4655 * to be completely processed.
Peter Mitsis12092702016-10-14 12:57:23 -04004656 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004657 * @retval 0 Message sent.
4658 * @retval -ENOMSG Returned without waiting.
4659 * @retval -EAGAIN Waiting period timed out.
Peter Mitsis12092702016-10-14 12:57:23 -04004660 */
Peter Mitsis40680f62016-10-14 10:04:55 -04004661extern int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
Andy Ross78327382020-03-05 15:18:14 -08004662 k_timeout_t timeout);
Peter Mitsis12092702016-10-14 12:57:23 -04004663
Peter Mitsis12092702016-10-14 12:57:23 -04004664/**
4665 * @brief Send a mailbox message in an asynchronous manner.
4666 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004667 * This routine sends a message to @a mbox without waiting for a receiver
4668 * to process it. The message data may be in a buffer, in a memory pool block,
4669 * or non-existent (i.e. an empty message). Optionally, the semaphore @a sem
4670 * will be given when the message has been both received and completely
4671 * processed by the receiver.
Peter Mitsis12092702016-10-14 12:57:23 -04004672 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004673 * @param mbox Address of the mailbox.
4674 * @param tx_msg Address of the transmit message descriptor.
4675 * @param sem Address of a semaphore, or NULL if none is needed.
Peter Mitsis12092702016-10-14 12:57:23 -04004676 *
4677 * @return N/A
4678 */
Peter Mitsis40680f62016-10-14 10:04:55 -04004679extern void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004680 struct k_sem *sem);
4681
Peter Mitsis12092702016-10-14 12:57:23 -04004682/**
4683 * @brief Receive a mailbox message.
4684 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004685 * This routine receives a message from @a mbox, then optionally retrieves
4686 * its data and disposes of the message.
Peter Mitsis12092702016-10-14 12:57:23 -04004687 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004688 * @param mbox Address of the mailbox.
4689 * @param rx_msg Address of the receive message descriptor.
4690 * @param buffer Address of the buffer to receive data, or NULL to defer data
4691 * retrieval and message disposal until later.
Andy Ross78327382020-03-05 15:18:14 -08004692 * @param timeout Waiting period for a message to be received,
4693 * or one of the special values K_NO_WAIT and K_FOREVER.
Peter Mitsis12092702016-10-14 12:57:23 -04004694 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004695 * @retval 0 Message received.
4696 * @retval -ENOMSG Returned without waiting.
4697 * @retval -EAGAIN Waiting period timed out.
Peter Mitsis12092702016-10-14 12:57:23 -04004698 */
Peter Mitsis40680f62016-10-14 10:04:55 -04004699extern int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg,
Andy Ross78327382020-03-05 15:18:14 -08004700 void *buffer, k_timeout_t timeout);
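
/* Illustrative sketch: a synchronous mailbox exchange between two threads.
 * The names, info value, and 4-byte payload are hypothetical.
 *
 * @code
 * K_MBOX_DEFINE(sample_mbox);
 *
 * void sender(void)
 * {
 *         char payload[4] = { 0 };
 *         struct k_mbox_msg send_msg;
 *
 *         send_msg.info = 123;
 *         send_msg.size = sizeof(payload);
 *         send_msg.tx_data = payload;
 *         send_msg.tx_target_thread = K_ANY;
 *
 *         // Blocks until a receiver consumes and processes the message.
 *         (void)k_mbox_put(&sample_mbox, &send_msg, K_FOREVER);
 * }
 *
 * void receiver(void)
 * {
 *         char buffer[4];
 *         struct k_mbox_msg recv_msg;
 *
 *         recv_msg.size = sizeof(buffer);
 *         recv_msg.rx_source_thread = K_ANY;
 *
 *         if (k_mbox_get(&sample_mbox, &recv_msg, buffer, K_FOREVER) == 0) {
 *                 // recv_msg.info and buffer now hold the sender's data.
 *         }
 * }
 * @endcode
 */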
Peter Mitsis12092702016-10-14 12:57:23 -04004701
4702/**
4703 * @brief Retrieve mailbox message data into a buffer.
4704 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004705 * This routine completes the processing of a received message by retrieving
4706 * its data into a buffer, then disposing of the message.
Peter Mitsis12092702016-10-14 12:57:23 -04004707 *
4708 * Alternatively, this routine can be used to dispose of a received message
4709 * without retrieving its data.
4710 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004711 * @param rx_msg Address of the receive message descriptor.
4712 * @param buffer Address of the buffer to receive data, or NULL to discard
4713 * the data.
Peter Mitsis12092702016-10-14 12:57:23 -04004714 *
4715 * @return N/A
4716 */
Peter Mitsis40680f62016-10-14 10:04:55 -04004717extern void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer);

/** @} */

/**
 * @defgroup pipe_apis Pipe APIs
 * @ingroup kernel_apis
 * @{
 */

/** Pipe Structure */
struct k_pipe {
	unsigned char *buffer;          /**< Pipe buffer: may be NULL */
	size_t         size;            /**< Buffer size */
	size_t         bytes_used;      /**< # bytes used in buffer */
	size_t         read_index;      /**< Where in buffer to read from */
	size_t         write_index;     /**< Where in buffer to write */
	struct k_spinlock lock;         /**< Synchronization lock */

	struct {
		_wait_q_t      readers; /**< Reader wait queue */
		_wait_q_t      writers; /**< Writer wait queue */
	} wait_q;                       /**< Wait queue */

	uint8_t        flags;           /**< Flags */
};

/**
 * @cond INTERNAL_HIDDEN
 */
#define K_PIPE_FLAG_ALLOC	BIT(0)	/**< Buffer was allocated */

#define Z_PIPE_INITIALIZER(obj, pipe_buffer, pipe_buffer_size)	\
	{								\
	.buffer = pipe_buffer,						\
	.size = pipe_buffer_size,					\
	.bytes_used = 0,						\
	.read_index = 0,						\
	.write_index = 0,						\
	.lock = {},							\
	.wait_q = {							\
		.readers = Z_WAIT_Q_INIT(&obj.wait_q.readers),		\
		.writers = Z_WAIT_Q_INIT(&obj.wait_q.writers)		\
	},								\
	.flags = 0							\
	}

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @brief Statically define and initialize a pipe.
 *
 * The pipe can be accessed outside the module where it is defined using:
 *
 * @code extern struct k_pipe <name>; @endcode
 *
 * @param name Name of the pipe.
 * @param pipe_buffer_size Size of the pipe's ring buffer (in bytes),
 *                         or zero if no ring buffer is used.
 * @param pipe_align Alignment of the pipe's ring buffer (power of 2).
 *
 */
#define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align)		\
	static unsigned char __noinit __aligned(pipe_align)		\
		_k_pipe_buf_##name[pipe_buffer_size];			\
	STRUCT_SECTION_ITERABLE(k_pipe, name) =				\
		Z_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size)

/**
 * @brief Initialize a pipe.
 *
 * This routine initializes a pipe object, prior to its first use.
 *
 * @param pipe Address of the pipe.
 * @param buffer Address of the pipe's ring buffer, or NULL if no ring buffer
 *               is used.
 * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
 *             buffer is used.
 *
 * @return N/A
 */
void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size);

/**
 * @brief Release a pipe's allocated buffer
 *
 * If a pipe object was given a dynamically allocated buffer via
 * k_pipe_alloc_init(), this will free it. This function does nothing
 * if the buffer wasn't dynamically allocated.
 *
 * @param pipe Address of the pipe.
 * @retval 0 on success
 * @retval -EAGAIN nothing to cleanup
 */
int k_pipe_cleanup(struct k_pipe *pipe);

/**
 * @brief Initialize a pipe and allocate a buffer for it
 *
 * Storage for the buffer region will be allocated from the calling thread's
 * resource pool. This memory will be released if k_pipe_cleanup() is called,
 * or userspace is enabled and the pipe object loses all references to it.
 *
 * This function should only be called on uninitialized pipe objects.
 *
 * @param pipe Address of the pipe.
 * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
 *             buffer is used.
 * @retval 0 on success
 * @retval -ENOMEM if memory couldn't be allocated
 */
__syscall int k_pipe_alloc_init(struct k_pipe *pipe, size_t size);

/**
 * @brief Write data to a pipe.
 *
 * This routine writes up to @a bytes_to_write bytes of data to @a pipe.
 *
 * @param pipe Address of the pipe.
 * @param data Address of data to write.
 * @param bytes_to_write Size of data (in bytes).
 * @param bytes_written Address of area to hold the number of bytes written.
 * @param min_xfer Minimum number of bytes to write.
 * @param timeout Waiting period to wait for the data to be written,
 *                or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @retval 0 At least @a min_xfer bytes of data were written.
 * @retval -EIO Returned without waiting; zero data bytes were written.
 * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
 *                 minus one data bytes were written.
 */
__syscall int k_pipe_put(struct k_pipe *pipe, void *data,
			 size_t bytes_to_write, size_t *bytes_written,
			 size_t min_xfer, k_timeout_t timeout);

/**
 * @brief Read data from a pipe.
 *
 * This routine reads up to @a bytes_to_read bytes of data from @a pipe.
 *
 * @param pipe Address of the pipe.
 * @param data Address to place the data read from pipe.
 * @param bytes_to_read Maximum number of data bytes to read.
 * @param bytes_read Address of area to hold the number of bytes read.
 * @param min_xfer Minimum number of data bytes to read.
 * @param timeout Waiting period to wait for the data to be read,
 *                or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @retval 0 At least @a min_xfer bytes of data were read.
 * @retval -EINVAL invalid parameters supplied
 * @retval -EIO Returned without waiting; zero data bytes were read.
 * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
 *                 minus one data bytes were read.
 */
__syscall int k_pipe_get(struct k_pipe *pipe, void *data,
			 size_t bytes_to_read, size_t *bytes_read,
			 size_t min_xfer, k_timeout_t timeout);
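
/*
 * Usage sketch combining K_PIPE_DEFINE(), k_pipe_put() and k_pipe_get().
 * The pipe name, buffer sizes, and timeouts are illustrative assumptions:
 *
 * @code
 * K_PIPE_DEFINE(my_pipe, 64, 4);
 *
 * uint8_t tx[16] = { 0 };
 * size_t written;
 *
 * // Block until all 16 bytes have been accepted by the pipe.
 * (void)k_pipe_put(&my_pipe, tx, sizeof(tx), &written, sizeof(tx), K_FOREVER);
 *
 * uint8_t rx[16];
 * size_t num_read;
 *
 * // Return as soon as at least one byte is available, or time out.
 * if (k_pipe_get(&my_pipe, rx, sizeof(rx), &num_read, 1, K_MSEC(100)) == 0) {
 *	// num_read bytes are now valid in rx[]
 * }
 * @endcode
 */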

/**
 * @brief Query the number of bytes that may be read from @a pipe.
 *
 * @param pipe Address of the pipe.
 *
 * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
 *         result is zero for unbuffered pipes.
 */
__syscall size_t k_pipe_read_avail(struct k_pipe *pipe);

/**
 * @brief Query the number of bytes that may be written to @a pipe
 *
 * @param pipe Address of the pipe.
 *
 * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
 *         result is zero for unbuffered pipes.
 */
__syscall size_t k_pipe_write_avail(struct k_pipe *pipe);

/** @} */

/**
 * @cond INTERNAL_HIDDEN
 */

struct k_mem_slab {
	_wait_q_t wait_q;
	struct k_spinlock lock;
	uint32_t num_blocks;
	size_t block_size;
	char *buffer;
	char *free_list;
	uint32_t num_used;
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
	uint32_t max_used;
#endif

};

#define Z_MEM_SLAB_INITIALIZER(obj, slab_buffer, slab_block_size, \
			       slab_num_blocks) \
	{ \
	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
	.lock = {}, \
	.num_blocks = slab_num_blocks, \
	.block_size = slab_block_size, \
	.buffer = slab_buffer, \
	.free_list = NULL, \
	.num_used = 0, \
	}


/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @defgroup mem_slab_apis Memory Slab APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * @brief Statically define and initialize a memory slab.
 *
 * The memory slab's buffer contains @a slab_num_blocks memory blocks
 * that are @a slab_block_size bytes long. The buffer is aligned to a
 * @a slab_align -byte boundary. To ensure that each memory block is similarly
 * aligned to this boundary, @a slab_block_size must also be a multiple of
 * @a slab_align.
 *
 * The memory slab can be accessed outside the module where it is defined
 * using:
 *
 * @code extern struct k_mem_slab <name>; @endcode
 *
 * @param name Name of the memory slab.
 * @param slab_block_size Size of each memory block (in bytes).
 * @param slab_num_blocks Number of memory blocks.
 * @param slab_align Alignment of the memory slab's buffer (power of 2).
 */
#define K_MEM_SLAB_DEFINE(name, slab_block_size, slab_num_blocks, slab_align) \
	char __noinit_named(k_mem_slab_buf_##name) \
	   __aligned(WB_UP(slab_align)) \
	   _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
	STRUCT_SECTION_ITERABLE(k_mem_slab, name) = \
		Z_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
				       WB_UP(slab_block_size), slab_num_blocks)

/**
 * @brief Initialize a memory slab.
 *
 * Initializes a memory slab, prior to its first use.
 *
 * The memory slab's buffer contains @a slab_num_blocks memory blocks
 * that are @a slab_block_size bytes long. The buffer must be aligned to an
 * N-byte boundary matching a word boundary, where N is a power of 2
 * (i.e. 4 on 32-bit systems, 8, 16, ...).
 * To ensure that each memory block is similarly aligned to this boundary,
 * @a slab_block_size must also be a multiple of N.
 *
 * @param slab Address of the memory slab.
 * @param buffer Pointer to buffer used for the memory blocks.
 * @param block_size Size of each memory block (in bytes).
 * @param num_blocks Number of memory blocks.
 *
 * @retval 0 on success
 * @retval -EINVAL invalid data supplied
 *
 */
extern int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
			   size_t block_size, uint32_t num_blocks);

/**
 * @brief Allocate memory from a memory slab.
 *
 * This routine allocates a memory block from a memory slab.
 *
 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
 *
 * @funcprops \isr_ok
 *
 * @param slab Address of the memory slab.
 * @param mem Pointer to block address area.
 * @param timeout Non-negative waiting period to wait for operation to complete.
 *                Use K_NO_WAIT to return without waiting,
 *                or K_FOREVER to wait as long as necessary.
 *
 * @retval 0 Memory allocated. The block address area pointed at by @a mem
 *         is set to the starting address of the memory block.
 * @retval -ENOMEM Returned without waiting.
 * @retval -EAGAIN Waiting period timed out.
 * @retval -EINVAL Invalid data supplied
 */
extern int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
			    k_timeout_t timeout);

/**
 * @brief Free memory allocated from a memory slab.
 *
 * This routine releases a previously allocated memory block back to its
 * associated memory slab.
 *
 * @param slab Address of the memory slab.
 * @param mem Pointer to block address area (as set by k_mem_slab_alloc()).
 *
 * @return N/A
 */
extern void k_mem_slab_free(struct k_mem_slab *slab, void **mem);
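
/*
 * Usage sketch tying the slab calls above together; the slab name and
 * geometry (8 blocks of 64 bytes, 4-byte aligned) are illustrative:
 *
 * @code
 * K_MEM_SLAB_DEFINE(my_slab, 64, 8, 4);
 *
 * void *block;
 *
 * if (k_mem_slab_alloc(&my_slab, &block, K_NO_WAIT) == 0) {
 *	// use the 64-byte block ...
 *	k_mem_slab_free(&my_slab, &block);  // note: pointer-to-pointer
 * }
 * @endcode
 */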

/**
 * @brief Get the number of used blocks in a memory slab.
 *
 * This routine gets the number of memory blocks that are currently
 * allocated in @a slab.
 *
 * @param slab Address of the memory slab.
 *
 * @return Number of allocated memory blocks.
 */
static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
{
	return slab->num_used;
}

/**
 * @brief Get the maximum number of blocks used so far in a memory slab.
 *
 * This routine gets the maximum number of memory blocks that were
 * allocated in @a slab.
 *
 * @param slab Address of the memory slab.
 *
 * @return Maximum number of allocated memory blocks.
 */
static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
{
#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
	return slab->max_used;
#else
	ARG_UNUSED(slab);
	return 0;
#endif
}

/**
 * @brief Get the number of unused blocks in a memory slab.
 *
 * This routine gets the number of memory blocks that are currently
 * unallocated in @a slab.
 *
 * @param slab Address of the memory slab.
 *
 * @return Number of unallocated memory blocks.
 */
static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
{
	return slab->num_blocks - slab->num_used;
}

/** @} */

/**
 * @addtogroup heap_apis
 * @{
 */

/* kernel synchronized heap struct */

struct k_heap {
	struct sys_heap heap;
	_wait_q_t wait_q;
	struct k_spinlock lock;
};

/**
 * @brief Initialize a k_heap
 *
 * This constructs a synchronized k_heap object over a memory region
 * specified by the user. Note that while any alignment and size can
 * be passed as valid parameters, internal alignment restrictions
 * inside the inner sys_heap mean that not all bytes may be usable as
 * allocated memory.
 *
 * @param h Heap struct to initialize
 * @param mem Pointer to memory.
 * @param bytes Size of memory region, in bytes
 */
void k_heap_init(struct k_heap *h, void *mem, size_t bytes);

/** @brief Allocate aligned memory from a k_heap
 *
 * Behaves in all ways like k_heap_alloc(), except that the returned
 * memory (if available) will have a starting address in memory which
 * is a multiple of the specified power-of-two alignment value in
 * bytes. The resulting memory can be returned to the heap using
 * k_heap_free().
 *
 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
 *
 * @funcprops \isr_ok
 *
 * @param h Heap from which to allocate
 * @param align Alignment in bytes, must be a power of two
 * @param bytes Number of bytes requested
 * @param timeout How long to wait, or K_NO_WAIT
 * @return Pointer to memory the caller can now use
 */
void *k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes,
			   k_timeout_t timeout);

/**
 * @brief Allocate memory from a k_heap
 *
 * Allocates and returns a memory buffer from the memory region owned
 * by the heap. If no memory is available immediately, the call will
 * block for the specified timeout (constructed via the standard
 * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
 * freed. If the allocation cannot be performed by the expiration of
 * the timeout, NULL will be returned.
 *
 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
 *
 * @funcprops \isr_ok
 *
 * @param h Heap from which to allocate
 * @param bytes Desired size of block to allocate
 * @param timeout How long to wait, or K_NO_WAIT
 * @return A pointer to valid heap memory, or NULL
 */
void *k_heap_alloc(struct k_heap *h, size_t bytes,
		   k_timeout_t timeout);

/**
 * @brief Free memory allocated by k_heap_alloc()
 *
 * Returns the specified memory block, which must have been returned
 * from k_heap_alloc(), to the heap for use by other callers. Passing
 * a NULL block is legal, and has no effect.
 *
 * @param h Heap to which to return the memory
 * @param mem A valid memory block, or NULL
 */
void k_heap_free(struct k_heap *h, void *mem);
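
/*
 * Usage sketch for a runtime-initialized heap; the backing array size and
 * the 8-byte alignment are illustrative assumptions (8 matches the chunk
 * alignment used by the static definition macros below):
 *
 * @code
 * static char heap_mem[1024] __aligned(8);
 * static struct k_heap my_heap;
 *
 * k_heap_init(&my_heap, heap_mem, sizeof(heap_mem));
 *
 * void *blk = k_heap_alloc(&my_heap, 64, K_MSEC(100)); // wait up to 100 ms
 * if (blk != NULL) {
 *	// use blk ...
 *	k_heap_free(&my_heap, blk);
 * }
 * @endcode
 */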
5165
Andy Rossd3737032021-05-19 09:50:17 -07005166/* Hand-calculated minimum heap sizes needed to return a successful
5167 * 1-byte allocation. See details in lib/os/heap.[ch]
5168 */
5169#define Z_HEAP_MIN_SIZE (sizeof(void *) > 4 ? 56 : 44)
5170
Andy Ross0dd83b82020-04-03 10:01:03 -07005171/**
Daniel Leung10490382021-08-30 10:36:59 -07005172 * @brief Define a static k_heap in the specified linker section
5173 *
5174 * This macro defines and initializes a static memory region and
5175 * k_heap of the requested size in the specified linker section.
5176 * After kernel start, &name can be used as if k_heap_init() had
5177 * been called.
5178 *
5179 * Note that this macro enforces a minimum size on the memory region
5180 * to accommodate metadata requirements. Very small heaps will be
5181 * padded to fit.
5182 *
5183 * @param name Symbol name for the struct k_heap object
5184 * @param bytes Size of memory region, in bytes
5185 * @param in_section __attribute__((section(name))
5186 */
5187#define Z_HEAP_DEFINE_IN_SECT(name, bytes, in_section) \
5188 char in_section \
5189 __aligned(8) /* CHUNK_UNIT */ \
5190 kheap_##name[MAX(bytes, Z_HEAP_MIN_SIZE)]; \
5191 STRUCT_SECTION_ITERABLE(k_heap, name) = { \
5192 .heap = { \
5193 .init_mem = kheap_##name, \
5194 .init_bytes = MAX(bytes, Z_HEAP_MIN_SIZE), \
5195 }, \
5196 }
5197
5198/**
Andy Ross0dd83b82020-04-03 10:01:03 -07005199 * @brief Define a static k_heap
5200 *
5201 * This macro defines and initializes a static memory region and
5202 * k_heap of the requested size. After kernel start, &name can be
5203 * used as if k_heap_init() had been called.
5204 *
Andy Rossd3737032021-05-19 09:50:17 -07005205 * Note that this macro enforces a minimum size on the memory region
5206 * to accommodate metadata requirements. Very small heaps will be
5207 * padded to fit.
5208 *
Andy Ross0dd83b82020-04-03 10:01:03 -07005209 * @param name Symbol name for the struct k_heap object
5210 * @param bytes Size of memory region, in bytes
5211 */
5212#define K_HEAP_DEFINE(name, bytes) \
Daniel Leung10490382021-08-30 10:36:59 -07005213 Z_HEAP_DEFINE_IN_SECT(name, bytes, \
5214 __noinit_named(kheap_buf_##name))
5215
5216/**
5217 * @brief Define a static k_heap in uncached memory
5218 *
5219 * This macro defines and initializes a static memory region and
5220 * k_heap of the requested size in uncache memory. After kernel
5221 * start, &name can be used as if k_heap_init() had been called.
5222 *
5223 * Note that this macro enforces a minimum size on the memory region
5224 * to accommodate metadata requirements. Very small heaps will be
5225 * padded to fit.
5226 *
5227 * @param name Symbol name for the struct k_heap object
5228 * @param bytes Size of memory region, in bytes
5229 */
5230#define K_HEAP_DEFINE_NOCACHE(name, bytes) \
5231 Z_HEAP_DEFINE_IN_SECT(name, bytes, __nocache)
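
/*
 * Usage sketch for the static definition macros above; the heap name and
 * size are illustrative:
 *
 * @code
 * K_HEAP_DEFINE(app_heap, 2048);
 *
 * void *p = k_heap_alloc(&app_heap, 128, K_NO_WAIT);
 * if (p != NULL) {
 *	k_heap_free(&app_heap, p);
 * }
 * @endcode
 */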

/**
 * @}
 */

/**
 * @defgroup heap_apis Heap APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * @brief Allocate memory from the heap with a specified alignment.
 *
 * This routine provides semantics similar to aligned_alloc(); memory is
 * allocated from the heap with a specified alignment. However, one minor
 * difference is that k_aligned_alloc() accepts any non-zero @p size,
 * whereas aligned_alloc() only accepts a @p size that is an integral
 * multiple of @p align.
 *
 * Above, aligned_alloc() refers to:
 * C11 standard (ISO/IEC 9899:2011): 7.22.3.1
 * The aligned_alloc function (p: 347-348)
 *
 * @param align Alignment of memory requested (in bytes).
 * @param size Amount of memory requested (in bytes).
 *
 * @return Address of the allocated memory if successful; otherwise NULL.
 */
extern void *k_aligned_alloc(size_t align, size_t size);

/**
 * @brief Allocate memory from the heap.
 *
 * This routine provides traditional malloc() semantics. Memory is
 * allocated from the heap memory pool.
 *
 * @param size Amount of memory requested (in bytes).
 *
 * @return Address of the allocated memory if successful; otherwise NULL.
 */
extern void *k_malloc(size_t size);

/**
 * @brief Free memory allocated from heap.
 *
 * This routine provides traditional free() semantics. The memory being
 * returned must have been allocated from the heap memory pool or
 * k_mem_pool_malloc().
 *
 * If @a ptr is NULL, no operation is performed.
 *
 * @param ptr Pointer to previously allocated memory.
 *
 * @return N/A
 */
extern void k_free(void *ptr);

/**
 * @brief Allocate memory from heap, array style
 *
 * This routine provides traditional calloc() semantics. Memory is
 * allocated from the heap memory pool and zeroed.
 *
 * @param nmemb Number of elements in the requested array
 * @param size Size of each array element (in bytes).
 *
 * @return Address of the allocated memory if successful; otherwise NULL.
 */
extern void *k_calloc(size_t nmemb, size_t size);
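
/*
 * Usage sketch for the system heap allocators above; the element count and
 * type are illustrative:
 *
 * @code
 * uint32_t *table = k_calloc(16, sizeof(uint32_t)); // 16 zeroed entries
 *
 * if (table != NULL) {
 *	table[0] = 42;
 *	k_free(table);  // k_free() also accepts NULL as a no-op
 * }
 * @endcode
 */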

/** @} */

/* polling API - PRIVATE */

#ifdef CONFIG_POLL
#define _INIT_OBJ_POLL_EVENT(obj) do { (obj)->poll_event = NULL; } while (false)
#else
#define _INIT_OBJ_POLL_EVENT(obj) do { } while (false)
#endif

/* private - types bit positions */
enum _poll_types_bits {
	/* can be used to ignore an event */
	_POLL_TYPE_IGNORE,

	/* to be signaled by k_poll_signal_raise() */
	_POLL_TYPE_SIGNAL,

	/* semaphore availability */
	_POLL_TYPE_SEM_AVAILABLE,

	/* queue/FIFO/LIFO data availability */
	_POLL_TYPE_DATA_AVAILABLE,

	/* msgq data availability */
	_POLL_TYPE_MSGQ_DATA_AVAILABLE,

	_POLL_NUM_TYPES
};

#define Z_POLL_TYPE_BIT(type) (1U << ((type) - 1U))

/* private - states bit positions */
enum _poll_states_bits {
	/* default state when creating event */
	_POLL_STATE_NOT_READY,

	/* signaled by k_poll_signal_raise() */
	_POLL_STATE_SIGNALED,

	/* semaphore is available */
	_POLL_STATE_SEM_AVAILABLE,

	/* data is available to read on queue/FIFO/LIFO */
	_POLL_STATE_DATA_AVAILABLE,

	/* queue/FIFO/LIFO wait was cancelled */
	_POLL_STATE_CANCELLED,

	/* data is available to read on a message queue */
	_POLL_STATE_MSGQ_DATA_AVAILABLE,

	_POLL_NUM_STATES
};

#define Z_POLL_STATE_BIT(state) (1U << ((state) - 1U))

#define _POLL_EVENT_NUM_UNUSED_BITS \
	(32 - (0 \
	       + 8 /* tag */ \
	       + _POLL_NUM_TYPES \
	       + _POLL_NUM_STATES \
	       + 1 /* modes */ \
	      ))

/* end of polling API - PRIVATE */


/**
 * @defgroup poll_apis Async polling APIs
 * @ingroup kernel_apis
 * @{
 */

/* Public polling API */

/* public - values for k_poll_event.type bitfield */
#define K_POLL_TYPE_IGNORE 0
#define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
#define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
#define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
#define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE
#define K_POLL_TYPE_MSGQ_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MSGQ_DATA_AVAILABLE)

/* public - polling modes */
enum k_poll_modes {
	/* polling thread does not take ownership of objects when available */
	K_POLL_MODE_NOTIFY_ONLY = 0,

	K_POLL_NUM_MODES
};

/* public - values for k_poll_event.state bitfield */
#define K_POLL_STATE_NOT_READY 0
#define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED)
#define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
#define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
#define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE
#define K_POLL_STATE_MSGQ_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MSGQ_DATA_AVAILABLE)
#define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED)

/* public - poll signal object */
struct k_poll_signal {
	/** PRIVATE - DO NOT TOUCH */
	sys_dlist_t poll_events;

	/**
	 * 1 if the event has been signaled, 0 otherwise. Stays set to 1 until
	 * user resets it to 0.
	 */
	unsigned int signaled;

	/** custom result value passed to k_poll_signal_raise() if needed */
	int result;
};

#define K_POLL_SIGNAL_INITIALIZER(obj) \
	{ \
	.poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
	.signaled = 0, \
	.result = 0, \
	}
/**
 * @brief Poll Event
 *
 */
struct k_poll_event {
	/** PRIVATE - DO NOT TOUCH */
	sys_dnode_t _node;

	/** PRIVATE - DO NOT TOUCH */
	struct z_poller *poller;

	/** optional user-specified tag, opaque, untouched by the API */
	uint32_t tag:8;

	/** bitfield of event types (bitwise-ORed K_POLL_TYPE_xxx values) */
	uint32_t type:_POLL_NUM_TYPES;

	/** bitfield of event states (bitwise-ORed K_POLL_STATE_xxx values) */
	uint32_t state:_POLL_NUM_STATES;

	/** mode of operation, from enum k_poll_modes */
	uint32_t mode:1;

	/** unused bits in 32-bit word */
	uint32_t unused:_POLL_EVENT_NUM_UNUSED_BITS;

	/** per-type data */
	union {
		void *obj;
		struct k_poll_signal *signal;
		struct k_sem *sem;
		struct k_fifo *fifo;
		struct k_queue *queue;
		struct k_msgq *msgq;
	};
};

#define K_POLL_EVENT_INITIALIZER(_event_type, _event_mode, _event_obj) \
	{ \
	.poller = NULL, \
	.type = _event_type, \
	.state = K_POLL_STATE_NOT_READY, \
	.mode = _event_mode, \
	.unused = 0, \
	{ \
		.obj = _event_obj, \
	}, \
	}

#define K_POLL_EVENT_STATIC_INITIALIZER(_event_type, _event_mode, _event_obj, \
					event_tag) \
	{ \
	.tag = event_tag, \
	.type = _event_type, \
	.state = K_POLL_STATE_NOT_READY, \
	.mode = _event_mode, \
	.unused = 0, \
	{ \
		.obj = _event_obj, \
	}, \
	}

/**
 * @brief Initialize one struct k_poll_event instance
 *
 * After this routine is called on a poll event, the event is ready to be
 * placed in an event array to be passed to k_poll().
 *
 * @param event The event to initialize.
 * @param type A bitfield of the types of event, from the K_POLL_TYPE_xxx
 *             values. Only values that apply to the same object being polled
 *             can be used together. Choosing K_POLL_TYPE_IGNORE disables the
 *             event.
 * @param mode Future. Use K_POLL_MODE_NOTIFY_ONLY.
 * @param obj Kernel object or poll signal.
 *
 * @return N/A
 */

extern void k_poll_event_init(struct k_poll_event *event, uint32_t type,
			      int mode, void *obj);

/**
 * @brief Wait for one or many of multiple poll events to occur
 *
 * This routine allows a thread to wait concurrently for one or many of
 * multiple poll events to have occurred. Such events can be a kernel object
 * being available, like a semaphore, or a poll signal event.
 *
 * When an event notifies that a kernel object is available, the kernel object
 * is not "given" to the thread calling k_poll(): it merely signals the fact
 * that the object was available when the k_poll() call was in effect. Also,
 * all threads trying to acquire an object the regular way, i.e. by pending on
 * the object, have precedence over the thread polling on the object. This
 * means that the polling thread will never get the poll event on an object
 * until the object becomes available and its pend queue is empty. For this
 * reason, the k_poll() call is more effective when the objects being polled
 * only have one thread, the polling thread, trying to acquire them.
 *
 * When k_poll() returns 0, the caller should loop on all the events that were
 * passed to k_poll() and check the state field for the values that were
 * expected and take the associated actions.
 *
 * Before being reused for another call to k_poll(), the user has to reset the
 * state field to K_POLL_STATE_NOT_READY.
 *
 * When called from user mode, a temporary memory allocation is required from
 * the caller's resource pool.
 *
 * @param events An array of events to be polled for.
 * @param num_events The number of events in the array.
 * @param timeout Waiting period for an event to be ready,
 *                or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @retval 0 One or more events are ready.
 * @retval -EAGAIN Waiting period timed out.
 * @retval -EINTR Polling has been interrupted, e.g. with
 *         k_queue_cancel_wait(). All output events are still set and valid,
 *         cancelled event(s) will be set to K_POLL_STATE_CANCELLED. In other
 *         words, -EINTR status means that at least one of output events is
 *         K_POLL_STATE_CANCELLED.
 * @retval -ENOMEM Thread resource pool insufficient memory (user mode only)
 * @retval -EINVAL Bad parameters (user mode only)
 */

__syscall int k_poll(struct k_poll_event *events, int num_events,
		     k_timeout_t timeout);
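
/*
 * Usage sketch: poll a semaphore with k_poll(). The semaphore and the
 * single-event array are illustrative; note the state reset before reuse:
 *
 * @code
 * extern struct k_sem my_sem;
 *
 * struct k_poll_event events[1] = {
 *	K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 *					K_POLL_MODE_NOTIFY_ONLY,
 *					&my_sem, 0),
 * };
 *
 * if (k_poll(events, 1, K_FOREVER) == 0 &&
 *     events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
 *	// k_poll() only notifies availability; the object must still be taken
 *	(void)k_sem_take(&my_sem, K_NO_WAIT);
 * }
 *
 * events[0].state = K_POLL_STATE_NOT_READY;  // required before reuse
 * @endcode
 */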

/**
 * @brief Initialize a poll signal object.
 *
 * Ready a poll signal object to be signaled via k_poll_signal_raise().
 *
 * @param sig A poll signal.
 *
 * @return N/A
 */

__syscall void k_poll_signal_init(struct k_poll_signal *sig);

/**
 * @brief Reset a poll signal object's state to unsignaled.
 *
 * @param sig A poll signal object
 */
__syscall void k_poll_signal_reset(struct k_poll_signal *sig);

/**
 * @brief Fetch the signaled state and result value of a poll signal
 *
 * @param sig A poll signal object
 * @param signaled An integer buffer which will be written nonzero if the
 *                 object was signaled
 * @param result An integer destination buffer which will be written with the
 *               result value if the object was signaled, or an undefined
 *               value if it was not.
 */
__syscall void k_poll_signal_check(struct k_poll_signal *sig,
				   unsigned int *signaled, int *result);

/**
 * @brief Signal a poll signal object.
 *
 * This routine makes ready a poll signal, which is basically a poll event of
 * type K_POLL_TYPE_SIGNAL. If a thread was polling on that event, it will be
 * made ready to run. A @a result value can be specified.
 *
 * The poll signal contains a 'signaled' field that, when set by
 * k_poll_signal_raise(), stays set until the user sets it back to 0 with
 * k_poll_signal_reset(). It thus has to be reset by the user before being
 * passed again to k_poll() or k_poll() will consider it being signaled, and
 * will return immediately.
 *
 * @note The result is stored and the 'signaled' field is set even if
 * this function returns an error indicating that an expiring poll was
 * not notified. The next k_poll() will detect the missed raise.
 *
 * @param sig A poll signal.
 * @param result The value to store in the result field of the signal.
 *
 * @retval 0 The signal was delivered successfully.
 * @retval -EAGAIN The polling thread's timeout is in the process of expiring.
 */

__syscall int k_poll_signal_raise(struct k_poll_signal *sig, int result);
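
/*
 * Usage sketch for the poll-signal calls above; the signal object and the
 * result value are illustrative:
 *
 * @code
 * static struct k_poll_signal sig;
 * struct k_poll_event evt = K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL,
 *						      K_POLL_MODE_NOTIFY_ONLY,
 *						      &sig);
 * k_poll_signal_init(&sig);
 *
 * // elsewhere, e.g. from an ISR:
 * k_poll_signal_raise(&sig, 0x1337);
 *
 * // in the polling thread:
 * if (k_poll(&evt, 1, K_FOREVER) == 0) {
 *	unsigned int signaled;
 *	int result;
 *
 *	k_poll_signal_check(&sig, &signaled, &result);
 *	k_poll_signal_reset(&sig);          // else the next k_poll() returns at once
 *	evt.state = K_POLL_STATE_NOT_READY;
 * }
 * @endcode
 */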

/**
 * @internal
 */
extern void z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state);

/** @} */

/**
 * @defgroup cpu_idle_apis CPU Idling APIs
 * @ingroup kernel_apis
 * @{
 */
/**
 * @brief Make the CPU idle.
 *
 * This function makes the CPU idle until an event wakes it up.
 *
 * In a regular system, the idle thread should be the only thread responsible
 * for making the CPU idle and triggering any type of power management.
 * However, in some more constrained systems, such as a single-threaded system,
 * the only thread would be responsible for this if needed.
 *
 * @note In some architectures, before returning, the function unmasks interrupts
 * unconditionally.
 *
 * @return N/A
 */
static inline void k_cpu_idle(void)
{
	arch_cpu_idle();
}

/**
 * @brief Make the CPU idle in an atomic fashion.
 *
 * Similar to k_cpu_idle(), but must be called with interrupts locked.
 *
 * Enabling interrupts and entering a low-power mode will be atomic,
 * i.e. there will be no period of time where interrupts are enabled before
 * the processor enters a low-power mode.
 *
 * After waking up from the low-power mode, the interrupt lockout state will
 * be restored as if by irq_unlock(key).
 *
 * @param key Interrupt locking key obtained from irq_lock().
 *
 * @return N/A
 */
static inline void k_cpu_atomic_idle(unsigned int key)
{
	arch_cpu_atomic_idle(key);
}
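
/*
 * Usage sketch of the atomic idle pattern described above; the
 * work_is_pending() predicate is a hypothetical application hook:
 *
 * @code
 * unsigned int key = irq_lock();
 *
 * if (!work_is_pending()) {
 *	k_cpu_atomic_idle(key); // re-enables interrupts and idles atomically
 * } else {
 *	irq_unlock(key);
 * }
 * @endcode
 */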

/**
 * @}
 */

/**
 * @internal
 */
#ifdef ARCH_EXCEPT
/* This architecture has direct support for triggering a CPU exception */
#define z_except_reason(reason)	ARCH_EXCEPT(reason)
#else

#if !defined(CONFIG_ASSERT_NO_FILE_INFO)
#define __EXCEPT_LOC() __ASSERT_PRINT("@ %s:%d\n", __FILE__, __LINE__)
#else
#define __EXCEPT_LOC()
#endif

/* NOTE: This is the implementation for arches that do not implement
 * ARCH_EXCEPT() to generate a real CPU exception.
 *
 * We won't have a real exception frame to determine the PC value when
 * the oops occurred, so print file and line number before we jump into
 * the fatal error handler.
 */
#define z_except_reason(reason) do { \
		__EXCEPT_LOC();              \
		z_fatal_error(reason, NULL); \
	} while (false)

#endif /* ARCH_EXCEPT */

/**
 * @brief Fatally terminate a thread
 *
 * This should be called when a thread has encountered an unrecoverable
 * runtime condition and needs to terminate. What this ultimately
 * means is determined by the _fatal_error_handler() implementation, which
 * will be called with reason code K_ERR_KERNEL_OOPS.
 *
 * If this is called from ISR context, the default system fatal error handler
 * will treat it as an unrecoverable system error, just like k_panic().
 */
#define k_oops()	z_except_reason(K_ERR_KERNEL_OOPS)

/**
 * @brief Fatally terminate the system
 *
 * This should be called when the Zephyr kernel has encountered an
 * unrecoverable runtime condition and needs to terminate. What this ultimately
 * means is determined by the _fatal_error_handler() implementation, which
 * will be called with reason code K_ERR_KERNEL_PANIC.
 */
#define k_panic()	z_except_reason(K_ERR_KERNEL_PANIC)
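
/*
 * Usage sketch: terminating on an unrecoverable condition. The consistency
 * check below is a hypothetical application-level example:
 *
 * @code
 * if (ctx->magic != CTX_MAGIC) {  // hypothetical corruption check
 *	k_oops();                  // ends this thread via K_ERR_KERNEL_OOPS
 * }
 * @endcode
 */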

/*
 * private APIs that are utilized by one or more public APIs
 */

/**
 * @internal
 */
extern void z_init_thread_base(struct _thread_base *thread_base,
			       int priority, uint32_t initial_state,
			       unsigned int options);

#ifdef CONFIG_MULTITHREADING
/**
 * @internal
 */
extern void z_init_static_threads(void);
#else
/**
 * @internal
 */
#define z_init_static_threads() do { } while (false)
#endif

/**
 * @internal
 */
extern bool z_is_thread_essential(void);

#ifdef CONFIG_SMP
void z_smp_thread_init(void *arg, struct k_thread *thread);
void z_smp_thread_swap(void);
#endif

/**
 * @internal
 */
extern void z_timer_expiration_handler(struct _timeout *t);

#ifdef CONFIG_PRINTK
/**
 * @brief Emit a character buffer to the console device
 *
 * @param c String of characters to print
 * @param n The length of the string
 *
 */
__syscall void k_str_out(char *c, size_t n);
#endif

/**
 * @brief Disable preservation of floating point context information.
 *
 * This routine informs the kernel that the specified thread
 * will no longer be using the floating point registers.
 *
 * @warning
 * Some architectures apply restrictions on how the disabling of floating
 * point preservation may be requested, see arch_float_disable.
 *
 * @warning
 * This routine should only be used to disable floating point support for
 * a thread that currently has such support enabled.
 *
 * @param thread ID of thread.
 *
 * @retval 0 On success.
 * @retval -ENOTSUP If the floating point disabling is not implemented.
 * @retval -EINVAL If the floating point disabling could not be performed.
 */
__syscall int k_float_disable(struct k_thread *thread);

/**
 * @brief Enable preservation of floating point context information.
 *
 * This routine informs the kernel that the specified thread
 * will use the floating point registers.
 *
 * Invoking this routine initializes the thread's floating point context info
 * to that of an FPU that has been reset. The next time the thread is scheduled
 * by z_swap() it will either inherit an FPU that is guaranteed to be in a
 * "sane" state (if the most recent user of the FPU was cooperatively swapped
 * out) or the thread's own floating point context will be loaded (if the most
 * recent user of the FPU was preempted, or if this thread is the first user
 * of the FPU). Thereafter, the kernel will protect the thread's FP context
 * so that it is not altered during a preemptive context switch.
 *
 * The @a options parameter indicates which floating point register sets will
 * be used by the specified thread.
 *
 * For x86 options:
 *
 * - K_FP_REGS indicates x87 FPU and MMX registers only
 * - K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
 *
 * @warning
 * Some architectures apply restrictions on how the enabling of floating
 * point preservation may be requested, see arch_float_enable.
 *
 * @warning
 * This routine should only be used to enable floating point support for
 * a thread that does not currently have such support enabled.
 *
 * @param thread ID of thread.
 * @param options architecture dependent options
 *
 * @retval 0 On success.
 * @retval -ENOTSUP If the floating point enabling is not implemented.
 * @retval -EINVAL If the floating point enabling could not be performed.
 */
__syscall int k_float_enable(struct k_thread *thread, unsigned int options);
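
/*
 * Usage sketch, assuming an architecture where enabling is implemented;
 * fp_thread is a hypothetical thread object, and 0 requests the default
 * register set (on x86, K_FP_REGS or K_SSE_REGS could be passed instead):
 *
 * @code
 * extern struct k_thread fp_thread;
 *
 * if (k_float_enable(&fp_thread, 0) != 0) {
 *	// enabling is unsupported or failed on this architecture
 * }
 * @endcode
 */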

#ifdef CONFIG_THREAD_RUNTIME_STATS

/**
 * @brief Get the runtime statistics of a thread
 *
 * @param thread ID of thread.
 * @param stats Pointer to struct to copy statistics into.
 * @return -EINVAL if null pointers, otherwise 0
 */
int k_thread_runtime_stats_get(k_tid_t thread,
			       k_thread_runtime_stats_t *stats);

/**
 * @brief Get the runtime statistics of all threads
 *
 * @param stats Pointer to struct to copy statistics into.
 * @return -EINVAL if null pointers, otherwise 0
 */
int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats);
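
/*
 * Usage sketch; the execution_cycles field is the accumulator this era's
 * k_thread_runtime_stats_t is expected to expose (treat as illustrative):
 *
 * @code
 * k_thread_runtime_stats_t stats;
 *
 * if (k_thread_runtime_stats_get(k_current_get(), &stats) == 0) {
 *	printk("cycles: %llu\n", (unsigned long long)stats.execution_cycles);
 * }
 * @endcode
 */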

#endif

#ifdef __cplusplus
}
#endif

#include <tracing/tracing.h>
#include <syscalls/kernel.h>

#endif /* !_ASMLANGUAGE */

#endif /* ZEPHYR_INCLUDE_KERNEL_H_ */