/*
 * Copyright (c) 2016, Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Public kernel APIs.
 */

#ifndef ZEPHYR_INCLUDE_KERNEL_H_
#define ZEPHYR_INCLUDE_KERNEL_H_

#if !defined(_ASMLANGUAGE)
#include <kernel_includes.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <toolchain.h>
#include <tracing/tracing_macros.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief Kernel APIs
 * @defgroup kernel_apis Kernel APIs
 * @{
 * @}
 */

#define K_ANY NULL
#define K_END NULL

#if CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES == 0
#error Zero available thread priorities defined!
#endif

#define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
#define K_PRIO_PREEMPT(x) (x)

#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
#define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
#define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
#define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
#define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)

#ifdef CONFIG_POLL
#define _POLL_EVENT_OBJ_INIT(obj) \
	.poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
#define _POLL_EVENT sys_dlist_t poll_events
#else
#define _POLL_EVENT_OBJ_INIT(obj)
#define _POLL_EVENT
#endif

struct k_thread;
struct k_mutex;
struct k_sem;
struct k_msgq;
struct k_mbox;
struct k_pipe;
struct k_queue;
struct k_fifo;
struct k_lifo;
struct k_stack;
struct k_mem_slab;
struct k_mem_pool;
struct k_timer;
struct k_poll_event;
struct k_poll_signal;
struct k_mem_domain;
struct k_mem_partition;
struct k_futex;
struct k_event;

enum execution_context_types {
	K_ISR = 0,
	K_COOP_THREAD,
	K_PREEMPT_THREAD,
};

/* private, used by k_poll and k_work_poll */
struct k_work_poll;
typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);

/**
 * @addtogroup thread_apis
 * @{
 */

typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
				   void *user_data);

/**
 * @brief Iterate over all the threads in the system.
 *
 * This routine iterates over all the threads in the system and
 * calls the user_cb function for each thread.
 *
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock to protect the _kernel.threads
 * list, which means creation of new threads and terminations of existing
 * threads are blocked until this API returns.
 *
 * @return N/A
 */
extern void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);

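/*
 * Example: a minimal sketch of counting threads with k_thread_foreach().
 * The callback and function names here are illustrative placeholders.
 *
 * @code{.c}
 * static void count_cb(const struct k_thread *thread, void *user_data)
 * {
 *	int *count = user_data;
 *
 *	ARG_UNUSED(thread);
 *	(*count)++;
 * }
 *
 * void report_thread_count(void)
 * {
 *	int count = 0;
 *
 *	k_thread_foreach(count_cb, &count);
 *	printk("%d threads\n", count);
 * }
 * @endcode
 */
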
/**
 * @brief Iterate over all the threads in the system without locking.
 *
 * This routine works exactly the same as @ref k_thread_foreach
 * but unlocks interrupts when user_cb is executed.
 *
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
 * queue elements. It unlocks it during user callback function processing.
 * If a new task is created while this @c foreach function is in progress,
 * the newly added task will not be included in the enumeration.
 * If a task is aborted during this enumeration, there is a race and the
 * aborted task may still be included in the enumeration.
 * @note If a task is aborted and the memory occupied by its @c k_thread
 * structure is reused while this @c k_thread_foreach_unlocked is in progress,
 * the system can become unstable.
 * This function may never return, as it would follow stale @c next task
 * pointers, treating the given memory as a @c k_thread structure when it
 * now holds something different.
 * Do not reuse the memory occupied by the @c k_thread structure of an
 * aborted task if the task was aborted after this function was called, in
 * any context.
 */
extern void k_thread_foreach_unlocked(
	k_thread_user_cb_t user_cb, void *user_data);

/** @} */

/**
 * @defgroup thread_apis Thread APIs
 * @ingroup kernel_apis
 * @{
 */

#endif /* !_ASMLANGUAGE */


/*
 * Thread user options. May be needed by assembly code. Common part uses low
 * bits, arch-specific use high bits.
 */

/**
 * @brief system thread that must not abort
 */
#define K_ESSENTIAL (BIT(0))

#if defined(CONFIG_FPU_SHARING)
/**
 * @brief FPU registers are managed by context switch
 *
 * @details
 * This option indicates that the thread uses the CPU's floating point
 * registers. This instructs the kernel to take additional steps to save
 * and restore the contents of these registers when scheduling the thread.
 * No effect if @kconfig{CONFIG_FPU_SHARING} is not enabled.
 */
#define K_FP_REGS (BIT(1))
#endif

/**
 * @brief user mode thread
 *
 * This thread has dropped from supervisor mode to user mode and consequently
 * has additional restrictions.
 */
#define K_USER (BIT(2))

/**
 * @brief Inherit Permissions
 *
 * @details
 * Indicates that the thread being created should inherit all kernel object
 * permissions from the thread that created it. No effect if
 * @kconfig{CONFIG_USERSPACE} is not enabled.
 */
#define K_INHERIT_PERMS (BIT(3))

/**
 * @brief Callback item state
 *
 * @details
 * This is a single bit of state reserved for "callback manager"
 * utilities (p4wq initially) that need to track operations invoked
 * from within a user-provided callback they have invoked.
 * Effectively it serves as a tiny bit of zero-overhead TLS data.
 */
#define K_CALLBACK_STATE (BIT(4))

#ifdef CONFIG_X86
/* x86 Bitmask definitions for threads user options */

#if defined(CONFIG_FPU_SHARING) && defined(CONFIG_X86_SSE)
/**
 * @brief FP and SSE registers are managed by context switch on x86
 *
 * @details
 * This option indicates that the thread uses the x86 CPU's floating point
 * and SSE registers. This instructs the kernel to take additional steps to
 * save and restore the contents of these registers when scheduling
 * the thread. No effect if @kconfig{CONFIG_X86_SSE} is not enabled.
 */
#define K_SSE_REGS (BIT(7))
#endif
#endif

/* end - thread options */

#if !defined(_ASMLANGUAGE)
/**
 * @brief Create a thread.
 *
 * This routine initializes a thread, then schedules it for execution.
 *
 * The new thread may be scheduled for immediate execution or a delayed start.
 * If the newly spawned thread does not have a delayed start, the kernel
 * scheduler may preempt the current thread to allow the new thread to
 * execute.
 *
 * Thread options are architecture-specific, and can include K_ESSENTIAL,
 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
 * them using "|" (the logical OR operator).
 *
 * Stack objects passed to this function must be originally defined with
 * either of these macros in order to be portable:
 *
 * - K_THREAD_STACK_DEFINE() - For stacks that may support either user or
 *   supervisor threads.
 * - K_KERNEL_STACK_DEFINE() - For stacks that may support supervisor
 *   threads only. These stacks use less memory if CONFIG_USERSPACE is
 *   enabled.
 *
 * The stack_size parameter has constraints. It must either be:
 *
 * - The original size value passed to K_THREAD_STACK_DEFINE() or
 *   K_KERNEL_STACK_DEFINE()
 * - The return value of K_THREAD_STACK_SIZEOF(stack) if the stack was
 *   defined with K_THREAD_STACK_DEFINE()
 * - The return value of K_KERNEL_STACK_SIZEOF(stack) if the stack was
 *   defined with K_KERNEL_STACK_DEFINE().
 *
 * Using other values, or sizeof(stack), may produce undefined behavior.
 *
 * @param new_thread Pointer to uninitialized struct k_thread
 * @param stack Pointer to the stack space.
 * @param stack_size Stack size in bytes.
 * @param entry Thread entry function.
 * @param p1 1st entry point parameter.
 * @param p2 2nd entry point parameter.
 * @param p3 3rd entry point parameter.
 * @param prio Thread priority.
 * @param options Thread options.
 * @param delay Scheduling delay, or K_NO_WAIT (for no delay).
 *
 * @return ID of new thread.
 *
 */
__syscall k_tid_t k_thread_create(struct k_thread *new_thread,
				  k_thread_stack_t *stack,
				  size_t stack_size,
				  k_thread_entry_t entry,
				  void *p1, void *p2, void *p3,
				  int prio, uint32_t options, k_timeout_t delay);

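/*
 * Example: a minimal sketch of creating a thread at preemptible priority 5
 * with a statically defined stack. The names my_stack, my_thread and
 * blink are illustrative placeholders.
 *
 * @code{.c}
 * #define MY_STACK_SIZE 1024
 *
 * K_THREAD_STACK_DEFINE(my_stack, MY_STACK_SIZE);
 * static struct k_thread my_thread;
 *
 * static void blink(void *p1, void *p2, void *p3)
 * {
 *	// ... thread body ...
 * }
 *
 * void start_blinker(void)
 * {
 *	k_thread_create(&my_thread, my_stack,
 *			K_THREAD_STACK_SIZEOF(my_stack),
 *			blink, NULL, NULL, NULL,
 *			K_PRIO_PREEMPT(5), 0, K_NO_WAIT);
 * }
 * @endcode
 */
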
/**
 * @brief Drop a thread's privileges permanently to user mode
 *
 * This allows a supervisor thread to be re-used as a user thread.
 * This function does not return, but control will transfer to the provided
 * entry point as if this was a new user thread.
 *
 * The implementation ensures that the stack buffer contents are erased.
 * Any thread-local storage will be reverted to a pristine state.
 *
 * Memory domain membership, resource pool assignment, kernel object
 * permissions, priority, and thread options are preserved.
 *
 * A common use of this function is to re-use the main thread as a user thread
 * once all supervisor mode-only tasks have been completed.
 *
 * @param entry Function to start executing from
 * @param p1 1st entry point parameter
 * @param p2 2nd entry point parameter
 * @param p3 3rd entry point parameter
 */
extern FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
						   void *p1, void *p2,
						   void *p3);

/**
 * @brief Grant a thread access to a set of kernel objects
 *
 * This is a convenience function. For the provided thread, grant access to
 * the remaining arguments, which must be pointers to kernel objects.
 *
 * The thread object must be initialized (i.e. running). The objects don't
 * need to be.
 * Note that NULL shouldn't be passed as an argument.
 *
 * @param thread Thread to grant access to objects
 * @param ... list of kernel object pointers
 */
#define k_thread_access_grant(thread, ...) \
	FOR_EACH_FIXED_ARG(k_object_access_grant, (;), thread, __VA_ARGS__)

/**
 * @brief Assign a resource memory pool to a thread
 *
 * By default, threads have no resource pool assigned unless their parent
 * thread has a resource pool, in which case it is inherited. Multiple
 * threads may be assigned to the same memory pool.
 *
 * Changing a thread's resource pool will not migrate allocations from the
 * previous pool.
 *
 * @param thread Target thread to assign a memory pool for resource requests.
 * @param heap Heap object to use for resources,
 *             or NULL if the thread should no longer have a memory pool.
 */
static inline void k_thread_heap_assign(struct k_thread *thread,
					struct k_heap *heap)
{
	thread->resource_pool = heap;
}

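/*
 * Example: a minimal sketch of giving a thread a private heap for its
 * resource requests. my_resource_heap and my_thread are illustrative
 * placeholders; K_HEAP_DEFINE() comes from the k_heap API.
 *
 * @code{.c}
 * K_HEAP_DEFINE(my_resource_heap, 2048);
 *
 * void setup(void)
 * {
 *	k_thread_heap_assign(&my_thread, &my_resource_heap);
 * }
 * @endcode
 */
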
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
/**
 * @brief Obtain stack usage information for the specified thread
 *
 * User threads will need to have permission on the target thread object.
 *
 * Some hardware may prevent inspection of a stack buffer currently in use.
 * If this API is called from supervisor mode, on the currently running thread,
 * on a platform which selects @kconfig{CONFIG_NO_UNUSED_STACK_INSPECTION}, an
 * error will be generated.
 *
 * @param thread Thread to inspect stack information
 * @param unused_ptr Output parameter, filled in with the unused stack space
 *	of the target thread in bytes.
 * @return 0 on success
 * @return -EBADF Bad thread object (user mode only)
 * @return -EPERM No permissions on thread object (user mode only)
 * @return -ENOTSUP Forbidden by hardware policy
 * @return -EINVAL Thread is uninitialized or exited (user mode only)
 * @return -EFAULT Bad memory address for unused_ptr (user mode only)
 */
__syscall int k_thread_stack_space_get(const struct k_thread *thread,
				       size_t *unused_ptr);
#endif

#if (CONFIG_HEAP_MEM_POOL_SIZE > 0)
/**
 * @brief Assign the system heap as a thread's resource pool
 *
 * Similar to k_thread_heap_assign(), but the thread will use
 * the kernel heap to draw memory.
 *
 * Use with caution, as a malicious thread could perform DoS attacks on the
 * kernel heap.
 *
 * @param thread Target thread to assign the system heap for resource requests
 *
 */
void k_thread_system_pool_assign(struct k_thread *thread);
#endif /* (CONFIG_HEAP_MEM_POOL_SIZE > 0) */

/**
 * @brief Sleep until a thread exits
 *
 * The caller will be put to sleep until the target thread exits, either due
 * to being aborted, self-exiting, or taking a fatal error. This API returns
 * immediately if the thread isn't running.
 *
 * This API may only be called from ISRs with a K_NO_WAIT timeout,
 * where it can be useful as a predicate to detect when a thread has
 * aborted.
 *
 * @param thread Thread to wait to exit
 * @param timeout upper bound time to wait for the thread to exit.
 * @retval 0 success, target thread has exited or wasn't running
 * @retval -EBUSY returned without waiting
 * @retval -EAGAIN waiting period timed out
 * @retval -EDEADLK target thread is joining on the caller, or target thread
 *	is the caller
 */
__syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);

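/*
 * Example: a minimal sketch of waiting up to one second for a worker
 * thread to exit, then aborting it if it is still running. The tid
 * variable is assumed to come from a prior k_thread_create() call.
 *
 * @code{.c}
 * if (k_thread_join(tid, K_SECONDS(1)) == -EAGAIN) {
 *	// worker did not exit in time; force it out
 *	k_thread_abort(tid);
 * }
 * @endcode
 */
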
/**
 * @brief Put the current thread to sleep.
 *
 * This routine puts the current thread to sleep for the duration
 * specified by @a timeout, a k_timeout_t object.
 *
 * @note If @a timeout is set to K_FOREVER then the thread is suspended.
 *
 * @param timeout Desired duration of sleep.
 *
 * @return Zero if the requested time has elapsed or the number of milliseconds
 * left to sleep, if the thread was woken up by a \ref k_wakeup call.
 */
__syscall int32_t k_sleep(k_timeout_t timeout);

/**
 * @brief Put the current thread to sleep.
 *
 * This routine puts the current thread to sleep for @a ms milliseconds.
 *
 * @param ms Number of milliseconds to sleep.
 *
 * @return Zero if the requested time has elapsed or the number of milliseconds
 * left to sleep, if the thread was woken up by a \ref k_wakeup call.
 */
static inline int32_t k_msleep(int32_t ms)
{
	return k_sleep(Z_TIMEOUT_MS(ms));
}

/**
 * @brief Put the current thread to sleep with microsecond resolution.
 *
 * This function is unlikely to work as expected without kernel tuning.
 * In particular, because the lower bound on the duration of a sleep is
 * the duration of a tick, @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} must be
 * adjusted to achieve the resolution desired. The implications of doing
 * this must be understood before attempting to use k_usleep(). Use with
 * caution.
 *
 * @param us Number of microseconds to sleep.
 *
 * @return Zero if the requested time has elapsed or the number of microseconds
 * left to sleep, if the thread was woken up by a \ref k_wakeup call.
 */
__syscall int32_t k_usleep(int32_t us);

/**
 * @brief Cause the current thread to busy wait.
 *
 * This routine causes the current thread to execute a "do nothing" loop for
 * @a usec_to_wait microseconds.
 *
 * @note The clock used for the microsecond-resolution delay here may
 * be skewed relative to the clock used for system timeouts like
 * k_sleep(). For example k_busy_wait(1000) may take slightly more or
 * less time than k_sleep(K_MSEC(1)), with the offset dependent on
 * clock tolerances.
 *
 * @return N/A
 */
__syscall void k_busy_wait(uint32_t usec_to_wait);

/**
 * @brief Yield the current thread.
 *
 * This routine causes the current thread to yield execution to another
 * thread of the same or higher priority. If there are no other ready threads
 * of the same or higher priority, the routine returns immediately.
 *
 * @return N/A
 */
__syscall void k_yield(void);

/**
 * @brief Wake up a sleeping thread.
 *
 * This routine prematurely wakes up @a thread from sleeping.
 *
 * If @a thread is not currently sleeping, the routine has no effect.
 *
 * @param thread ID of thread to wake.
 *
 * @return N/A
 */
__syscall void k_wakeup(k_tid_t thread);

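/*
 * Example: a minimal sketch of a polling loop that normally sleeps for
 * ten seconds but can be nudged early by k_wakeup() from another thread.
 * poll_hardware() is an illustrative placeholder.
 *
 * @code{.c}
 * void poller(void *p1, void *p2, void *p3)
 * {
 *	while (1) {
 *		poll_hardware();
 *		// returns early (non-zero) if another thread calls k_wakeup()
 *		k_sleep(K_SECONDS(10));
 *	}
 * }
 * @endcode
 */
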
/**
 * @brief Get thread ID of the current thread.
 *
 * This unconditionally queries the kernel via a system call.
 *
 * @return ID of current thread.
 */
__attribute_const__
__syscall k_tid_t z_current_get(void);

#ifdef CONFIG_THREAD_LOCAL_STORAGE
/* Thread-local cache of current thread ID, set in z_thread_entry() */
extern __thread k_tid_t z_tls_current;
#endif

/**
 * @brief Get thread ID of the current thread.
 *
 * @return ID of current thread.
 *
 */
__attribute_const__
static inline k_tid_t k_current_get(void)
{
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	return z_tls_current;
#else
	return z_current_get();
#endif
}

/**
 * @brief Abort a thread.
 *
 * This routine permanently stops execution of @a thread. The thread is taken
 * off all kernel queues it is part of (i.e. the ready queue, the timeout
 * queue, or a kernel object wait queue). However, any kernel resources the
 * thread might currently own (such as mutexes or memory blocks) are not
 * released. It is the responsibility of the caller of this routine to ensure
 * all necessary cleanup is performed.
 *
 * After k_thread_abort() returns, the thread is guaranteed not to be
 * running or to become runnable anywhere on the system. Normally
 * this is done via blocking the caller (in the same manner as
 * k_thread_join()), but in interrupt context on SMP systems the
 * implementation is required to spin for threads that are running on
 * other CPUs. Note that as specified, this means that on SMP
 * platforms it is possible for application code to create a deadlock
 * condition by simultaneously aborting a cycle of threads using at
 * least one termination from interrupt context. Zephyr cannot detect
 * all such conditions.
 *
 * @param thread ID of thread to abort.
 *
 * @return N/A
 */
__syscall void k_thread_abort(k_tid_t thread);


/**
 * @brief Start an inactive thread
 *
 * If a thread was created with K_FOREVER in the delay parameter, it will
 * not be added to the scheduling queue until this function is called
 * on it.
 *
 * @param thread thread to start
 */
__syscall void k_thread_start(k_tid_t thread);

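/*
 * Example: a minimal sketch of creating a thread with K_FOREVER so it
 * stays inactive until k_thread_start() is called. Names are
 * illustrative placeholders.
 *
 * @code{.c}
 * k_tid_t tid = k_thread_create(&my_thread, my_stack,
 *				 K_THREAD_STACK_SIZEOF(my_stack),
 *				 worker, NULL, NULL, NULL,
 *				 K_PRIO_PREEMPT(7), 0, K_FOREVER);
 *
 * // ... later, once prerequisites are ready:
 * k_thread_start(tid);
 * @endcode
 */
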
extern k_ticks_t z_timeout_expires(const struct _timeout *timeout);
extern k_ticks_t z_timeout_remaining(const struct _timeout *timeout);

#ifdef CONFIG_SYS_CLOCK_EXISTS

/**
 * @brief Get time when a thread wakes up, in system ticks
 *
 * This routine computes the system uptime when a waiting thread next
 * executes, in units of system ticks. If the thread is not waiting,
 * it returns current system time.
 */
__syscall k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *t);

static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
	const struct k_thread *t)
{
	return z_timeout_expires(&t->base.timeout);
}

/**
 * @brief Get time remaining before a thread wakes up, in system ticks
 *
 * This routine computes the time remaining before a waiting thread
 * next executes, in units of system ticks. If the thread is not
 * waiting, it returns zero.
 */
__syscall k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *t);

static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
	const struct k_thread *t)
{
	return z_timeout_remaining(&t->base.timeout);
}

#endif /* CONFIG_SYS_CLOCK_EXISTS */

/**
 * @cond INTERNAL_HIDDEN
 */

/* timeout has timed out and is not on _timeout_q anymore */
#define _EXPIRED (-2)

struct _static_thread_data {
	struct k_thread *init_thread;
	k_thread_stack_t *init_stack;
	unsigned int init_stack_size;
	k_thread_entry_t init_entry;
	void *init_p1;
	void *init_p2;
	void *init_p3;
	int init_prio;
	uint32_t init_options;
	int32_t init_delay;
	void (*init_abort)(void);
	const char *init_name;
};

#define Z_THREAD_INITIALIZER(thread, stack, stack_size, \
			    entry, p1, p2, p3, \
			    prio, options, delay, abort, tname) \
	{ \
	.init_thread = (thread), \
	.init_stack = (stack), \
	.init_stack_size = (stack_size), \
	.init_entry = (k_thread_entry_t)entry, \
	.init_p1 = (void *)p1, \
	.init_p2 = (void *)p2, \
	.init_p3 = (void *)p3, \
	.init_prio = (prio), \
	.init_options = (options), \
	.init_delay = (delay), \
	.init_abort = (abort), \
	.init_name = STRINGIFY(tname), \
	}

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @brief Statically define and initialize a thread.
 *
 * The thread may be scheduled for immediate execution or a delayed start.
 *
 * Thread options are architecture-specific, and can include K_ESSENTIAL,
 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
 * them using "|" (the logical OR operator).
 *
 * The ID of the thread can be accessed using:
 *
 * @code extern const k_tid_t <name>; @endcode
 *
 * @param name Name of the thread.
 * @param stack_size Stack size in bytes.
 * @param entry Thread entry function.
 * @param p1 1st entry point parameter.
 * @param p2 2nd entry point parameter.
 * @param p3 3rd entry point parameter.
 * @param prio Thread priority.
 * @param options Thread options.
 * @param delay Scheduling delay (in milliseconds), zero for no delay.
 *
 * @internal It has been observed that the x86 compiler by default aligns
 * these _static_thread_data structures to 32-byte boundaries, thereby
 * wasting space. To work around this, force a 4-byte alignment.
 *
 */
#define K_THREAD_DEFINE(name, stack_size, \
			entry, p1, p2, p3, \
			prio, options, delay) \
	K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size); \
	struct k_thread _k_thread_obj_##name; \
	STRUCT_SECTION_ITERABLE(_static_thread_data, _k_thread_data_##name) = \
		Z_THREAD_INITIALIZER(&_k_thread_obj_##name, \
				    _k_thread_stack_##name, stack_size, \
				    entry, p1, p2, p3, prio, options, delay, \
				    NULL, name); \
	const k_tid_t name = (k_tid_t)&_k_thread_obj_##name

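/*
 * Example: a minimal sketch of statically defining a thread that starts
 * 500 milliseconds after boot. heartbeat and toggle_led() are
 * illustrative placeholders.
 *
 * @code{.c}
 * static void heartbeat(void *p1, void *p2, void *p3)
 * {
 *	while (1) {
 *		toggle_led();
 *		k_msleep(1000);
 *	}
 * }
 *
 * K_THREAD_DEFINE(heartbeat_tid, 512, heartbeat, NULL, NULL, NULL,
 *		   K_PRIO_PREEMPT(10), 0, 500);
 * @endcode
 */
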
/**
 * @brief Get a thread's priority.
 *
 * This routine gets the priority of @a thread.
 *
 * @param thread ID of thread whose priority is needed.
 *
 * @return Priority of @a thread.
 */
__syscall int k_thread_priority_get(k_tid_t thread);

/**
 * @brief Set a thread's priority.
 *
 * This routine immediately changes the priority of @a thread.
 *
 * Rescheduling can occur immediately depending on the priority @a thread is
 * set to:
 *
 * - If its priority is raised above the priority of the caller of this
 * function, and the caller is preemptible, @a thread will be scheduled in.
 *
 * - If the caller operates on itself, it lowers its priority below that of
 * other threads in the system, and the caller is preemptible, the thread of
 * highest priority will be scheduled in.
 *
 * Priority can be assigned in the range of -CONFIG_NUM_COOP_PRIORITIES to
 * CONFIG_NUM_PREEMPT_PRIORITIES-1, where -CONFIG_NUM_COOP_PRIORITIES is the
 * highest priority.
 *
 * @param thread ID of thread whose priority is to be set.
 * @param prio New priority.
 *
 * @warning Changing the priority of a thread currently involved in mutex
 * priority inheritance may result in undefined behavior.
 *
 * @return N/A
 */
__syscall void k_thread_priority_set(k_tid_t thread, int prio);


#ifdef CONFIG_SCHED_DEADLINE
/**
 * @brief Set deadline expiration time for scheduler
 *
 * This sets the "deadline" expiration as a time delta from the
 * current time, in the same units used by k_cycle_get_32(). The
 * scheduler (when deadline scheduling is enabled) will choose the
 * next expiring thread when selecting between threads at the same
 * static priority. Threads at different priorities will be scheduled
 * according to their static priority.
 *
 * @note Deadlines are stored internally using 32 bit unsigned
 * integers. The number of cycles between the "first" deadline in the
 * scheduler queue and the "last" deadline must be less than 2^31 (i.e.
 * a signed non-negative quantity). Failure to adhere to this rule
 * may result in scheduled threads running in an incorrect deadline
 * order.
 *
 * @note Despite the API naming, the scheduler makes no guarantee that
 * the thread WILL be scheduled within that deadline, nor does it take
 * extra metadata (like e.g. the "runtime" and "period" parameters in
 * Linux sched_setattr()) that allows the kernel to validate the
 * scheduling for achievability. Such features could be implemented
 * above this call, which is simply input to the priority selection
 * logic.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_DEADLINE} in your project
 * configuration.
 *
 * @param thread A thread on which to set the deadline
 * @param deadline A time delta, in cycle units
 *
 */
__syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
#endif

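/*
 * Example: a minimal sketch of an EDF-style arrangement where a thread
 * declares, before each work cycle, when its work is due. The 2 ms
 * figure and do_work() are illustrative placeholders; k_us_to_cyc_ceil32()
 * is assumed to be available from the standard time-unit helpers.
 *
 * @code{.c}
 * // this cycle's work must complete within 2 ms from now
 * k_thread_deadline_set(k_current_get(), k_us_to_cyc_ceil32(2000));
 * do_work();
 * @endcode
 */
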
#ifdef CONFIG_SCHED_CPU_MASK
/**
 * @brief Sets all CPU enable masks to zero
 *
 * After this returns, the thread will no longer be schedulable on any
 * CPUs. The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_clear(k_tid_t thread);

/**
 * @brief Sets all CPU enable masks to one
 *
 * After this returns, the thread will be schedulable on any CPU. The
 * thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_enable_all(k_tid_t thread);

/**
 * @brief Enable thread to run on specified CPU
 *
 * The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @param cpu CPU index
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_enable(k_tid_t thread, int cpu);

/**
 * @brief Prevent thread from running on specified CPU
 *
 * The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @param cpu CPU index
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_disable(k_tid_t thread, int cpu);
#endif

/**
 * @brief Suspend a thread.
 *
 * This routine prevents the kernel scheduler from making @a thread
 * the current thread. All other internal operations on @a thread are
 * still performed; for example, kernel objects it is waiting on are
 * still handed to it. Note that any existing timeouts
 * (e.g. k_sleep(), or a timeout argument to k_sem_take() et. al.)
 * will be canceled. On resume, the thread will begin running
 * immediately and return from the blocked call.
 *
 * If @a thread is already suspended, the routine has no effect.
 *
 * @param thread ID of thread to suspend.
 *
 * @return N/A
 */
__syscall void k_thread_suspend(k_tid_t thread);

/**
 * @brief Resume a suspended thread.
 *
 * This routine allows the kernel scheduler to make @a thread the current
 * thread, when it is next eligible for that role.
 *
 * If @a thread is not currently suspended, the routine has no effect.
 *
 * @param thread ID of thread to resume.
 *
 * @return N/A
 */
__syscall void k_thread_resume(k_tid_t thread);

/**
 * @brief Set time-slicing period and scope.
 *
 * This routine specifies how the scheduler will perform time slicing of
 * preemptible threads.
 *
 * To enable time slicing, @a slice must be non-zero. The scheduler
 * ensures that no thread runs for more than the specified time limit
 * before other threads of that priority are given a chance to execute.
 * Any thread whose priority is higher than @a prio is exempted, and may
 * execute as long as desired without being preempted due to time slicing.
 *
 * Time slicing only limits the maximum amount of time a thread may continuously
 * execute. Once the scheduler selects a thread for execution, there is no
 * minimum guaranteed time the thread will execute before threads of greater or
 * equal priority are scheduled.
 *
 * When the current thread is the only one of that priority eligible
 * for execution, this routine has no effect; the thread is immediately
 * rescheduled after the slice period expires.
 *
 * To disable timeslicing, set both @a slice and @a prio to zero.
 *
 * @param slice Maximum time slice length (in milliseconds).
 * @param prio Highest thread priority level eligible for time slicing.
 *
 * @return N/A
 */
extern void k_sched_time_slice_set(int32_t slice, int prio);

/** @} */

/**
 * @addtogroup isr_apis
 * @{
 */

/**
 * @brief Determine if code is running at interrupt level.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it is a thread or an ISR.
 *
 * @funcprops \isr_ok
 *
 * @return false if invoked by a thread.
 * @return true if invoked by an ISR.
 */
extern bool k_is_in_isr(void);

/**
 * @brief Determine if code is running in a preemptible thread.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it can be preempted by another thread. The routine returns a 'true'
 * value if all of the following conditions are met:
 *
 * - The code is running in a thread, not in an ISR.
 * - The thread's priority is in the preemptible range.
 * - The thread has not locked the scheduler.
 *
 * @funcprops \isr_ok
 *
 * @return 0 if invoked by an ISR or by a cooperative thread.
 * @return Non-zero if invoked by a preemptible thread.
 */
__syscall int k_is_preempt_thread(void);

Allan Stephensc98da842016-11-11 15:45:03 -0500923/**
Peter Bigot74ef3952019-12-23 11:48:43 -0600924 * @brief Test whether startup is in the before-main-task phase.
925 *
926 * This routine allows the caller to customize its actions, depending on
927 * whether it being invoked before the kernel is fully active.
928 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +0100929 * @funcprops \isr_ok
Peter Bigot74ef3952019-12-23 11:48:43 -0600930 *
931 * @return true if invoked before post-kernel initialization
932 * @return false if invoked during/after post-kernel initialization
933 */
934static inline bool k_is_pre_kernel(void)
935{
936 extern bool z_sys_post_kernel; /* in init.c */
937
938 return !z_sys_post_kernel;
939}
940
/**
 * @}
 */

/**
 * @addtogroup thread_apis
 * @{
 */

/**
 * @brief Lock the scheduler.
 *
 * This routine prevents the current thread from being preempted by another
 * thread by instructing the scheduler to treat it as a cooperative thread.
 * If the thread subsequently performs an operation that makes it unready,
 * it will be context switched out in the normal manner. When the thread
 * again becomes the current thread, its non-preemptible status is maintained.
 *
 * This routine can be called recursively.
 *
 * @note k_sched_lock() and k_sched_unlock() should normally be used
 * when the operation being performed can be safely interrupted by ISRs.
 * However, if the amount of processing involved is very small, better
 * performance may be obtained by using irq_lock() and irq_unlock().
 *
 * @return N/A
 */
extern void k_sched_lock(void);

/**
 * @brief Unlock the scheduler.
 *
 * This routine reverses the effect of a previous call to k_sched_lock().
 * A thread must call the routine once for each time it called k_sched_lock()
 * before the thread becomes preemptible.
 *
 * @return N/A
 */
extern void k_sched_unlock(void);

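/*
 * Example: a minimal sketch of protecting a short multi-step update from
 * preemption by other threads (ISRs can still run).
 * update_shared_state() is an illustrative placeholder and must not block.
 *
 * @code{.c}
 * k_sched_lock();
 * update_shared_state();
 * k_sched_unlock();
 * @endcode
 */
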
/**
 * @brief Set current thread's custom data.
 *
 * This routine sets the custom data for the current thread to @a value.
 *
 * Custom data is not used by the kernel itself, and is freely available
 * for a thread to use as it sees fit. It can be used as a framework
 * upon which to build thread-local storage.
 *
 * @param value New custom data value.
 *
 * @return N/A
 *
 */
__syscall void k_thread_custom_data_set(void *value);

/**
 * @brief Get current thread's custom data.
 *
 * This routine returns the custom data for the current thread.
 *
 * @return Current custom data value.
 */
__syscall void *k_thread_custom_data_get(void);

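/*
 * Example: a minimal sketch of stashing a per-thread integer in the
 * custom data slot, assuming @kconfig{CONFIG_THREAD_CUSTOM_DATA} is
 * enabled.
 *
 * @code{.c}
 * k_thread_custom_data_set((void *)(uintptr_t)42);
 *
 * uintptr_t my_value = (uintptr_t)k_thread_custom_data_get();
 * @endcode
 */
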
/**
 * @brief Set current thread name
 *
 * Set the name of the thread to be used when @kconfig{CONFIG_THREAD_MONITOR}
 * is enabled for tracing and debugging.
 *
 * @param thread Thread to set name, or NULL to set the current thread
 * @param str Name string
 * @retval 0 on success
 * @retval -EFAULT Memory access error with supplied string
 * @retval -ENOSYS Thread name configuration option not enabled
 * @retval -EINVAL Thread name too long
 */
__syscall int k_thread_name_set(k_tid_t thread, const char *str);

/**
 * @brief Get thread name
 *
 * Get the name of a thread
 *
 * @param thread Thread ID
 * @retval Thread name, or NULL if configuration not enabled
 */
const char *k_thread_name_get(k_tid_t thread);

/**
 * @brief Copy the thread name into a supplied buffer
 *
 * @param thread Thread to obtain name information
 * @param buf Destination buffer
 * @param size Destination buffer size
 * @retval -ENOSPC Destination buffer too small
 * @retval -EFAULT Memory access error
 * @retval -ENOSYS Thread name feature not enabled
 * @retval 0 Success
 */
__syscall int k_thread_name_copy(k_tid_t thread, char *buf,
				 size_t size);

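/*
 * Example: a minimal sketch of logging the current thread's name into a
 * stack buffer, assuming CONFIG_THREAD_MAX_NAME_LEN as the usual bound
 * on thread name length.
 *
 * @code{.c}
 * char name[CONFIG_THREAD_MAX_NAME_LEN];
 *
 * if (k_thread_name_copy(k_current_get(), name, sizeof(name)) == 0) {
 *	printk("running in %s\n", name);
 * }
 * @endcode
 */
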
/**
 * @brief Get thread state string
 *
 * Get the human friendly thread state string
 *
 * @param thread_id Thread ID
 * @retval Thread state string, empty if no state flag is set
 */
const char *k_thread_state_str(k_tid_t thread_id);

/**
 * @}
 */

/**
 * @addtogroup clock_apis
 * @{
 */

/**
 * @brief Generate null timeout delay.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * not to wait if the requested operation cannot be performed immediately.
 *
 * @return Timeout delay value.
 */
#define K_NO_WAIT Z_TIMEOUT_NO_WAIT

/**
 * @brief Generate timeout delay from nanoseconds.
 *
 * This macro generates a timeout delay that instructs a kernel API to
 * wait up to @a t nanoseconds to perform the requested operation.
 * Note that timer precision is limited to the tick rate, not the
 * requested value.
 *
 * @param t Duration in nanoseconds.
 *
 * @return Timeout delay value.
 */
#define K_NSEC(t) Z_TIMEOUT_NS(t)

/**
 * @brief Generate timeout delay from microseconds.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a t microseconds to perform the requested operation.
 * Note that timer precision is limited to the tick rate, not the
 * requested value.
 *
 * @param t Duration in microseconds.
 *
 * @return Timeout delay value.
 */
#define K_USEC(t) Z_TIMEOUT_US(t)

/**
 * @brief Generate timeout delay from cycles.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a t cycles to perform the requested operation.
 *
 * @param t Duration in cycles.
 *
 * @return Timeout delay value.
 */
#define K_CYC(t) Z_TIMEOUT_CYC(t)

/**
 * @brief Generate timeout delay from system ticks.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * to wait up to @a t ticks to perform the requested operation.
 *
 * @param t Duration in system ticks.
 *
 * @return Timeout delay value.
 */
#define K_TICKS(t) Z_TIMEOUT_TICKS(t)

1126/**
Allan Stephensc2f15a42016-11-17 12:24:22 -05001127 * @brief Generate timeout delay from milliseconds.
1128 *
Maksim Masalskife1ff2f2019-10-29 16:50:44 +08001129 * This macro generates a timeout delay that instructs a kernel API
Allan Stephensc2f15a42016-11-17 12:24:22 -05001130 * to wait up to @a ms milliseconds to perform the requested operation.
1131 *
1132 * @param ms Duration in milliseconds.
1133 *
1134 * @return Timeout delay value.
1135 */
Andy Ross78327382020-03-05 15:18:14 -08001136#define K_MSEC(ms) Z_TIMEOUT_MS(ms)
Allan Stephensc2f15a42016-11-17 12:24:22 -05001137
1138/**
1139 * @brief Generate timeout delay from seconds.
1140 *
Maksim Masalskife1ff2f2019-10-29 16:50:44 +08001141 * This macro generates a timeout delay that instructs a kernel API
Allan Stephensc2f15a42016-11-17 12:24:22 -05001142 * to wait up to @a s seconds to perform the requested operation.
1143 *
1144 * @param s Duration in seconds.
1145 *
1146 * @return Timeout delay value.
1147 */
Johan Hedberg14471692016-11-13 10:52:15 +02001148#define K_SECONDS(s) K_MSEC((s) * MSEC_PER_SEC)
Allan Stephensc2f15a42016-11-17 12:24:22 -05001149
1150/**
1151 * @brief Generate timeout delay from minutes.
Maksim Masalskife1ff2f2019-10-29 16:50:44 +08001152 *
1153 * This macro generates a timeout delay that instructs a kernel API
Allan Stephensc2f15a42016-11-17 12:24:22 -05001154 * to wait up to @a m minutes to perform the requested operation.
1155 *
1156 * @param m Duration in minutes.
1157 *
1158 * @return Timeout delay value.
1159 */
Johan Hedberg14471692016-11-13 10:52:15 +02001160#define K_MINUTES(m) K_SECONDS((m) * 60)
Allan Stephensc2f15a42016-11-17 12:24:22 -05001161
1162/**
1163 * @brief Generate timeout delay from hours.
1164 *
Maksim Masalskife1ff2f2019-10-29 16:50:44 +08001165 * This macro generates a timeout delay that instructs a kernel API
Allan Stephensc2f15a42016-11-17 12:24:22 -05001166 * to wait up to @a h hours to perform the requested operation.
1167 *
1168 * @param h Duration in hours.
1169 *
1170 * @return Timeout delay value.
1171 */
Johan Hedberg14471692016-11-13 10:52:15 +02001172#define K_HOURS(h) K_MINUTES((h) * 60)
1173
Allan Stephensc98da842016-11-11 15:45:03 -05001174/**
Allan Stephensc2f15a42016-11-17 12:24:22 -05001175 * @brief Generate infinite timeout delay.
1176 *
Maksim Masalskife1ff2f2019-10-29 16:50:44 +08001177 * This macro generates a timeout delay that instructs a kernel API
Allan Stephensc2f15a42016-11-17 12:24:22 -05001178 * to wait as long as necessary to perform the requested operation.
1179 *
1180 * @return Timeout delay value.
1181 */
Andy Ross78327382020-03-05 15:18:14 -08001182#define K_FOREVER Z_FOREVER
Allan Stephensc2f15a42016-11-17 12:24:22 -05001183
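/*
 * Example (illustrative sketch): the timeout values produced by these
 * macros are passed directly to blocking kernel APIs. "my_sem" is a
 * hypothetical semaphore used only for illustration.
 *
 *     if (k_sem_take(&my_sem, K_NO_WAIT) == 0) {
 *             ...                               // acquired without blocking
 *     }
 *     if (k_sem_take(&my_sem, K_MSEC(100)) == 0) {
 *             ...                               // acquired within 100 ms
 *     }
 *     (void)k_sem_take(&my_sem, K_FOREVER);     // wait indefinitely
 */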
Andy Rosse1bc5952020-03-09 12:19:54 -07001184#ifdef CONFIG_TIMEOUT_64BIT
1185
Allan Stephensc2f15a42016-11-17 12:24:22 -05001186/**
Andy Rosse39bf292020-03-19 10:30:33 -07001187 * @brief Generates an absolute/uptime timeout value from system ticks
Andy Ross4c7b77a2020-03-09 09:35:35 -07001188 *
1189 * This macro generates a timeout delay that represents an expiration
Andy Rosse39bf292020-03-19 10:30:33 -07001190 * at the absolute uptime value specified, in system ticks. That is, the
Andy Ross4c7b77a2020-03-09 09:35:35 -07001191 * timeout will expire immediately after the system uptime reaches the
1192 * specified tick count.
1193 *
1194 * @param t Tick uptime value
1195 * @return Timeout delay value
1196 */
Martin Jäger19c2f782020-11-09 10:14:53 +01001197#define K_TIMEOUT_ABS_TICKS(t) \
1198 Z_TIMEOUT_TICKS(Z_TICK_ABS((k_ticks_t)MAX(t, 0)))
Andy Ross4c7b77a2020-03-09 09:35:35 -07001199
1200/**
Andy Rosse39bf292020-03-19 10:30:33 -07001201 * @brief Generates an absolute/uptime timeout value from milliseconds
Andy Ross4c7b77a2020-03-09 09:35:35 -07001202 *
1203 * This macro generates a timeout delay that represents an expiration
1204 * at the absolute uptime value specified, in milliseconds. That is,
1205 * the timeout will expire immediately after the system uptime reaches
1206 * the specified time.
1207 *
1208 * @param t Millisecond uptime value
1209 * @return Timeout delay value
1210 */
1211#define K_TIMEOUT_ABS_MS(t) K_TIMEOUT_ABS_TICKS(k_ms_to_ticks_ceil64(t))
1212
1213/**
Andy Rosse39bf292020-03-19 10:30:33 -07001214 * @brief Generates an absolute/uptime timeout value from microseconds
Andy Rosse1bc5952020-03-09 12:19:54 -07001215 *
1216 * This macro generates a timeout delay that represents an expiration
1217 * at the absolute uptime value specified, in microseconds. That is,
1218 * the timeout will expire immediately after the system uptime reaches
1219 * the specified time. Note that timer precision is limited by the
1220 * system tick rate and not the requested timeout value.
1221 *
1222 * @param t Microsecond uptime value
1223 * @return Timeout delay value
1224 */
1225#define K_TIMEOUT_ABS_US(t) K_TIMEOUT_ABS_TICKS(k_us_to_ticks_ceil64(t))
1226
1227/**
Andy Rosse39bf292020-03-19 10:30:33 -07001228 * @brief Generates an absolute/uptime timeout value from nanoseconds
Andy Rosse1bc5952020-03-09 12:19:54 -07001229 *
1230 * This macro generates a timeout delay that represents an expiration
1231 * at the absolute uptime value specified, in nanoseconds. That is,
1232 * the timeout will expire immediately after the system uptime reaches
1233 * the specified time. Note that timer precision is limited by the
1234 * system tick rate and not the requested timeout value.
1235 *
1236 * @param t Nanosecond uptime value
1237 * @return Timeout delay value
1238 */
1239#define K_TIMEOUT_ABS_NS(t) K_TIMEOUT_ABS_TICKS(k_ns_to_ticks_ceil64(t))
1240
1241/**
Andy Rosse39bf292020-03-19 10:30:33 -07001242 * @brief Generates an absolute/uptime timeout value from system cycles
Andy Rosse1bc5952020-03-09 12:19:54 -07001243 *
1244 * This macro generates a timeout delay that represents an expiration
1245 * at the absolute uptime value specified, in cycles. That is, the
1246 * timeout will expire immediately after the system uptime reaches the
1247 * specified time. Note that timer precision is limited by the system
1248 * tick rate and not the requested timeout value.
1249 *
1250 * @param t Cycle uptime value
1251 * @return Timeout delay value
1252 */
1253#define K_TIMEOUT_ABS_CYC(t) K_TIMEOUT_ABS_TICKS(k_cyc_to_ticks_ceil64(t))
1254
1255#endif
1256
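/*
 * Example (illustrative sketch, assumes CONFIG_TIMEOUT_64BIT=y):
 * absolute timeouts make drift-free periodic loops straightforward,
 * because each deadline is computed from the uptime epoch rather than
 * from "now". do_periodic_work() is a hypothetical helper.
 *
 *     int64_t next_ms = k_uptime_get() + 100;
 *
 *     for (;;) {
 *             do_periodic_work();
 *             k_sleep(K_TIMEOUT_ABS_MS(next_ms));
 *             next_ms += 100;          // next deadline; no cumulative drift
 *     }
 */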
1257/**
Anas Nashif166f5192018-02-25 08:02:36 -06001258 * @}
Allan Stephensc2f15a42016-11-17 12:24:22 -05001259 */
1260
1261/**
Allan Stephensc98da842016-11-11 15:45:03 -05001262 * @cond INTERNAL_HIDDEN
1263 */
Benjamin Walsha9604bd2016-09-21 11:05:56 -04001264
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001265struct k_timer {
1266 /*
1267 * _timeout structure must be first here if we want to use
1268 * dynamic timer allocation. timeout.node is used in the doubly-linked
1269 * list of free timers
1270 */
1271 struct _timeout timeout;
1272
Allan Stephens45bfa372016-10-12 12:39:42 -05001273 /* wait queue for the (single) thread waiting on this timer */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001274 _wait_q_t wait_q;
1275
1276 /* runs in ISR context */
Flavio Ceolin4b35dd22018-11-16 19:06:59 -08001277 void (*expiry_fn)(struct k_timer *timer);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001278
1279 /* runs in the context of the thread that calls k_timer_stop() */
Flavio Ceolin4b35dd22018-11-16 19:06:59 -08001280 void (*stop_fn)(struct k_timer *timer);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001281
1282 /* timer period */
Andy Ross78327382020-03-05 15:18:14 -08001283 k_timeout_t period;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001284
Allan Stephens45bfa372016-10-12 12:39:42 -05001285 /* timer status */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001286 uint32_t status;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001287
Benjamin Walshe4e98f92017-01-12 19:38:53 -05001288 /* user-specific data, also used to support legacy features */
1289 void *user_data;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001290
Ederson de Souzabdaac352021-11-22 14:46:19 -08001291 SYS_PORT_TRACING_TRACKING_FIELD(k_timer)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001292};
1293
Patrik Flykt97b3bd12019-03-12 15:15:42 -06001294#define Z_TIMER_INITIALIZER(obj, expiry, stop) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001295 { \
Krzysztof Chruscinskibe063272019-02-13 11:19:54 +01001296 .timeout = { \
1297 .node = {},\
Peter Bigote37c7852020-07-07 12:34:05 -05001298 .fn = z_timer_expiration_handler, \
Krzysztof Chruscinskibe063272019-02-13 11:19:54 +01001299 .dticks = 0, \
Krzysztof Chruscinskibe063272019-02-13 11:19:54 +01001300 }, \
Patrik Flykt4344e272019-03-08 14:19:05 -07001301 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
Allan Stephens1342adb2016-11-03 13:54:53 -05001302 .expiry_fn = expiry, \
1303 .stop_fn = stop, \
1304 .status = 0, \
Benjamin Walshe4e98f92017-01-12 19:38:53 -05001305 .user_data = 0, \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001306 }
1307
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001308/**
Allan Stephensc98da842016-11-11 15:45:03 -05001309 * INTERNAL_HIDDEN @endcond
1310 */
1311
1312/**
1313 * @defgroup timer_apis Timer APIs
1314 * @ingroup kernel_apis
1315 * @{
1316 */
1317
1318/**
Allan Stephens5eceb852016-11-16 10:16:30 -05001319 * @typedef k_timer_expiry_t
1320 * @brief Timer expiry function type.
1321 *
1322 * A timer's expiry function is executed by the system clock interrupt handler
1323 * each time the timer expires. The expiry function is optional, and is only
1324 * invoked if the timer has been initialized with one.
1325 *
1326 * @param timer Address of timer.
1327 *
1328 * @return N/A
1329 */
1330typedef void (*k_timer_expiry_t)(struct k_timer *timer);
1331
1332/**
1333 * @typedef k_timer_stop_t
1334 * @brief Timer stop function type.
1335 *
1336 * A timer's stop function is executed if the timer is stopped prematurely.
Peter A. Bigot82a98d72020-09-21 05:34:56 -05001337 * The function runs in the context of the call that stops the timer. As
1338 * k_timer_stop() can be invoked from an ISR, the stop function must be
1339 * callable from interrupt context (isr-ok).
1340 *
Allan Stephens5eceb852016-11-16 10:16:30 -05001341 * The stop function is optional, and is only invoked if the timer has been
1342 * initialized with one.
1343 *
1344 * @param timer Address of timer.
1345 *
1346 * @return N/A
1347 */
1348typedef void (*k_timer_stop_t)(struct k_timer *timer);
1349
1350/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001351 * @brief Statically define and initialize a timer.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001352 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001353 * The timer can be accessed outside the module where it is defined using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001354 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05001355 * @code extern struct k_timer <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001356 *
1357 * @param name Name of the timer variable.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001358 * @param expiry_fn Function to invoke each time the timer expires.
1359 * @param stop_fn Function to invoke if the timer is stopped while running.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001360 */
Allan Stephens1342adb2016-11-03 13:54:53 -05001361#define K_TIMER_DEFINE(name, expiry_fn, stop_fn) \
Fabio Baltierif88a4202021-08-04 23:05:54 +01001362 STRUCT_SECTION_ITERABLE(k_timer, name) = \
Patrik Flykt97b3bd12019-03-12 15:15:42 -06001363 Z_TIMER_INITIALIZER(name, expiry_fn, stop_fn)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001364
Allan Stephens45bfa372016-10-12 12:39:42 -05001365/**
1366 * @brief Initialize a timer.
1367 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001368 * This routine initializes a timer, prior to its first use.
Allan Stephens45bfa372016-10-12 12:39:42 -05001369 *
1370 * @param timer Address of timer.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001371 * @param expiry_fn Function to invoke each time the timer expires.
1372 * @param stop_fn Function to invoke if the timer is stopped while running.
Allan Stephens45bfa372016-10-12 12:39:42 -05001373 *
1374 * @return N/A
1375 */
1376extern void k_timer_init(struct k_timer *timer,
Allan Stephens5eceb852016-11-16 10:16:30 -05001377 k_timer_expiry_t expiry_fn,
1378 k_timer_stop_t stop_fn);
Andy Ross8d8b2ac2016-09-23 10:08:54 -07001379
Allan Stephens45bfa372016-10-12 12:39:42 -05001380/**
1381 * @brief Start a timer.
1382 *
1383 * This routine starts a timer, and resets its status to zero. The timer
1384 * begins counting down using the specified duration and period values.
1385 *
1386 * Attempting to start a timer that is already running is permitted.
1387 * The timer's status is reset to zero and the timer begins counting down
1388 * using the new duration and period values.
1389 *
1390 * @param timer Address of timer.
Andy Ross78327382020-03-05 15:18:14 -08001391 * @param duration Initial timer duration.
1392 * @param period Timer period.
Allan Stephens45bfa372016-10-12 12:39:42 -05001393 *
1394 * @return N/A
1395 */
Andrew Boiea354d492017-09-29 16:22:28 -07001396__syscall void k_timer_start(struct k_timer *timer,
Andy Ross78327382020-03-05 15:18:14 -08001397 k_timeout_t duration, k_timeout_t period);
Allan Stephens45bfa372016-10-12 12:39:42 -05001398
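/*
 * Example (illustrative sketch): a periodic timer that fires first
 * after 1 second and then every 500 ms. The expiry function runs in
 * ISR context; toggle_led() is a hypothetical isr-ok helper.
 *
 *     static void blink_expiry(struct k_timer *timer)
 *     {
 *             toggle_led();
 *     }
 *
 *     K_TIMER_DEFINE(blink_timer, blink_expiry, NULL);
 *
 *     void start_blinking(void)
 *     {
 *             k_timer_start(&blink_timer, K_SECONDS(1), K_MSEC(500));
 *     }
 */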
1399/**
1400 * @brief Stop a timer.
1401 *
1402 * This routine stops a running timer prematurely. The timer's stop function,
1403 * if one exists, is invoked by the caller.
1404 *
1405 * Attempting to stop a timer that is not running is permitted, but has no
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001406 * effect on the timer.
Allan Stephens45bfa372016-10-12 12:39:42 -05001407 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001408 * @note The stop handler has to be callable from ISRs if @a k_timer_stop is to
1409 * be called from ISRs.
1410 *
1411 * @funcprops \isr_ok
Anas Nashif4fb12ae2017-02-01 20:06:55 -05001412 *
Allan Stephens45bfa372016-10-12 12:39:42 -05001413 * @param timer Address of timer.
1414 *
1415 * @return N/A
1416 */
Andrew Boiea354d492017-09-29 16:22:28 -07001417__syscall void k_timer_stop(struct k_timer *timer);
Allan Stephens45bfa372016-10-12 12:39:42 -05001418
1419/**
1420 * @brief Read timer status.
1421 *
1422 * This routine reads the timer's status, which indicates the number of times
1423 * it has expired since its status was last read.
1424 *
1425 * Calling this routine resets the timer's status to zero.
1426 *
1427 * @param timer Address of timer.
1428 *
1429 * @return Timer status.
1430 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001431__syscall uint32_t k_timer_status_get(struct k_timer *timer);
Allan Stephens45bfa372016-10-12 12:39:42 -05001432
1433/**
1434 * @brief Synchronize thread to timer expiration.
1435 *
1436 * This routine blocks the calling thread until the timer's status is non-zero
1437 * (indicating that it has expired at least once since it was last examined)
1438 * or the timer is stopped. If the timer status is already non-zero,
1439 * or the timer is already stopped, the caller continues without waiting.
1440 *
1441 * Calling this routine resets the timer's status to zero.
1442 *
1443 * This routine must not be used by interrupt handlers, since they are not
1444 * allowed to block.
1445 *
1446 * @param timer Address of timer.
1447 *
1448 * @return Timer status.
1449 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001450__syscall uint32_t k_timer_status_sync(struct k_timer *timer);
Allan Stephens45bfa372016-10-12 12:39:42 -05001451
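/*
 * Example (illustrative sketch): pacing a processing loop off a
 * periodic timer; each iteration blocks until the timer has expired
 * at least once since it was last examined. do_step() is hypothetical.
 *
 *     K_TIMER_DEFINE(pace_timer, NULL, NULL);
 *
 *     void paced_loop(void)
 *     {
 *             k_timer_start(&pace_timer, K_MSEC(10), K_MSEC(10));
 *             for (;;) {
 *                     do_step();
 *                     (void)k_timer_status_sync(&pace_timer);
 *             }
 *     }
 */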
Andy Ross5a5d3da2020-03-09 13:59:15 -07001452#ifdef CONFIG_SYS_CLOCK_EXISTS
1453
1454/**
Andy Rosse39bf292020-03-19 10:30:33 -07001455 * @brief Get next expiration time of a timer, in system ticks
Andy Ross5a5d3da2020-03-09 13:59:15 -07001456 *
1457 * This routine returns the future system uptime reached at the next
1458 * time of expiration of the timer, in units of system ticks. If the
1459 * timer is not running, the current system time is returned.
1460 *
1461 * @param timer The timer object
1462 * @return Uptime of expiration, in ticks
1463 */
Peter Bigot0ab314f2020-11-16 15:28:59 -06001464__syscall k_ticks_t k_timer_expires_ticks(const struct k_timer *timer);
Andy Ross5a5d3da2020-03-09 13:59:15 -07001465
Peter Bigot0ab314f2020-11-16 15:28:59 -06001466static inline k_ticks_t z_impl_k_timer_expires_ticks(
1467 const struct k_timer *timer)
Andy Ross5a5d3da2020-03-09 13:59:15 -07001468{
1469 return z_timeout_expires(&timer->timeout);
1470}
1471
1472/**
Andy Rosse39bf292020-03-19 10:30:33 -07001473 * @brief Get time remaining before a timer next expires, in system ticks
Andy Ross5a5d3da2020-03-09 13:59:15 -07001474 *
1475 * This routine computes the time remaining before a running timer
1476 * next expires, in units of system ticks. If the timer is not
1477 * running, it returns zero.
1478 */
Peter Bigot0ab314f2020-11-16 15:28:59 -06001479__syscall k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer);
Andy Ross5a5d3da2020-03-09 13:59:15 -07001480
Peter Bigot0ab314f2020-11-16 15:28:59 -06001481static inline k_ticks_t z_impl_k_timer_remaining_ticks(
1482 const struct k_timer *timer)
Andy Ross5a5d3da2020-03-09 13:59:15 -07001483{
1484 return z_timeout_remaining(&timer->timeout);
1485}
Andy Ross52e444b2018-09-28 09:06:37 -07001486
Allan Stephens45bfa372016-10-12 12:39:42 -05001487/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001488 * @brief Get time remaining before a timer next expires.
Allan Stephens45bfa372016-10-12 12:39:42 -05001489 *
1490 * This routine computes the (approximate) time remaining before a running
1491 * timer next expires. If the timer is not running, it returns zero.
1492 *
1493 * @param timer Address of timer.
1494 *
1495 * @return Remaining time (in milliseconds).
1496 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001497static inline uint32_t k_timer_remaining_get(struct k_timer *timer)
Johan Hedbergf99ad3f2016-12-09 10:39:49 +02001498{
Andy Ross5a5d3da2020-03-09 13:59:15 -07001499 return k_ticks_to_ms_floor32(k_timer_remaining_ticks(timer));
Johan Hedbergf99ad3f2016-12-09 10:39:49 +02001500}
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001501
Andy Ross5a5d3da2020-03-09 13:59:15 -07001502#endif /* CONFIG_SYS_CLOCK_EXISTS */
1503
Allan Stephensc98da842016-11-11 15:45:03 -05001504/**
Benjamin Walshe4e98f92017-01-12 19:38:53 -05001505 * @brief Associate user-specific data with a timer.
1506 *
1507 * This routine records the @a user_data with the @a timer, to be retrieved
1508 * later.
1509 *
1510 * It can be used e.g. in a timer handler shared across multiple subsystems to
1511 * retrieve data specific to the subsystem this timer is associated with.
1512 *
1513 * @param timer Address of timer.
1514 * @param user_data User data to associate with the timer.
1515 *
1516 * @return N/A
1517 */
Andrew Boiea354d492017-09-29 16:22:28 -07001518__syscall void k_timer_user_data_set(struct k_timer *timer, void *user_data);
1519
Anas Nashif954d5502018-02-25 08:37:28 -06001520/**
1521 * @internal
1522 */
Patrik Flykt4344e272019-03-08 14:19:05 -07001523static inline void z_impl_k_timer_user_data_set(struct k_timer *timer,
Andrew Boiea354d492017-09-29 16:22:28 -07001524 void *user_data)
Benjamin Walshe4e98f92017-01-12 19:38:53 -05001525{
1526 timer->user_data = user_data;
1527}
1528
1529/**
1530 * @brief Retrieve the user-specific data from a timer.
1531 *
1532 * @param timer Address of timer.
1533 *
1534 * @return The user data.
1535 */
Peter A. Bigotf1b86ca2020-09-18 16:24:57 -05001536__syscall void *k_timer_user_data_get(const struct k_timer *timer);
Andrew Boiea354d492017-09-29 16:22:28 -07001537
Peter A. Bigotf1b86ca2020-09-18 16:24:57 -05001538static inline void *z_impl_k_timer_user_data_get(const struct k_timer *timer)
Benjamin Walshe4e98f92017-01-12 19:38:53 -05001539{
1540 return timer->user_data;
1541}
1542
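/*
 * Example (illustrative sketch): one expiry handler shared by several
 * timers, using the user data slot to recover per-instance context.
 * "struct my_ctx" is a hypothetical application type.
 *
 *     static void shared_expiry(struct k_timer *timer)
 *     {
 *             struct my_ctx *ctx = k_timer_user_data_get(timer);
 *
 *             ctx->expirations++;
 *     }
 */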
Anas Nashif166f5192018-02-25 08:02:36 -06001543/** @} */
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001544
Allan Stephensc98da842016-11-11 15:45:03 -05001545/**
Allan Stephensc2f15a42016-11-17 12:24:22 -05001546 * @addtogroup clock_apis
Jian Kanga3ec9b02021-07-21 09:52:14 +08001547 * @ingroup kernel_apis
Allan Stephensc98da842016-11-11 15:45:03 -05001548 * @{
1549 */
Allan Stephens45bfa372016-10-12 12:39:42 -05001550
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001551/**
Andy Rosse39bf292020-03-19 10:30:33 -07001552 * @brief Get system uptime, in system ticks.
Andy Ross914205c2020-03-10 15:26:38 -07001553 *
1554 * This routine returns the elapsed time since the system booted, in
Gerard Marull-Paretas72ab6b22021-06-28 17:13:40 +02001555 * ticks (c.f. @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC}), which is the
Andy Ross914205c2020-03-10 15:26:38 -07001556 * fundamental unit of resolution of kernel timekeeping.
1557 *
1558 * @return Current uptime in ticks.
1559 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001560__syscall int64_t k_uptime_ticks(void);
Andy Ross914205c2020-03-10 15:26:38 -07001561
1562/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001563 * @brief Get system uptime.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001564 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001565 * This routine returns the elapsed time since the system booted,
1566 * in milliseconds.
1567 *
David B. Kinder00c41ea2019-06-10 11:13:33 -07001568 * @note
David B. Kinder00c41ea2019-06-10 11:13:33 -07001569 * While this function returns time in milliseconds, it does
1570 * not mean it has millisecond resolution. The actual resolution depends on
Gerard Marull-Paretas72ab6b22021-06-28 17:13:40 +02001571 * @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
Paul Sokolovsky65d51fd2019-02-04 22:44:50 +03001572 *
1573 * @return Current uptime in milliseconds.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001574 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001575static inline int64_t k_uptime_get(void)
Andy Ross914205c2020-03-10 15:26:38 -07001576{
1577 return k_ticks_to_ms_floor64(k_uptime_ticks());
1578}
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001579
Ramesh Thomas89ffd442017-02-05 19:37:19 -08001580/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001581 * @brief Get system uptime (32-bit version).
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001582 *
Peter Bigota6067a32019-08-28 08:19:26 -05001583 * This routine returns the lower 32 bits of the system uptime in
1584 * milliseconds.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001585 *
Peter Bigota6067a32019-08-28 08:19:26 -05001586 * Because correct conversion requires full precision of the system
1587 * clock there is no benefit to using this over k_uptime_get() unless
1588 * you know the application will never run long enough for the system
1589 * clock to approach 2^32 ticks. Calls to this function may involve
1590 * interrupt blocking and 64-bit math.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001591 *
David B. Kinder00c41ea2019-06-10 11:13:33 -07001592 * @note
David B. Kinder00c41ea2019-06-10 11:13:33 -07001593 * While this function returns time in milliseconds, it does
1594 * not mean it has millisecond resolution. The actual resolution depends on
Gerard Marull-Paretas72ab6b22021-06-28 17:13:40 +02001595 * @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
Paul Sokolovsky65d51fd2019-02-04 22:44:50 +03001596 *
Peter Bigota6067a32019-08-28 08:19:26 -05001597 * @return The low 32 bits of the current uptime, in milliseconds.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001598 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001599static inline uint32_t k_uptime_get_32(void)
Peter Bigota6067a32019-08-28 08:19:26 -05001600{
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001601 return (uint32_t)k_uptime_get();
Peter Bigota6067a32019-08-28 08:19:26 -05001602}
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001603
1604/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001605 * @brief Get elapsed time.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001606 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001607 * This routine computes the elapsed time between the current system uptime
1608 * and an earlier reference time, in milliseconds.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001609 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001610 * @param reftime Pointer to a reference time, which is updated to the current
1611 * uptime upon return.
1612 *
1613 * @return Elapsed time.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001614 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001615static inline int64_t k_uptime_delta(int64_t *reftime)
Andy Ross987c0e52018-09-27 16:50:00 -07001616{
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001617 int64_t uptime, delta;
Andy Ross987c0e52018-09-27 16:50:00 -07001618
1619 uptime = k_uptime_get();
1620 delta = uptime - *reftime;
1621 *reftime = uptime;
1622
1623 return delta;
1624}
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001625
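/*
 * Example (illustrative sketch): timing a unit of work in milliseconds.
 * do_work() is a hypothetical helper.
 *
 *     int64_t ref = k_uptime_get();
 *
 *     do_work();
 *     int64_t took_ms = k_uptime_delta(&ref);   // also advances "ref"
 */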
1626/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001627 * @brief Read the hardware clock.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001628 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001629 * This routine returns the current time, as measured by the system's hardware
1630 * clock.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001631 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001632 * @return Current hardware clock up-counter (in cycles).
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001633 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001634static inline uint32_t k_cycle_get_32(void)
Andrew Boie979b17f2019-10-03 15:20:41 -07001635{
Andrew Boie4f77c2a2019-11-07 12:43:29 -08001636 return arch_k_cycle_get_32();
Andrew Boie979b17f2019-10-03 15:20:41 -07001637}
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001638
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001639/**
Christopher Friedt918a5742021-10-29 20:10:35 -04001640 * @brief Read the 64-bit hardware clock.
1641 *
1642 * This routine returns the current time in 64-bits, as measured by the
1643 * system's hardware clock, if available.
1644 *
1645 * @see CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER
1646 *
1647 * @return Current hardware clock up-counter (in cycles).
1648 */
1649static inline uint64_t k_cycle_get_64(void)
1650{
1651 if (!IS_ENABLED(CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER)) {
1652 __ASSERT(0, "64-bit cycle counter not enabled on this platform. "
1653 "See CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER");
1654 return 0;
1655 }
1656
1657 return arch_k_cycle_get_64();
1658}
1659
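/*
 * Example (illustrative sketch): fine-grained measurement with the
 * 32-bit cycle counter; unsigned subtraction tolerates a single wrap.
 * short_operation() is hypothetical; k_cyc_to_ns_floor64() is one of
 * the standard time-unit conversion helpers.
 *
 *     uint32_t start = k_cycle_get_32();
 *
 *     short_operation();
 *     uint64_t ns = k_cyc_to_ns_floor64(k_cycle_get_32() - start);
 */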
1660/**
Anas Nashif166f5192018-02-25 08:02:36 -06001661 * @}
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001662 */
1663
Allan Stephensc98da842016-11-11 15:45:03 -05001664/**
1665 * @cond INTERNAL_HIDDEN
1666 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001667
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001668struct k_queue {
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001669 sys_sflist_t data_q;
Andy Ross603ea422018-07-25 13:01:54 -07001670 struct k_spinlock lock;
Andy Ross99c2d2d2020-06-02 08:34:12 -07001671 _wait_q_t wait_q;
Luiz Augusto von Dentz84db6412017-07-13 12:43:59 +03001672
Andy Ross99c2d2d2020-06-02 08:34:12 -07001673 _POLL_EVENT;
Ederson de Souzabdaac352021-11-22 14:46:19 -08001674
1675 SYS_PORT_TRACING_TRACKING_FIELD(k_queue)
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001676};
1677
Anas Nashif45a1d8a2020-04-24 11:29:17 -04001678#define Z_QUEUE_INITIALIZER(obj) \
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001679 { \
Toby Firth680ec0b2020-10-05 13:45:47 +01001680 .data_q = SYS_SFLIST_STATIC_INIT(&obj.data_q), \
Stephanos Ioannidisf628dcd2019-09-11 18:09:49 +09001681 .lock = { }, \
Andy Ross99c2d2d2020-06-02 08:34:12 -07001682 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
1683 _POLL_EVENT_OBJ_INIT(obj) \
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001684 }
1685
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001686extern void *z_queue_node_peek(sys_sfnode_t *node, bool needs_free);
1687
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001688/**
1689 * INTERNAL_HIDDEN @endcond
1690 */
1691
1692/**
1693 * @defgroup queue_apis Queue APIs
1694 * @ingroup kernel_apis
1695 * @{
1696 */
1697
1698/**
1699 * @brief Initialize a queue.
1700 *
1701 * This routine initializes a queue object, prior to its first use.
1702 *
1703 * @param queue Address of the queue.
1704 *
1705 * @return N/A
1706 */
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001707__syscall void k_queue_init(struct k_queue *queue);
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001708
1709/**
Paul Sokolovsky3f507072017-04-25 17:54:31 +03001710 * @brief Cancel waiting on a queue.
1711 *
1712 * This routine causes the first thread pending on @a queue, if any, to
1713 * return from its k_queue_get() call with NULL (as if the timeout expired).
Paul Sokolovsky45c0b202018-08-21 23:29:11 +03001714 * If the queue is being waited on by k_poll(), it will return with
1715 * -EINTR and K_POLL_STATE_CANCELLED state (and per above, subsequent
1716 * k_queue_get() will return NULL).
Paul Sokolovsky3f507072017-04-25 17:54:31 +03001717 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001718 * @funcprops \isr_ok
Paul Sokolovsky3f507072017-04-25 17:54:31 +03001719 *
1720 * @param queue Address of the queue.
1721 *
1722 * @return N/A
1723 */
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001724__syscall void k_queue_cancel_wait(struct k_queue *queue);
Paul Sokolovsky3f507072017-04-25 17:54:31 +03001725
1726/**
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001727 * @brief Append an element to the end of a queue.
1728 *
1729 * This routine appends a data item to @a queue. A queue data item must be
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001730 * aligned on a word boundary, and the first word of the item is reserved
1731 * for the kernel's use.
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001732 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001733 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001734 *
1735 * @param queue Address of the queue.
1736 * @param data Address of the data item.
1737 *
1738 * @return N/A
1739 */
1740extern void k_queue_append(struct k_queue *queue, void *data);
1741
1742/**
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001743 * @brief Append an element to a queue.
1744 *
Andrew Boieac3dcc12019-04-01 12:28:03 -07001745 * This routine appends a data item to @a queue. There is an implicit memory
1746 * allocation to create an additional temporary bookkeeping data structure from
1747 * the calling thread's resource pool, which is automatically freed when the
1748 * item is removed. The data itself is not copied.
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001749 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001750 * @funcprops \isr_ok
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001751 *
1752 * @param queue Address of the queue.
1753 * @param data Address of the data item.
1754 *
1755 * @retval 0 on success
1756 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
1757 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001758__syscall int32_t k_queue_alloc_append(struct k_queue *queue, void *data);
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001759
1760/**
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001761 * @brief Prepend an element to a queue.
1762 *
1763 * This routine prepends a data item to @a queue. A queue data item must be
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001764 * aligned on a word boundary, and the first word of the item is reserved
1765 * for the kernel's use.
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001766 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001767 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001768 *
1769 * @param queue Address of the queue.
1770 * @param data Address of the data item.
1771 *
1772 * @return N/A
1773 */
1774extern void k_queue_prepend(struct k_queue *queue, void *data);
1775
1776/**
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001777 * @brief Prepend an element to a queue.
1778 *
Andrew Boieac3dcc12019-04-01 12:28:03 -07001779 * This routine prepends a data item to @a queue. There is an implicit memory
1780 * allocation to create an additional temporary bookkeeping data structure from
1781 * the calling thread's resource pool, which is automatically freed when the
1782 * item is removed. The data itself is not copied.
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001783 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001784 * @funcprops \isr_ok
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001785 *
1786 * @param queue Address of the queue.
1787 * @param data Address of the data item.
1788 *
1789 * @retval 0 on success
1790 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
1791 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001792__syscall int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data);
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001793
1794/**
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001795 * @brief Inserts an element to a queue.
1796 *
1797 * This routine inserts a data item to @a queue after previous item. A queue
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001798 * data item must be aligned on a word boundary, and the first word of
1799 * the item is reserved for the kernel's use.
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001800 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001801 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001802 *
1803 * @param queue Address of the queue.
1804 * @param prev Address of the previous data item.
1805 * @param data Address of the data item.
1806 *
1807 * @return N/A
1808 */
1809extern void k_queue_insert(struct k_queue *queue, void *prev, void *data);
1810
1811/**
1812 * @brief Atomically append a list of elements to a queue.
1813 *
1814 * This routine adds a list of data items to @a queue in one operation.
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001815 * The data items must be in a singly-linked list, with the first word
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001816 * in each data item pointing to the next data item; the list must be
1817 * NULL-terminated.
1818 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001819 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001820 *
1821 * @param queue Address of the queue.
1822 * @param head Pointer to first node in singly-linked list.
1823 * @param tail Pointer to last node in singly-linked list.
1824 *
Anas Nashif756d8b02019-06-16 09:53:55 -04001825 * @retval 0 on success
1826 * @retval -EINVAL on invalid supplied data
1827 *
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001828 */
Anas Nashif756d8b02019-06-16 09:53:55 -04001829extern int k_queue_append_list(struct k_queue *queue, void *head, void *tail);
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001830
1831/**
1832 * @brief Atomically add a list of elements to a queue.
1833 *
1834 * This routine adds a list of data items to @a queue in one operation.
1835 * The data items must be in a singly-linked list implemented using a
1836 * sys_slist_t object. Upon completion, the original list is empty.
1837 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001838 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001839 *
1840 * @param queue Address of the queue.
1841 * @param list Pointer to sys_slist_t object.
1842 *
Anas Nashif756d8b02019-06-16 09:53:55 -04001843 * @retval 0 on success
1844 * @retval -EINVAL on invalid data
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001845 */
Anas Nashif756d8b02019-06-16 09:53:55 -04001846extern int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001847
1848/**
1849 * @brief Get an element from a queue.
1850 *
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001851 * This routine removes the first data item from @a queue. The first word of the
1852 * data item is reserved for the kernel's use.
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001853 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001854 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
1855 *
1856 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001857 *
1858 * @param queue Address of the queue.
Andy Ross78327382020-03-05 15:18:14 -08001859 * @param timeout Non-negative waiting period to obtain a data item
1860 * or one of the special values K_NO_WAIT and
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01001861 * K_FOREVER.
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001862 *
1863 * @return Address of the data item if successful; NULL if returned
1864 * without waiting, or waiting period timed out.
1865 */
Andy Ross78327382020-03-05 15:18:14 -08001866__syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout);
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001867
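/*
 * Example (illustrative sketch): queue items reserve their first word
 * for the kernel, so payloads conventionally begin with a scratch
 * pointer. "my_queue" and "struct msg" are hypothetical.
 *
 *     struct msg {
 *             void *reserved;       // first word, owned by the kernel
 *             uint32_t payload;
 *     };
 *
 *     static struct msg m = { .payload = 42 };
 *
 *     k_queue_append(&my_queue, &m);
 *     struct msg *got = k_queue_get(&my_queue, K_FOREVER);
 */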
1868/**
Luiz Augusto von Dentz50b93772017-07-03 16:52:45 +03001869 * @brief Remove an element from a queue.
1870 *
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001871 * This routine removes the specified data item from @a queue. The first word
1872 * of the data item is reserved for the kernel's use. Removing elements from
Luiz Augusto von Dentz50b93772017-07-03 16:52:45 +03001873 * k_queue relies on sys_slist_find_and_remove(), which is not constant-time.
1874 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001875 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
1876 *
1877 * @funcprops \isr_ok
Luiz Augusto von Dentz50b93772017-07-03 16:52:45 +03001878 *
1879 * @param queue Address of the queue.
1880 * @param data Address of the data item.
1881 *
1882 * @return true if data item was removed
1883 */
Torbjörn Leksellf9848232021-03-26 11:19:35 +01001884bool k_queue_remove(struct k_queue *queue, void *data);
Luiz Augusto von Dentz50b93772017-07-03 16:52:45 +03001885
1886/**
Dhananjay Gundapu Jayakrishnan24bfa402018-08-22 12:33:00 +02001887 * @brief Append an element to a queue only if it's not present already.
1888 *
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001889 * This routine appends the data item to @a queue. The first word of the data
1890 * item is reserved for the kernel's use. Appending elements to k_queue
Dhananjay Gundapu Jayakrishnan24bfa402018-08-22 12:33:00 +02001891 * relies on sys_slist_is_node_in_list(), which is not constant-time.
1892 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001893 * @funcprops \isr_ok
Dhananjay Gundapu Jayakrishnan24bfa402018-08-22 12:33:00 +02001894 *
1895 * @param queue Address of the queue.
1896 * @param data Address of the data item.
1897 *
1898 * @return true if data item was added, false if not
1899 */
Torbjörn Leksellf9848232021-03-26 11:19:35 +01001900bool k_queue_unique_append(struct k_queue *queue, void *data);
Dhananjay Gundapu Jayakrishnan24bfa402018-08-22 12:33:00 +02001901
1902/**
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001903 * @brief Query a queue to see if it has data available.
1904 *
1905 * Note that the data might be already gone by the time this function returns
1906 * if other threads are also trying to read from the queue.
1907 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001908 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001909 *
1910 * @param queue Address of the queue.
1911 *
1912 * @return Non-zero if the queue is empty.
1913 * @return 0 if data is available.
1914 */
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001915__syscall int k_queue_is_empty(struct k_queue *queue);
1916
Patrik Flykt4344e272019-03-08 14:19:05 -07001917static inline int z_impl_k_queue_is_empty(struct k_queue *queue)
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001918{
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001919 return (int)sys_sflist_is_empty(&queue->data_q);
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001920}
1921
1922/**
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03001923 * @brief Peek element at the head of queue.
1924 *
1925 * Return the element at the head of the queue without removing it.
1926 *
1927 * @param queue Address of the queue.
1928 *
1929 * @return Head element, or NULL if queue is empty.
1930 */
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001931__syscall void *k_queue_peek_head(struct k_queue *queue);
1932
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03001933/**
1934 * @brief Peek element at the tail of queue.
1935 *
1936 * Return the element at the tail of the queue without removing it.
1937 *
1938 * @param queue Address of the queue.
1939 *
1940 * @return Tail element, or NULL if queue is empty.
1941 */
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001942__syscall void *k_queue_peek_tail(struct k_queue *queue);
1943
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03001944/**
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001945 * @brief Statically define and initialize a queue.
1946 *
1947 * The queue can be accessed outside the module where it is defined using:
1948 *
1949 * @code extern struct k_queue <name>; @endcode
1950 *
1951 * @param name Name of the queue.
1952 */
1953#define K_QUEUE_DEFINE(name) \
Fabio Baltierif88a4202021-08-04 23:05:54 +01001954 STRUCT_SECTION_ITERABLE(k_queue, name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04001955 Z_QUEUE_INITIALIZER(name)
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001956
Anas Nashif166f5192018-02-25 08:02:36 -06001957/** @} */
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001958
Wentong Wu5611e922019-06-20 23:51:27 +08001959#ifdef CONFIG_USERSPACE
1960/**
1961 * @brief futex structure
1962 *
1963 * A k_futex is a lightweight mutual exclusion primitive designed
1964 * to minimize kernel involvement. Uncontended operation relies
1965 * only on atomic access to shared memory. k_futex structures are tracked as
Lauren Murphyd922fed2021-02-01 21:24:47 -06001966 * kernel objects and can live in user memory so that any access
1967 * bypasses the kernel object permission management mechanism.
Wentong Wu5611e922019-06-20 23:51:27 +08001968 */
1969struct k_futex {
1970 atomic_t val;
1971};
1972
1973/**
1974 * @brief futex kernel data structure
1975 *
1976 * z_futex_data is the helper data structure used by k_futex to complete
1977 * contended futex operations on the kernel side; the z_futex_data structure
1978 * of every futex object is invisible in user mode.
1979 */
1980struct z_futex_data {
1981 _wait_q_t wait_q;
1982 struct k_spinlock lock;
1983};
1984
1985#define Z_FUTEX_DATA_INITIALIZER(obj) \
1986 { \
1987 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q) \
1988 }
1989
1990/**
1991 * @defgroup futex_apis FUTEX APIs
1992 * @ingroup kernel_apis
1993 * @{
1994 */
1995
1996/**
Wentong Wu5611e922019-06-20 23:51:27 +08001997 * @brief Pend the current thread on a futex
1998 *
1999 * Tests that the supplied futex contains the expected value, and if so,
2000 * goes to sleep until some other thread calls k_futex_wake() on it.
2001 *
2002 * @param futex Address of the futex.
2003 * @param expected Expected value of the futex, if it is different the caller
2004 * will not wait on it.
Andy Ross78327382020-03-05 15:18:14 -08002005 * @param timeout Non-negative waiting period on the futex, or
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01002006 * one of the special values K_NO_WAIT or K_FOREVER.
Wentong Wu5611e922019-06-20 23:51:27 +08002007 * @retval -EACCES Caller does not have read access to futex address.
2008 * @retval -EAGAIN If the futex value did not match the expected parameter.
2009 * @retval -EINVAL Futex parameter address not recognized by the kernel.
2010 * @retval -ETIMEDOUT Thread woke up due to timeout and not a futex wakeup.
2011 * @retval 0 if the caller went to sleep and was woken up. The caller
2012 * should check the futex's value on wakeup to determine if it needs
2013 * to block again.
2014 */
Andy Ross78327382020-03-05 15:18:14 -08002015__syscall int k_futex_wait(struct k_futex *futex, int expected,
2016 k_timeout_t timeout);
Wentong Wu5611e922019-06-20 23:51:27 +08002017
2018/**
2019 * @brief Wake one/all threads pending on a futex
2020 *
2021 * Wake up the highest priority thread pending on the supplied futex, or
2022 * wake up all threads pending on the supplied futex; the behavior
2023 * depends on @a wake_all.
2024 *
2025 * @param futex Futex to wake up pending threads.
2026 * @param wake_all If true, wake up all pending threads; If false,
2027 * wakeup the highest priority thread.
2028 * @retval -EACCES Caller does not have access to the futex address.
2029 * @retval -EINVAL Futex parameter address not recognized by the kernel.
2030 * @retval Number of threads that were woken up.
2031 */
2032__syscall int k_futex_wake(struct k_futex *futex, bool wake_all);
2033
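/*
 * Example (illustrative sketch, assumes CONFIG_USERSPACE): a minimal
 * futex-based lock. The uncontended path is a single atomic
 * compare-and-set in user memory; the kernel is entered only on
 * contention. A production lock would track waiters more carefully.
 *
 *     static struct k_futex my_lock = { .val = ATOMIC_INIT(0) };
 *
 *     void my_lock_acquire(void)
 *     {
 *             while (!atomic_cas(&my_lock.val, 0, 1)) {
 *                     (void)k_futex_wait(&my_lock, 1, K_FOREVER);
 *             }
 *     }
 *
 *     void my_lock_release(void)
 *     {
 *             atomic_set(&my_lock.val, 0);
 *             (void)k_futex_wake(&my_lock, false);
 *     }
 */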
2034/** @} */
2035#endif
2036
Peter Mitsisae394bf2021-09-20 14:14:32 -04002037/**
2038 * @defgroup event_apis Event APIs
2039 * @ingroup kernel_apis
2040 * @{
2041 */
2042
2043/**
2044 * Event Structure
2045 * @ingroup event_apis
2046 */
2047
2048struct k_event {
2049 _wait_q_t wait_q;
2050 uint32_t events;
2051 struct k_spinlock lock;
2052};
2053
2054#define Z_EVENT_INITIALIZER(obj) \
2055 { \
2056 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2057 .events = 0 \
2058 }
2059/**
2060 * @brief Initialize an event object
2061 *
2062 * This routine initializes an event object, prior to its first use.
2063 *
2064 * @param event Address of the event object.
2065 *
2066 * @return N/A
2067 */
2068
2069__syscall void k_event_init(struct k_event *event);
2070
2071/**
2072 * @brief Post one or more events to an event object
2073 *
2074 * This routine posts one or more events to an event object. All tasks waiting
2075 * on the event object @a event whose waiting conditions become met by this
2076 * posting immediately unpend.
2077 *
2078 * Posting differs from setting in that posted events are merged together with
2079 * the current set of events tracked by the event object.
2080 *
2081 * @param event Address of the event object
2082 * @param events Set of events to post to @a event
2083 *
2084 * @return N/A
2085 */
2086
2087__syscall void k_event_post(struct k_event *event, uint32_t events);
2088
2089/**
2090 * @brief Set the events in an event object
2091 *
2092 * This routine sets the events stored in event object to the specified value.
2093 * All tasks waiting on the event object @a event whose waiting conditions
2094 * become met by this immediately unpend.
2095 *
2096 * Setting differs from posting in that set events replace the current set of
2097 * events tracked by the event object.
2098 *
2099 * @param event Address of the event object
2100 * @param events Set of events to post to @a event
2101 *
2102 * @return N/A
2103 */
2104
2105__syscall void k_event_set(struct k_event *event, uint32_t events);
2106
2107/**
2108 * @brief Wait for any of the specified events
2109 *
2110 * This routine waits on event object @a event until any of the specified
2111 * events have been delivered to the event object, or the maximum wait time
2112 * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
2113 * events that are expressed as bits in a single 32-bit word.
2114 *
2115 * @note The caller must be careful when resetting if there are multiple threads
2116 * waiting for the event object @a event.
2117 *
2118 * @param event Address of the event object
2119 * @param events Set of desired events on which to wait
2120 * @param reset If true, clear the set of events tracked by the event object
2121 * before waiting. If false, do not clear the events.
2122 * @param timeout Waiting period for the desired set of events or one of the
2123 * special values K_NO_WAIT and K_FOREVER.
2124 *
2125 * @retval set of matching events upon success
2126 * @retval 0 if matching events were not received within the specified time
2127 */
2128
2129__syscall uint32_t k_event_wait(struct k_event *event, uint32_t events,
2130 bool reset, k_timeout_t timeout);
2131
2132/**
2133 * @brief Wait for all of the specified events
2134 *
2135 * This routine waits on event object @a event until all of the specified
2136 * events have been delivered to the event object, or the maximum wait time
2137 * @a timeout has expired. A thread may wait on up to 32 distinctly numbered
2138 * events that are expressed as bits in a single 32-bit word.
2139 *
2140 * @note The caller must be careful when resetting if there are multiple threads
2141 * waiting for the event object @a event.
2142 *
2143 * @param event Address of the event object
2144 * @param events Set of desired events on which to wait
2145 * @param reset If true, clear the set of events tracked by the event object
2146 * before waiting. If false, do not clear the events.
2147 * @param timeout Waiting period for the desired set of events or one of the
2148 * special values K_NO_WAIT and K_FOREVER.
2149 *
2150 * @retval set of matching events upon success
2151 * @retval 0 if matching events were not received within the specified time
2152 */
2153
2154__syscall uint32_t k_event_wait_all(struct k_event *event, uint32_t events,
2155 bool reset, k_timeout_t timeout);
2156
2157/**
2158 * @brief Statically define and initialize an event object
2159 *
2160 * The event can be accessed outside the module where it is defined using:
2161 *
2162 * @code extern struct k_event <name>; @endcode
2163 *
2164 * @param name Name of the event object.
2165 */
2166
2167#define K_EVENT_DEFINE(name) \
2168 STRUCT_SECTION_ITERABLE(k_event, name) = \
2169 Z_EVENT_INITIALIZER(name);
2170
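/*
 * Example (illustrative sketch): one thread waits for either of two
 * event bits while another posts them. The bit assignments and
 * handle_error() are hypothetical.
 *
 *     #define EV_RX  BIT(0)
 *     #define EV_ERR BIT(1)
 *
 *     K_EVENT_DEFINE(io_events);
 *
 *     void producer(void)
 *     {
 *             k_event_post(&io_events, EV_RX);
 *     }
 *
 *     void consumer(void)
 *     {
 *             uint32_t ev = k_event_wait(&io_events, EV_RX | EV_ERR,
 *                                        false, K_FOREVER);
 *
 *             if (ev & EV_ERR) {
 *                     handle_error();
 *             }
 *     }
 */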
2171/** @} */
2172
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002173struct k_fifo {
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002174 struct k_queue _queue;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002175};
2176
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04002177/**
2178 * @cond INTERNAL_HIDDEN
2179 */
Patrik Flykt97b3bd12019-03-12 15:15:42 -06002180#define Z_FIFO_INITIALIZER(obj) \
Allan Stephensc98da842016-11-11 15:45:03 -05002181 { \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002182 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
Allan Stephensc98da842016-11-11 15:45:03 -05002183 }
2184
2185/**
2186 * INTERNAL_HIDDEN @endcond
2187 */
2188
2189/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002190 * @defgroup fifo_apis FIFO APIs
Allan Stephensc98da842016-11-11 15:45:03 -05002191 * @ingroup kernel_apis
2192 * @{
2193 */
2194
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002195/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002196 * @brief Initialize a FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002197 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002198 * This routine initializes a FIFO queue, prior to its first use.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002199 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002200 * @param fifo Address of the FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002201 *
2202 * @return N/A
2203 */
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002204#define k_fifo_init(fifo) \
Torbjörn Leksell83ae27b2021-03-26 11:42:16 +01002205 ({ \
2206 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, init, fifo); \
2207 k_queue_init(&(fifo)->_queue); \
2208 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, init, fifo); \
2209 })
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002210
2211/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002212 * @brief Cancel waiting on a FIFO queue.
Paul Sokolovsky3f507072017-04-25 17:54:31 +03002213 *
2214 * This routine causes the first thread pending on @a fifo, if any, to
2215 * return from its k_fifo_get() call with NULL (as if the timeout
2216 * expired).
2217 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002218 * @funcprops \isr_ok
Paul Sokolovsky3f507072017-04-25 17:54:31 +03002219 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002220 * @param fifo Address of the FIFO queue.
Paul Sokolovsky3f507072017-04-25 17:54:31 +03002221 *
2222 * @return N/A
2223 */
2224#define k_fifo_cancel_wait(fifo) \
Torbjörn Leksell83ae27b2021-03-26 11:42:16 +01002225 ({ \
2226 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, cancel_wait, fifo); \
2227 k_queue_cancel_wait(&(fifo)->_queue); \
2228 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, cancel_wait, fifo); \
2229 })
Paul Sokolovsky3f507072017-04-25 17:54:31 +03002230
2231/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002232 * @brief Add an element to a FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002233 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002234 * This routine adds a data item to @a fifo. A FIFO data item must be
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04002235 * aligned on a word boundary, and the first word of the item is reserved
2236 * for the kernel's use.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002237 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002238 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002239 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002240 * @param fifo Address of the FIFO.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002241 * @param data Address of the data item.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002242 *
2243 * @return N/A
2244 */
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002245#define k_fifo_put(fifo, data) \
Torbjörn Leksell83ae27b2021-03-26 11:42:16 +01002246 ({ \
2247 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put, fifo, data); \
2248 k_queue_append(&(fifo)->_queue, data); \
2249 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put, fifo, data); \
2250 })
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002251
2252/**
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002253 * @brief Add an element to a FIFO queue.
2254 *
Andrew Boieac3dcc12019-04-01 12:28:03 -07002255 * This routine adds a data item to @a fifo. There is an implicit memory
2256 * allocation to create an additional temporary bookkeeping data structure from
2257 * the calling thread's resource pool, which is automatically freed when the
2258 * item is removed. The data itself is not copied.
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002259 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002260 * @funcprops \isr_ok
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002261 *
2262 * @param fifo Address of the FIFO.
2263 * @param data Address of the data item.
2264 *
2265 * @retval 0 on success
2266 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2267 */
2268#define k_fifo_alloc_put(fifo, data) \
Torbjörn Leksell83ae27b2021-03-26 11:42:16 +01002269 ({ \
2270 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, alloc_put, fifo, data); \
2271 int ret = k_queue_alloc_append(&(fifo)->_queue, data); \
2272 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, alloc_put, fifo, data, ret); \
2273 ret; \
2274 })
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002275
2276/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002277 * @brief Atomically add a list of elements to a FIFO.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002278 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002279 * This routine adds a list of data items to @a fifo in one operation.
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04002280 * The data items must be in a singly-linked list, with the first word of
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002281 * each data item pointing to the next data item; the list must be
2282 * NULL-terminated.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002283 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002284 * @funcprops \isr_ok
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002285 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002286 * @param fifo Address of the FIFO queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002287 * @param head Pointer to first node in singly-linked list.
2288 * @param tail Pointer to last node in singly-linked list.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002289 *
2290 * @return N/A
2291 */
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002292#define k_fifo_put_list(fifo, head, tail) \
Torbjörn Leksell83ae27b2021-03-26 11:42:16 +01002293 ({ \
2294 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_list, fifo, head, tail); \
2295 k_queue_append_list(&(fifo)->_queue, head, tail); \
2296 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_list, fifo, head, tail); \
2297 })

/**
 * @brief Atomically add a list of elements to a FIFO queue.
 *
 * This routine adds a list of data items to @a fifo in one operation.
 * The data items must be in a singly-linked list implemented using a
 * sys_slist_t object. Upon completion, the sys_slist_t object is invalid
 * and must be re-initialized via sys_slist_init().
 *
 * @funcprops \isr_ok
 *
 * @param fifo Address of the FIFO queue.
 * @param list Pointer to sys_slist_t object.
 *
 * @return N/A
 */
#define k_fifo_put_slist(fifo, list) \
	({ \
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_slist, fifo, list); \
	k_queue_merge_slist(&(fifo)->_queue, list); \
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_slist, fifo, list); \
	})

/**
 * @brief Get an element from a FIFO queue.
 *
 * This routine removes a data item from @a fifo in a "first in, first out"
 * manner. The first word of the data item is reserved for the kernel's use.
 *
 * @note @a timeout must be set to K_NO_WAIT if called from an ISR.
 *
 * @funcprops \isr_ok
 *
 * @param fifo Address of the FIFO queue.
 * @param timeout Waiting period to obtain a data item,
 *                or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @return Address of the data item if successful; NULL if returned
 * without waiting, or waiting period timed out.
 */
#define k_fifo_get(fifo, timeout) \
	({ \
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, get, fifo, timeout); \
	void *ret = k_queue_get(&(fifo)->_queue, timeout); \
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, get, fifo, timeout, ret); \
	ret; \
	})
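
/*
 * Usage sketch (illustrative): a producer hands items to a consumer
 * thread, which blocks until one arrives. k_fifo_put() is declared
 * earlier in this header; the other names are hypothetical.
 *
 * @code
 * struct my_item {
 *     void *fifo_reserved;   // first word, reserved for the kernel
 *     int payload;
 * };
 *
 * K_FIFO_DEFINE(my_fifo);
 *
 * void producer(struct my_item *item)
 * {
 *     k_fifo_put(&my_fifo, item);
 * }
 *
 * void consumer(void)
 * {
 *     struct my_item *item = k_fifo_get(&my_fifo, K_FOREVER);
 *
 *     // process item->payload
 * }
 * @endcode
 */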

/**
 * @brief Query a FIFO queue to see if it has data available.
 *
 * Note that the data might already be gone by the time this function returns
 * if other threads are also reading from the FIFO queue.
 *
 * @funcprops \isr_ok
 *
 * @param fifo Address of the FIFO queue.
 *
 * @return Non-zero if the FIFO queue is empty.
 * @return 0 if data is available.
 */
#define k_fifo_is_empty(fifo) \
	k_queue_is_empty(&(fifo)->_queue)

/**
 * @brief Peek element at the head of a FIFO queue.
 *
 * Return the element at the head of the FIFO queue without removing it.
 * A use case for this is when the elements of the FIFO queue are themselves
 * containers: on each processing iteration the head container is peeked
 * and some data is processed out of it, and only once the container is
 * empty is it actually removed from the FIFO queue.
 *
 * @param fifo Address of the FIFO queue.
 *
 * @return Head element, or NULL if the FIFO queue is empty.
 */
#define k_fifo_peek_head(fifo) \
	({ \
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_head, fifo); \
	void *ret = k_queue_peek_head(&(fifo)->_queue); \
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_head, fifo, ret); \
	ret; \
	})
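
/*
 * Usage sketch (illustrative): draining a container item as described
 * above. my_container, process_some() and container_is_empty() are
 * hypothetical.
 *
 * @code
 * struct my_container *head = k_fifo_peek_head(&my_fifo);
 *
 * if (head != NULL) {
 *     process_some(head);
 *     if (container_is_empty(head)) {
 *         // only now actually remove it from the queue
 *         (void)k_fifo_get(&my_fifo, K_NO_WAIT);
 *     }
 * }
 * @endcode
 */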

/**
 * @brief Peek element at the tail of a FIFO queue.
 *
 * Return the element at the tail of the FIFO queue without removing it.
 * A use case for this is when the elements of the FIFO queue are themselves
 * containers: it may then be useful to add more data to the last container
 * in the FIFO queue.
 *
 * @param fifo Address of the FIFO queue.
 *
 * @return Tail element, or NULL if the FIFO queue is empty.
 */
#define k_fifo_peek_tail(fifo) \
	({ \
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_tail, fifo); \
	void *ret = k_queue_peek_tail(&(fifo)->_queue); \
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_tail, fifo, ret); \
	ret; \
	})

/**
 * @brief Statically define and initialize a FIFO queue.
 *
 * The FIFO queue can be accessed outside the module where it is defined using:
 *
 * @code extern struct k_fifo <name>; @endcode
 *
 * @param name Name of the FIFO queue.
 */
#define K_FIFO_DEFINE(name) \
	STRUCT_SECTION_ITERABLE_ALTERNATE(k_queue, k_fifo, name) = \
		Z_FIFO_INITIALIZER(name)

/** @} */

struct k_lifo {
	struct k_queue _queue;
};

/**
 * @cond INTERNAL_HIDDEN
 */

#define Z_LIFO_INITIALIZER(obj) \
	{ \
	._queue = Z_QUEUE_INITIALIZER(obj._queue) \
	}

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @defgroup lifo_apis LIFO APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * @brief Initialize a LIFO queue.
 *
 * This routine initializes a LIFO queue object, prior to its first use.
 *
 * @param lifo Address of the LIFO queue.
 *
 * @return N/A
 */
#define k_lifo_init(lifo) \
	({ \
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, init, lifo); \
	k_queue_init(&(lifo)->_queue); \
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, init, lifo); \
	})

/**
 * @brief Add an element to a LIFO queue.
 *
 * This routine adds a data item to @a lifo. A LIFO queue data item must be
 * aligned on a word boundary, and the first word of the item is
 * reserved for the kernel's use.
 *
 * @funcprops \isr_ok
 *
 * @param lifo Address of the LIFO queue.
 * @param data Address of the data item.
 *
 * @return N/A
 */
#define k_lifo_put(lifo, data) \
	({ \
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, put, lifo, data); \
	k_queue_prepend(&(lifo)->_queue, data); \
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, put, lifo, data); \
	})

/**
 * @brief Add an element to a LIFO queue.
 *
 * This routine adds a data item to @a lifo. There is an implicit memory
 * allocation to create an additional temporary bookkeeping data structure from
 * the calling thread's resource pool, which is automatically freed when the
 * item is removed. The data itself is not copied.
 *
 * @funcprops \isr_ok
 *
 * @param lifo Address of the LIFO queue.
 * @param data Address of the data item.
 *
 * @retval 0 on success
 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
 */
#define k_lifo_alloc_put(lifo, data) \
	({ \
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, alloc_put, lifo, data); \
	int ret = k_queue_alloc_prepend(&(lifo)->_queue, data); \
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, alloc_put, lifo, data, ret); \
	ret; \
	})

/**
 * @brief Get an element from a LIFO queue.
 *
 * This routine removes a data item from @a lifo in a "last in, first out"
 * manner. The first word of the data item is reserved for the kernel's use.
 *
 * @note @a timeout must be set to K_NO_WAIT if called from an ISR.
 *
 * @funcprops \isr_ok
 *
 * @param lifo Address of the LIFO queue.
 * @param timeout Waiting period to obtain a data item,
 *                or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @return Address of the data item if successful; NULL if returned
 * without waiting, or waiting period timed out.
 */
#define k_lifo_get(lifo, timeout) \
	({ \
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, get, lifo, timeout); \
	void *ret = k_queue_get(&(lifo)->_queue, timeout); \
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, get, lifo, timeout, ret); \
	ret; \
	})
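
/*
 * Usage sketch (illustrative): items come back out in reverse order of
 * insertion. Names are hypothetical.
 *
 * @code
 * struct my_item {
 *     void *lifo_reserved;   // first word, reserved for the kernel
 *     int payload;
 * };
 *
 * K_LIFO_DEFINE(my_lifo);
 *
 * void demo(struct my_item *first, struct my_item *second)
 * {
 *     k_lifo_put(&my_lifo, first);
 *     k_lifo_put(&my_lifo, second);
 *
 *     // returns second, then first
 *     struct my_item *item = k_lifo_get(&my_lifo, K_FOREVER);
 * }
 * @endcode
 */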

/**
 * @brief Statically define and initialize a LIFO queue.
 *
 * The LIFO queue can be accessed outside the module where it is defined using:
 *
 * @code extern struct k_lifo <name>; @endcode
 *
 * @param name Name of the LIFO queue.
 */
#define K_LIFO_DEFINE(name) \
	STRUCT_SECTION_ITERABLE_ALTERNATE(k_queue, k_lifo, name) = \
		Z_LIFO_INITIALIZER(name)

/** @} */

/**
 * @cond INTERNAL_HIDDEN
 */
#define K_STACK_FLAG_ALLOC ((uint8_t)1) /* Buffer was allocated */

typedef uintptr_t stack_data_t;

struct k_stack {
	_wait_q_t wait_q;
	struct k_spinlock lock;
	stack_data_t *base, *next, *top;

	uint8_t flags;

	SYS_PORT_TRACING_TRACKING_FIELD(k_stack)
};

#define Z_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
	{ \
	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
	.base = stack_buffer, \
	.next = stack_buffer, \
	.top = stack_buffer + stack_num_entries, \
	}

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @defgroup stack_apis Stack APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * @brief Initialize a stack.
 *
 * This routine initializes a stack object, prior to its first use.
 *
 * @param stack Address of the stack.
 * @param buffer Address of array used to hold stacked values.
 * @param num_entries Maximum number of values that can be stacked.
 *
 * @return N/A
 */
void k_stack_init(struct k_stack *stack,
		  stack_data_t *buffer, uint32_t num_entries);

/**
 * @brief Initialize a stack.
 *
 * This routine initializes a stack object, prior to its first use. Internal
 * buffers will be allocated from the calling thread's resource pool.
 * This memory will be released if k_stack_cleanup() is called, or
 * userspace is enabled and the stack object loses all references to it.
 *
 * @param stack Address of the stack.
 * @param num_entries Maximum number of values that can be stacked.
 *
 * @return -ENOMEM if memory couldn't be allocated
 */
__syscall int32_t k_stack_alloc_init(struct k_stack *stack,
				     uint32_t num_entries);

/**
 * @brief Release a stack's allocated buffer
 *
 * If a stack object was given a dynamically allocated buffer via
 * k_stack_alloc_init(), this will free it. This function does nothing
 * if the buffer wasn't dynamically allocated.
 *
 * @param stack Address of the stack.
 * @retval 0 on success
 * @retval -EAGAIN when object is still in use
 */
int k_stack_cleanup(struct k_stack *stack);

/**
 * @brief Push an element onto a stack.
 *
 * This routine adds a stack_data_t value @a data to @a stack.
 *
 * @funcprops \isr_ok
 *
 * @param stack Address of the stack.
 * @param data Value to push onto the stack.
 *
 * @retval 0 on success
 * @retval -ENOMEM if stack is full
 */
__syscall int k_stack_push(struct k_stack *stack, stack_data_t data);

/**
 * @brief Pop an element from a stack.
 *
 * This routine removes a stack_data_t value from @a stack in a "last in,
 * first out" manner and stores the value in @a data.
 *
 * @note @a timeout must be set to K_NO_WAIT if called from an ISR.
 *
 * @funcprops \isr_ok
 *
 * @param stack Address of the stack.
 * @param data Address of area to hold the value popped from the stack.
 * @param timeout Waiting period to obtain a value,
 *                or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @retval 0 Element popped from stack.
 * @retval -EBUSY Returned without waiting.
 * @retval -EAGAIN Waiting period timed out.
 */
__syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
			  k_timeout_t timeout);
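
/*
 * Usage sketch (illustrative): pushing and popping word-sized values.
 * my_stack is hypothetical.
 *
 * @code
 * K_STACK_DEFINE(my_stack, 8);
 *
 * void demo(void)
 * {
 *     stack_data_t value;
 *
 *     (void)k_stack_push(&my_stack, (stack_data_t)42);
 *     (void)k_stack_pop(&my_stack, &value, K_FOREVER);
 *     // value == 42
 * }
 * @endcode
 */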

/**
 * @brief Statically define and initialize a stack
 *
 * The stack can be accessed outside the module where it is defined using:
 *
 * @code extern struct k_stack <name>; @endcode
 *
 * @param name Name of the stack.
 * @param stack_num_entries Maximum number of values that can be stacked.
 */
#define K_STACK_DEFINE(name, stack_num_entries) \
	stack_data_t __noinit \
		_k_stack_buf_##name[stack_num_entries]; \
	STRUCT_SECTION_ITERABLE(k_stack, name) = \
		Z_STACK_INITIALIZER(name, _k_stack_buf_##name, \
				    stack_num_entries)

/** @} */

/**
 * @cond INTERNAL_HIDDEN
 */

struct k_work;
struct k_work_q;
struct k_work_queue_config;
struct k_delayed_work;
extern struct k_work_q k_sys_work_q;

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @defgroup mutex_apis Mutex APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * Mutex Structure
 * @ingroup mutex_apis
 */
struct k_mutex {
	/** Mutex wait queue */
	_wait_q_t wait_q;
	/** Mutex owner */
	struct k_thread *owner;

	/** Current lock count */
	uint32_t lock_count;

	/** Original thread priority */
	int owner_orig_prio;

	SYS_PORT_TRACING_TRACKING_FIELD(k_mutex)
};

/**
 * @cond INTERNAL_HIDDEN
 */
#define Z_MUTEX_INITIALIZER(obj) \
	{ \
	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
	.owner = NULL, \
	.lock_count = 0, \
	.owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
	}

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @brief Statically define and initialize a mutex.
 *
 * The mutex can be accessed outside the module where it is defined using:
 *
 * @code extern struct k_mutex <name>; @endcode
 *
 * @param name Name of the mutex.
 */
#define K_MUTEX_DEFINE(name) \
	STRUCT_SECTION_ITERABLE(k_mutex, name) = \
		Z_MUTEX_INITIALIZER(name)

/**
 * @brief Initialize a mutex.
 *
 * This routine initializes a mutex object, prior to its first use.
 *
 * Upon completion, the mutex is available and does not have an owner.
 *
 * @param mutex Address of the mutex.
 *
 * @retval 0 Mutex object created
 *
 */
__syscall int k_mutex_init(struct k_mutex *mutex);

/**
 * @brief Lock a mutex.
 *
 * This routine locks @a mutex. If the mutex is locked by another thread,
 * the calling thread waits until the mutex becomes available or until
 * a timeout occurs.
 *
 * A thread is permitted to lock a mutex it has already locked. The operation
 * completes immediately and the lock count is increased by 1.
 *
 * Mutexes may not be locked in ISRs.
 *
 * @param mutex Address of the mutex.
 * @param timeout Waiting period to lock the mutex,
 *                or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @retval 0 Mutex locked.
 * @retval -EBUSY Returned without waiting.
 * @retval -EAGAIN Waiting period timed out.
 */
__syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);

/**
 * @brief Unlock a mutex.
 *
 * This routine unlocks @a mutex. The mutex must already be locked by the
 * calling thread.
 *
 * The mutex cannot be claimed by another thread until it has been unlocked by
 * the calling thread as many times as it was previously locked by that
 * thread.
 *
 * Mutexes may not be unlocked in ISRs, as mutexes must only be manipulated
 * in thread context due to ownership and priority inheritance semantics.
 *
 * @param mutex Address of the mutex.
 *
 * @retval 0 Mutex unlocked.
 * @retval -EPERM The current thread does not own the mutex
 * @retval -EINVAL The mutex is not locked
 *
 */
__syscall int k_mutex_unlock(struct k_mutex *mutex);
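
/*
 * Usage sketch (illustrative): guarding a shared counter with a bounded
 * wait. my_lock and shared_counter are hypothetical.
 *
 * @code
 * K_MUTEX_DEFINE(my_lock);
 *
 * void update(void)
 * {
 *     if (k_mutex_lock(&my_lock, K_MSEC(100)) == 0) {
 *         shared_counter++;
 *         k_mutex_unlock(&my_lock);
 *     } else {
 *         // timed out; the mutex is held elsewhere
 *     }
 * }
 * @endcode
 */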
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002804
Allan Stephensc98da842016-11-11 15:45:03 -05002805/**
Anas Nashif166f5192018-02-25 08:02:36 -06002806 * @}
Allan Stephensc98da842016-11-11 15:45:03 -05002807 */
2808
Anas Nashif06eb4892020-08-23 12:39:09 -04002809
2810struct k_condvar {
2811 _wait_q_t wait_q;
2812};
2813
2814#define Z_CONDVAR_INITIALIZER(obj) \
2815 { \
2816 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2817 }
2818
2819/**
2820 * @defgroup condvar_apis Condition Variables APIs
2821 * @ingroup kernel_apis
2822 * @{
2823 */
2824
2825/**
2826 * @brief Initialize a condition variable
2827 *
2828 * @param condvar pointer to a @p k_condvar structure
2829 * @retval 0 Condition variable created successfully
2830 */
2831__syscall int k_condvar_init(struct k_condvar *condvar);
2832
2833/**
2834 * @brief Signals one thread that is pending on the condition variable
2835 *
2836 * @param condvar pointer to a @p k_condvar structure
2837 * @retval 0 On success
2838 */
2839__syscall int k_condvar_signal(struct k_condvar *condvar);
2840
2841/**
2842 * @brief Unblock all threads that are pending on the condition
2843 * variable
2844 *
2845 * @param condvar pointer to a @p k_condvar structure
2846 * @return An integer with number of woken threads on success
2847 */
2848__syscall int k_condvar_broadcast(struct k_condvar *condvar);
2849
2850/**
2851 * @brief Waits on the condition variable releasing the mutex lock
2852 *
2853 * Automically releases the currently owned mutex, blocks the current thread
2854 * waiting on the condition variable specified by @a condvar,
2855 * and finally acquires the mutex again.
2856 *
2857 * The waiting thread unblocks only after another thread calls
2858 * k_condvar_signal, or k_condvar_broadcast with the same condition variable.
2859 *
2860 * @param condvar pointer to a @p k_condvar structure
2861 * @param mutex Address of the mutex.
2862 * @param timeout Waiting period for the condition variable
2863 * or one of the special values K_NO_WAIT and K_FOREVER.
2864 * @retval 0 On success
2865 * @retval -EAGAIN Waiting period timed out.
2866 */
2867__syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
2868 k_timeout_t timeout);
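
/*
 * Usage sketch (illustrative): the waiter re-checks its predicate in a
 * loop, since the condition may change between wakeup and resumption.
 * K_CONDVAR_DEFINE() is declared just below; other names are
 * hypothetical.
 *
 * @code
 * K_MUTEX_DEFINE(my_lock);
 * K_CONDVAR_DEFINE(my_cond);
 * static bool data_ready;
 *
 * void waiter(void)
 * {
 *     k_mutex_lock(&my_lock, K_FOREVER);
 *     while (!data_ready) {
 *         k_condvar_wait(&my_cond, &my_lock, K_FOREVER);
 *     }
 *     // consume the data while still holding the mutex
 *     k_mutex_unlock(&my_lock);
 * }
 *
 * void notifier(void)
 * {
 *     k_mutex_lock(&my_lock, K_FOREVER);
 *     data_ready = true;
 *     k_condvar_signal(&my_cond);
 *     k_mutex_unlock(&my_lock);
 * }
 * @endcode
 */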

/**
 * @brief Statically define and initialize a condition variable.
 *
 * The condition variable can be accessed outside the module where it is
 * defined using:
 *
 * @code extern struct k_condvar <name>; @endcode
 *
 * @param name Name of the condition variable.
 */
#define K_CONDVAR_DEFINE(name) \
	STRUCT_SECTION_ITERABLE(k_condvar, name) = \
		Z_CONDVAR_INITIALIZER(name)
/**
 * @}
 */

/**
 * @cond INTERNAL_HIDDEN
 */

struct k_sem {
	_wait_q_t wait_q;
	unsigned int count;
	unsigned int limit;

	_POLL_EVENT;

	SYS_PORT_TRACING_TRACKING_FIELD(k_sem)

};

#define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \
	{ \
	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
	.count = initial_count, \
	.limit = count_limit, \
	_POLL_EVENT_OBJ_INIT(obj) \
	}

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @defgroup semaphore_apis Semaphore APIs
 * @ingroup kernel_apis
 * @{
 */

/**
 * @brief Maximum limit value allowed for a semaphore.
 *
 * This is intended for use when a semaphore does not have
 * an explicit maximum limit, and instead is just used for
 * counting purposes.
 *
 */
#define K_SEM_MAX_LIMIT UINT_MAX

/**
 * @brief Initialize a semaphore.
 *
 * This routine initializes a semaphore object, prior to its first use.
 *
 * @param sem Address of the semaphore.
 * @param initial_count Initial semaphore count.
 * @param limit Maximum permitted semaphore count.
 *
 * @see K_SEM_MAX_LIMIT
 *
 * @retval 0 Semaphore created successfully
 * @retval -EINVAL Invalid values
 *
 */
__syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
			 unsigned int limit);

/**
 * @brief Take a semaphore.
 *
 * This routine takes @a sem.
 *
 * @note @a timeout must be set to K_NO_WAIT if called from an ISR.
 *
 * @funcprops \isr_ok
 *
 * @param sem Address of the semaphore.
 * @param timeout Waiting period to take the semaphore,
 *                or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @retval 0 Semaphore taken.
 * @retval -EBUSY Returned without waiting.
 * @retval -EAGAIN Waiting period timed out,
 *                 or the semaphore was reset during the waiting period.
 */
__syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);

/**
 * @brief Give a semaphore.
 *
 * This routine gives @a sem, unless the semaphore is already at its maximum
 * permitted count.
 *
 * @funcprops \isr_ok
 *
 * @param sem Address of the semaphore.
 *
 * @return N/A
 */
__syscall void k_sem_give(struct k_sem *sem);
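
/*
 * Usage sketch (illustrative): an ISR signals a worker thread that data
 * is ready. Names are hypothetical.
 *
 * @code
 * K_SEM_DEFINE(data_sem, 0, 1);
 *
 * void my_isr(const void *arg)
 * {
 *     k_sem_give(&data_sem);
 * }
 *
 * void worker(void)
 * {
 *     while (k_sem_take(&data_sem, K_FOREVER) == 0) {
 *         // handle the data signaled by the ISR
 *     }
 * }
 * @endcode
 */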

/**
 * @brief Resets a semaphore's count to zero.
 *
 * This routine sets the count of @a sem to zero.
 * Any outstanding semaphore takes will be aborted
 * with -EAGAIN.
 *
 * @param sem Address of the semaphore.
 *
 * @return N/A
 */
__syscall void k_sem_reset(struct k_sem *sem);

/**
 * @brief Get a semaphore's count.
 *
 * This routine returns the current count of @a sem.
 *
 * @param sem Address of the semaphore.
 *
 * @return Current semaphore count.
 */
__syscall unsigned int k_sem_count_get(struct k_sem *sem);

/**
 * @internal
 */
static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
{
	return sem->count;
}

/**
 * @brief Statically define and initialize a semaphore.
 *
 * The semaphore can be accessed outside the module where it is defined using:
 *
 * @code extern struct k_sem <name>; @endcode
 *
 * @param name Name of the semaphore.
 * @param initial_count Initial semaphore count.
 * @param count_limit Maximum permitted semaphore count.
 */
#define K_SEM_DEFINE(name, initial_count, count_limit) \
	STRUCT_SECTION_ITERABLE(k_sem, name) = \
		Z_SEM_INITIALIZER(name, initial_count, count_limit); \
	BUILD_ASSERT(((count_limit) != 0) && \
		     ((initial_count) <= (count_limit)) && \
		     ((count_limit) <= K_SEM_MAX_LIMIT));

/** @} */

/**
 * @cond INTERNAL_HIDDEN
 */

struct k_work_delayable;
struct k_work_sync;

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @defgroup workqueue_apis Work Queue APIs
 * @ingroup kernel_apis
 * @{
 */

/** @brief The signature for a work item handler function.
 *
 * The function will be invoked by the thread animating a work queue.
 *
 * @param work the work item that provided the handler.
 */
typedef void (*k_work_handler_t)(struct k_work *work);

/** @brief Initialize a (non-delayable) work structure.
 *
 * This must be invoked before submitting a work structure for the first time.
 * It need not be invoked again on the same work structure. It can be
 * re-invoked to change the associated handler, but this must be done when the
 * work item is idle.
 *
 * @funcprops \isr_ok
 *
 * @param work the work structure to be initialized.
 *
 * @param handler the handler to be invoked by the work item.
 */
void k_work_init(struct k_work *work,
		 k_work_handler_t handler);

/** @brief Busy state flags from the work item.
 *
 * A zero return value indicates the work item appears to be idle.
 *
 * @note This is a live snapshot of state, which may change before the result
 * is checked. Use locks where appropriate.
 *
 * @funcprops \isr_ok
 *
 * @param work pointer to the work item.
 *
 * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED,
 * K_WORK_RUNNING, and K_WORK_CANCELING.
 */
int k_work_busy_get(const struct k_work *work);

/** @brief Test whether a work item is currently pending.
 *
 * Wrapper to determine whether a work item is in a non-idle state.
 *
 * @note This is a live snapshot of state, which may change before the result
 * is checked. Use locks where appropriate.
 *
 * @funcprops \isr_ok
 *
 * @param work pointer to the work item.
 *
 * @return true if and only if k_work_busy_get() returns a non-zero value.
 */
static inline bool k_work_is_pending(const struct k_work *work);

/** @brief Submit a work item to a queue.
 *
 * @funcprops \isr_ok
 *
 * @param queue pointer to the work queue on which the item should run. If
 * NULL the queue from the most recent submission will be used.
 *
 * @param work pointer to the work item.
 *
 * @retval 0 if work was already submitted to a queue
 * @retval 1 if work was not submitted and has been queued to @p queue
 * @retval 2 if work was running and has been queued to the queue that was
 * running it
 * @retval -EBUSY
 * * if work submission was rejected because the work item is cancelling; or
 * * @p queue is draining; or
 * * @p queue is plugged.
 * @retval -EINVAL if @p queue is null and the work item has never been run.
 * @retval -ENODEV if @p queue has not been started.
 */
int k_work_submit_to_queue(struct k_work_q *queue,
			   struct k_work *work);

/** @brief Submit a work item to the system queue.
 *
 * @funcprops \isr_ok
 *
 * @param work pointer to the work item.
 *
 * @return as with k_work_submit_to_queue().
 */
extern int k_work_submit(struct k_work *work);
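
/*
 * Usage sketch (illustrative): deferring work from an ISR to the system
 * work queue. K_WORK_DEFINE() is provided elsewhere in this header;
 * other names are hypothetical.
 *
 * @code
 * static void my_handler(struct k_work *work)
 * {
 *     // deferred processing, runs in the system work queue thread
 * }
 *
 * K_WORK_DEFINE(my_work, my_handler);
 *
 * void my_isr(const void *arg)
 * {
 *     k_work_submit(&my_work);
 * }
 * @endcode
 */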

/** @brief Wait for last-submitted instance to complete.
 *
 * Resubmissions may occur while waiting, including chained submissions (from
 * within the handler).
 *
 * @note Be careful of caller and work queue thread relative priority. If
 * this function sleeps it will not return until the work queue thread
 * completes the tasks that allow this thread to resume.
 *
 * @note Behavior is undefined if this function is invoked on @p work from a
 * work queue running @p work.
 *
 * @param work pointer to the work item.
 *
 * @param sync pointer to an opaque item containing state related to the
 * pending cancellation. The object must persist until the call returns, and
 * be accessible from both the caller thread and the work queue thread. The
 * object must not be used for any other flush or cancel operation until this
 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
 * must be allocated in coherent memory.
 *
 * @retval true if call had to wait for completion
 * @retval false if work was already idle
 */
bool k_work_flush(struct k_work *work,
		  struct k_work_sync *sync);

/** @brief Cancel a work item.
 *
 * This attempts to prevent a pending (non-delayable) work item from being
 * processed by removing it from the work queue. If the item is being
 * processed, the work item will continue to be processed, but resubmissions
 * are rejected until cancellation completes.
 *
 * If this returns zero cancellation is complete, otherwise something
 * (probably a work queue thread) is still referencing the item.
 *
 * See also k_work_cancel_sync().
 *
 * @funcprops \isr_ok
 *
 * @param work pointer to the work item.
 *
 * @return the k_work_busy_get() status indicating the state of the item after
 * all cancellation steps performed by this call are completed.
 */
int k_work_cancel(struct k_work *work);

/** @brief Cancel a work item and wait for it to complete.
 *
 * Same as k_work_cancel() but does not return until cancellation is complete.
 * This can be invoked by a thread after k_work_cancel() to synchronize with a
 * previous cancellation.
 *
 * On return the work structure will be idle unless something submits it after
 * the cancellation was complete.
 *
 * @note Be careful of caller and work queue thread relative priority. If
 * this function sleeps it will not return until the work queue thread
 * completes the tasks that allow this thread to resume.
 *
 * @note Behavior is undefined if this function is invoked on @p work from a
 * work queue running @p work.
 *
 * @param work pointer to the work item.
 *
 * @param sync pointer to an opaque item containing state related to the
 * pending cancellation. The object must persist until the call returns, and
 * be accessible from both the caller thread and the work queue thread. The
 * object must not be used for any other flush or cancel operation until this
 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
 * must be allocated in coherent memory.
 *
 * @retval true if work was pending (call had to wait for cancellation of a
 * running handler to complete, or scheduled or submitted operations were
 * cancelled);
 * @retval false otherwise
 */
bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync);
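
/*
 * Usage sketch (illustrative): synchronously stopping a work item, for
 * example in a shutdown path. The k_work_sync object lives on the
 * caller's stack; my_work is hypothetical.
 *
 * @code
 * void shutdown(void)
 * {
 *     struct k_work_sync sync;
 *
 *     if (k_work_cancel_sync(&my_work, &sync)) {
 *         // had to wait for a running handler to finish
 *     }
 * }
 * @endcode
 */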

/** @brief Initialize a work queue structure.
 *
 * This must be invoked before starting a work queue structure for the first
 * time. It need not be invoked again on the same work queue structure.
 *
 * @funcprops \isr_ok
 *
 * @param queue the queue structure to be initialized.
 */
void k_work_queue_init(struct k_work_q *queue);

/** @brief Start a work queue.
 *
 * This configures the work queue thread and starts it running. The function
 * should not be re-invoked on a queue.
 *
 * @param queue pointer to the queue structure. It must be initialized
 *        in zeroed/bss memory or with @ref k_work_queue_init before
 *        use.
 *
 * @param stack pointer to the work thread stack area.
 *
 * @param stack_size size of the work thread stack area, in bytes.
 *
 * @param prio initial thread priority
 *
 * @param cfg optional additional configuration parameters. Pass @c
 * NULL if not required, to use the defaults documented in
 * k_work_queue_config.
 */
void k_work_queue_start(struct k_work_q *queue,
			k_thread_stack_t *stack, size_t stack_size,
			int prio, const struct k_work_queue_config *cfg);
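
/*
 * Usage sketch (illustrative): creating a dedicated work queue. The
 * stack size and priority are arbitrary example values.
 *
 * @code
 * K_THREAD_STACK_DEFINE(my_wq_stack, 1024);
 * static struct k_work_q my_wq;
 *
 * void start_my_queue(void)
 * {
 *     k_work_queue_init(&my_wq);
 *     k_work_queue_start(&my_wq, my_wq_stack,
 *                        K_THREAD_STACK_SIZEOF(my_wq_stack),
 *                        K_PRIO_PREEMPT(8), NULL);
 * }
 * @endcode
 */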

/** @brief Access the thread that animates a work queue.
 *
 * This is necessary to grant a work queue thread access to things the work
 * items it will process are expected to use.
 *
 * @param queue pointer to the queue structure.
 *
 * @return the thread associated with the work queue.
 */
static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue);

/** @brief Wait until the work queue has drained, optionally plugging it.
 *
 * This blocks submission to the work queue except when coming from the queue
 * thread, and blocks the caller until no more work items are available in the
 * queue.
 *
 * If @p plug is true then submission will continue to be blocked after the
 * drain operation completes until k_work_queue_unplug() is invoked.
 *
 * Note that work items that are delayed are not yet associated with their
 * work queue. They must be cancelled externally if a goal is to ensure the
 * work queue remains empty. The @p plug feature can be used to prevent
 * delayed items from being submitted after the drain completes.
 *
 * @param queue pointer to the queue structure.
 *
 * @param plug if true the work queue will continue to block new submissions
 * after all items have drained.
 *
 * @retval 1 if call had to wait for the drain to complete
 * @retval 0 if call did not have to wait
 * @retval negative if wait was interrupted or failed
 */
int k_work_queue_drain(struct k_work_q *queue, bool plug);

/** @brief Release a work queue to accept new submissions.
 *
 * This releases the block on new submissions placed when k_work_queue_drain()
 * is invoked with the @p plug option enabled. If this is invoked before the
 * drain completes new items may be submitted as soon as the drain completes.
 *
 * @funcprops \isr_ok
 *
 * @param queue pointer to the queue structure.
 *
 * @retval 0 if successfully unplugged
 * @retval -EALREADY if the work queue was not plugged.
 */
int k_work_queue_unplug(struct k_work_q *queue);

/** @brief Initialize a delayable work structure.
 *
 * This must be invoked before scheduling a delayable work structure for the
 * first time. It need not be invoked again on the same work structure. It
 * can be re-invoked to change the associated handler, but this must be done
 * when the work item is idle.
 *
 * @funcprops \isr_ok
 *
 * @param dwork the delayable work structure to be initialized.
 *
 * @param handler the handler to be invoked by the work item.
 */
void k_work_init_delayable(struct k_work_delayable *dwork,
			   k_work_handler_t handler);

/**
 * @brief Get the parent delayable work structure from a work pointer.
 *
 * This function is necessary when a @c k_work_handler_t function is passed to
 * k_work_schedule_for_queue() and the handler needs to access data in the
 * structure containing the `k_work_delayable`.
 *
 * @param work Address passed to the work handler
 *
 * @return Address of the containing @c k_work_delayable structure.
 */
static inline struct k_work_delayable *
k_work_delayable_from_work(struct k_work *work);

/** @brief Busy state flags from the delayable work item.
 *
 * @funcprops \isr_ok
 *
 * @note This is a live snapshot of state, which may change before the result
 * can be inspected. Use locks where appropriate.
 *
 * @param dwork pointer to the delayable work item.
 *
 * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED, K_WORK_RUNNING, and
 * K_WORK_CANCELING. A zero return value indicates the work item appears to
 * be idle.
 */
int k_work_delayable_busy_get(const struct k_work_delayable *dwork);

/** @brief Test whether a delayed work item is currently pending.
 *
 * Wrapper to determine whether a delayed work item is in a non-idle state.
 *
 * @note This is a live snapshot of state, which may change before the result
 * can be inspected. Use locks where appropriate.
 *
 * @funcprops \isr_ok
 *
 * @param dwork pointer to the delayable work item.
 *
 * @return true if and only if k_work_delayable_busy_get() returns a non-zero
 * value.
 */
static inline bool k_work_delayable_is_pending(
	const struct k_work_delayable *dwork);

/** @brief Get the absolute tick count at which a scheduled delayable work
 * will be submitted.
 *
 * @note This is a live snapshot of state, which may change before the result
 * can be inspected. Use locks where appropriate.
 *
 * @funcprops \isr_ok
 *
 * @param dwork pointer to the delayable work item.
 *
 * @return the tick count when the timer that will schedule the work item will
 * expire, or the current tick count if the work is not scheduled.
 */
static inline k_ticks_t k_work_delayable_expires_get(
	const struct k_work_delayable *dwork);

/** @brief Get the number of ticks until a scheduled delayable work will be
 * submitted.
 *
 * @note This is a live snapshot of state, which may change before the result
 * can be inspected. Use locks where appropriate.
 *
 * @funcprops \isr_ok
 *
 * @param dwork pointer to the delayable work item.
 *
 * @return the number of ticks until the timer that will schedule the work
 * item will expire, or zero if the item is not scheduled.
 */
static inline k_ticks_t k_work_delayable_remaining_get(
	const struct k_work_delayable *dwork);

/** @brief Submit an idle work item to a queue after a delay.
 *
 * Unlike k_work_reschedule_for_queue() this is a no-op if the work item is
 * already scheduled or submitted, even if @p delay is @c K_NO_WAIT.
 *
 * @funcprops \isr_ok
 *
 * @param queue the queue on which the work item should be submitted after the
 * delay.
 *
 * @param dwork pointer to the delayable work item.
 *
 * @param delay the time to wait before submitting the work item. If @c
 * K_NO_WAIT and the work is not pending this is equivalent to
 * k_work_submit_to_queue().
 *
 * @retval 0 if work was already scheduled or submitted.
 * @retval 1 if work has been scheduled.
 * @retval -EBUSY if @p delay is @c K_NO_WAIT and
 * k_work_submit_to_queue() fails with this code.
 * @retval -EINVAL if @p delay is @c K_NO_WAIT and
 * k_work_submit_to_queue() fails with this code.
 * @retval -ENODEV if @p delay is @c K_NO_WAIT and
 * k_work_submit_to_queue() fails with this code.
 */
int k_work_schedule_for_queue(struct k_work_q *queue,
			      struct k_work_delayable *dwork,
			      k_timeout_t delay);
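
/*
 * Usage sketch (illustrative): a delayable handler recovering its
 * enclosing context via k_work_delayable_from_work() and CONTAINER_OF().
 * Names are hypothetical.
 *
 * @code
 * struct my_ctx {
 *     struct k_work_delayable dwork;
 *     int retries;
 * };
 *
 * static void poll_handler(struct k_work *work)
 * {
 *     struct k_work_delayable *dwork = k_work_delayable_from_work(work);
 *     struct my_ctx *ctx = CONTAINER_OF(dwork, struct my_ctx, dwork);
 *
 *     if (ctx->retries-- > 0) {
 *         // poll again in 100 ms
 *         k_work_schedule(dwork, K_MSEC(100));
 *     }
 * }
 * @endcode
 */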
3426
3427/** @brief Submit an idle work item to the system work queue after a
3428 * delay.
3429 *
3430 * This is a thin wrapper around k_work_schedule_for_queue(), with all the API
3431 * characteristcs of that function.
3432 *
3433 * @param dwork pointer to the delayable work item.
3434 *
3435 * @param delay the time to wait before submitting the work item. If @c
3436 * K_NO_WAIT this is equivalent to k_work_submit_to_queue().
3437 *
3438 * @return as with k_work_schedule_for_queue().
3439 */
Torbjörn Leksell7a646b32021-03-26 14:41:18 +01003440extern int k_work_schedule(struct k_work_delayable *dwork,
3441 k_timeout_t delay);
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003442
3443/** @brief Reschedule a work item to a queue after a delay.
3444 *
3445 * Unlike k_work_schedule_for_queue() this function can change the deadline of
3446 * a scheduled work item, and will schedule a work item that isn't idle
3447 * (e.g. is submitted or running). This function does not affect ("unsubmit")
3448 * a work item that has been submitted to a queue.
3449 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003450 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003451 *
3452 * @param queue the queue on which the work item should be submitted after the
3453 * delay.
3454 *
3455 * @param dwork pointer to the delayable work item.
3456 *
3457 * @param delay the time to wait before submitting the work item. If @c
3458 * K_NO_WAIT this is equivalent to k_work_submit_to_queue() after canceling
3459 * any previous scheduled submission.
3460 *
3461 * @note If delay is @c K_NO_WAIT ("no delay") the return values are as with
3462 * k_work_submit_to_queue().
3463 *
3464 * @retval 0 if delay is @c K_NO_WAIT and work was already on a queue
3465 * @retval 1 if
3466 * * delay is @c K_NO_WAIT and work was not submitted but has now been queued
3467 * to @p queue; or
3468 * * delay not @c K_NO_WAIT and work has been scheduled
3469 * @retval 2 if delay is @c K_NO_WAIT and work was running and has been queued
3470 * to the queue that was running it
Peter Bigot47435902021-05-17 06:36:04 -05003471 * @retval -EBUSY if @p delay is @c K_NO_WAIT and
3472 * k_work_submit_to_queue() fails with this code.
3473 * @retval -EINVAL if @p delay is @c K_NO_WAIT and
3474 * k_work_submit_to_queue() fails with this code.
3475 * @retval -ENODEV if @p delay is @c K_NO_WAIT and
3476 * k_work_submit_to_queue() fails with this code.
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003477 */
3478int k_work_reschedule_for_queue(struct k_work_q *queue,
3479 struct k_work_delayable *dwork,
3480 k_timeout_t delay);
3481
3482/** @brief Reschedule a work item to the system work queue after a
3483 * delay.
3484 *
3485 * This is a thin wrapper around k_work_reschedule_for_queue(), with all the
3486 * API characteristics of that function.
3487 *
3488 * @param dwork pointer to the delayable work item.
3489 *
3490 * @param delay the time to wait before submitting the work item.
3491 *
3492 * @return as with k_work_reschedule_for_queue().
3493 */
Torbjörn Leksell7a646b32021-03-26 14:41:18 +01003494extern int k_work_reschedule(struct k_work_delayable *dwork,
3495 k_timeout_t delay);
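/* Illustrative usage sketch (assumed names): k_work_reschedule() as a
 * simple debounce, where each new event pushes the deadline out rather
 * than being ignored as k_work_schedule() would do.
 *
 * @code
 * static void debounce_handler(struct k_work *work)
 * {
 *         // input has been stable for 50 ms
 * }
 * static K_WORK_DELAYABLE_DEFINE(debounce_work, debounce_handler);
 *
 * void on_input_event(void)
 * {
 *         // restart the countdown on every call
 *         (void)k_work_reschedule(&debounce_work, K_MSEC(50));
 * }
 * @endcode
 */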
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003496
3497/** @brief Flush delayable work.
3498 *
3499 * If the work is scheduled, it is immediately submitted. Then the caller
3500 * blocks until the work completes, as with k_work_flush().
3501 *
3502 * @note Be careful of caller and work queue thread relative priority. If
3503 * this function sleeps it will not return until the work queue thread
3504 * completes the tasks that allow this thread to resume.
3505 *
3506 * @note Behavior is undefined if this function is invoked on @p dwork from a
3507 * work queue running @p dwork.
3508 *
3509 * @param dwork pointer to the delayable work item.
3510 *
3511 * @param sync pointer to an opaque item containing state related to the
3512 * pending cancellation. The object must persist until the call returns, and
3513 * be accessible from both the caller thread and the work queue thread. The
3514 * object must not be used for any other flush or cancel operation until this
3515 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3516 * must be allocated in coherent memory.
3517 *
3518 * @retval true if call had to wait for completion
3519 * @retval false if work was already idle
3520 */
3521bool k_work_flush_delayable(struct k_work_delayable *dwork,
3522 struct k_work_sync *sync);
3523
3524/** @brief Cancel delayable work.
3525 *
3526 * Similar to k_work_cancel() but for delayable work. If the work is
3527 * scheduled or submitted it is canceled. This function does not wait for the
3528 * cancellation to complete.
3529 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003530 * @note The work may still be running when this returns. Use
3531 * k_work_flush_delayable() or k_work_cancel_delayable_sync() to ensure it is
3532 * not running.
3533 *
3534 * @note Canceling delayable work does not prevent rescheduling it. It does
3535 * prevent submitting it until the cancellation completes.
3536 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003537 * @funcprops \isr_ok
3538 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003539 * @param dwork pointer to the delayable work item.
3540 *
3541 * @return the k_work_delayable_busy_get() status indicating the state of the
3542 * item after all cancellation steps performed by this call are completed.
3543 */
3544int k_work_cancel_delayable(struct k_work_delayable *dwork);
3545
3546/** @brief Cancel delayable work and wait.
3547 *
3548 * Like k_work_cancel_delayable() but waits until the work becomes idle.
3549 *
3550 * @note Canceling delayable work does not prevent rescheduling it. It does
3551 * prevent submitting it until the cancellation completes.
3552 *
3553 * @note Be careful of caller and work queue thread relative priority. If
3554 * this function sleeps it will not return until the work queue thread
3555 * completes the tasks that allow this thread to resume.
3556 *
3557 * @note Behavior is undefined if this function is invoked on @p dwork from a
3558 * work queue running @p dwork.
3559 *
3560 * @param dwork pointer to the delayable work item.
3561 *
3562 * @param sync pointer to an opaque item containing state related to the
3563 * pending cancellation. The object must persist until the call returns, and
3564 * be accessible from both the caller thread and the work queue thread. The
3565 * object must not be used for any other flush or cancel operation until this
3566 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3567 * must be allocated in coherent memory.
3568 *
Peter Bigot707dc222021-04-16 11:48:50 -05003569 * @retval true if work was not idle (call had to wait for cancellation of a
3570 * running handler to complete, or scheduled or submitted operations were
3571 * cancelled);
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003572 * @retval false otherwise
3573 */
3574bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
3575 struct k_work_sync *sync);
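/* Illustrative usage sketch (assumed names): synchronous cancellation from
 * thread context. The stack-allocated k_work_sync is acceptable except on
 * CONFIG_KERNEL_COHERENCE architectures, per the note above.
 *
 * @code
 * void stop_sampling(struct k_work_delayable *dwork)
 * {
 *         struct k_work_sync sync;
 *
 *         if (k_work_cancel_delayable_sync(dwork, &sync)) {
 *                 // had to wait: the item was scheduled, queued, or running
 *         }
 * }
 * @endcode
 */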
3576
3577enum {
3578/**
3579 * @cond INTERNAL_HIDDEN
3580 */
3581
3582 /* The atomic API is used for all work and queue flags fields to
3583 * enforce sequential consistency in SMP environments.
3584 */
3585
3586 /* Bits that represent the work item states. At least nine of the
3587 * combinations are distinct valid stable states.
3588 */
3589 K_WORK_RUNNING_BIT = 0,
3590 K_WORK_CANCELING_BIT = 1,
3591 K_WORK_QUEUED_BIT = 2,
3592 K_WORK_DELAYED_BIT = 3,
3593
3594 K_WORK_MASK = BIT(K_WORK_DELAYED_BIT) | BIT(K_WORK_QUEUED_BIT)
3595 | BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT),
3596
3597 /* Static work flags */
3598 K_WORK_DELAYABLE_BIT = 8,
3599 K_WORK_DELAYABLE = BIT(K_WORK_DELAYABLE_BIT),
3600
3601 /* Dynamic work queue flags */
3602 K_WORK_QUEUE_STARTED_BIT = 0,
3603 K_WORK_QUEUE_STARTED = BIT(K_WORK_QUEUE_STARTED_BIT),
3604 K_WORK_QUEUE_BUSY_BIT = 1,
3605 K_WORK_QUEUE_BUSY = BIT(K_WORK_QUEUE_BUSY_BIT),
3606 K_WORK_QUEUE_DRAIN_BIT = 2,
3607 K_WORK_QUEUE_DRAIN = BIT(K_WORK_QUEUE_DRAIN_BIT),
3608 K_WORK_QUEUE_PLUGGED_BIT = 3,
3609 K_WORK_QUEUE_PLUGGED = BIT(K_WORK_QUEUE_PLUGGED_BIT),
3610
3611 /* Static work queue flags */
3612 K_WORK_QUEUE_NO_YIELD_BIT = 8,
3613 K_WORK_QUEUE_NO_YIELD = BIT(K_WORK_QUEUE_NO_YIELD_BIT),
3614
3615/**
3616 * INTERNAL_HIDDEN @endcond
3617 */
3618 /* Transient work flags */
3619
3620 /** @brief Flag indicating a work item that is running under a work
3621 * queue thread.
3622 *
3623 * Accessed via k_work_busy_get(). May co-occur with other flags.
3624 */
3625 K_WORK_RUNNING = BIT(K_WORK_RUNNING_BIT),
3626
3627 /** @brief Flag indicating a work item that is being canceled.
3628 *
3629 * Accessed via k_work_busy_get(). May co-occur with other flags.
3630 */
3631 K_WORK_CANCELING = BIT(K_WORK_CANCELING_BIT),
3632
3633 /** @brief Flag indicating a work item that has been submitted to a
3634 * queue but has not started running.
3635 *
3636 * Accessed via k_work_busy_get(). May co-occur with other flags.
3637 */
3638 K_WORK_QUEUED = BIT(K_WORK_QUEUED_BIT),
3639
3640 /** @brief Flag indicating a delayed work item that is scheduled for
3641 * submission to a queue.
3642 *
3643 * Accessed via k_work_busy_get(). May co-occur with other flags.
3644 */
3645 K_WORK_DELAYED = BIT(K_WORK_DELAYED_BIT),
3646};
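/* Illustrative sketch: interpreting the transient state flags via
 * k_work_busy_get(), which is declared earlier in this header. The work
 * item name is an assumption, and the item is assumed to have been
 * initialized elsewhere.
 *
 * @code
 * int busy = k_work_busy_get(&sample_work);
 *
 * if ((busy & K_WORK_CANCELING) != 0) {
 *         // cancellation in progress; submissions will be rejected
 * } else if ((busy & (K_WORK_QUEUED | K_WORK_DELAYED)) != 0) {
 *         // the item is waiting to be run
 * }
 * @endcode
 */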
3647
3648/** @brief A structure used to submit work. */
3649struct k_work {
3650 /* All fields are protected by the work module spinlock. No fields
3651 * are to be accessed except through kernel API.
3652 */
3653
3654 /* Node to link into k_work_q pending list. */
3655 sys_snode_t node;
3656
3657 /* The function to be invoked by the work queue thread. */
3658 k_work_handler_t handler;
3659
3660 /* The queue on which the work item was last submitted. */
3661 struct k_work_q *queue;
3662
3663 /* State of the work item.
3664 *
3665 * The item can be DELAYED, QUEUED, and RUNNING simultaneously.
3666 *
3667 * It can be RUNNING and CANCELING simultaneously.
3668 */
3669 uint32_t flags;
3670};
3671
3672#define Z_WORK_INITIALIZER(work_handler) { \
3673 .handler = work_handler, \
3674}
3675
3676/** @brief A structure used to submit work after a delay. */
3677struct k_work_delayable {
3678 /* The work item. */
3679 struct k_work work;
3680
3681 /* Timeout used to submit work after a delay. */
3682 struct _timeout timeout;
3683
3684 /* The queue to which the work should be submitted. */
3685 struct k_work_q *queue;
3686};
3687
3688#define Z_WORK_DELAYABLE_INITIALIZER(work_handler) { \
3689 .work = { \
3690 .handler = work_handler, \
3691 .flags = K_WORK_DELAYABLE, \
3692 }, \
3693}
3694
3695/**
3696 * @brief Initialize a statically-defined delayable work item.
3697 *
3698 * This macro can be used to initialize a statically-defined delayable
3699 * work item, prior to its first use. For example,
3700 *
3701 * @code static K_WORK_DELAYABLE_DEFINE(<dwork>, <work_handler>); @endcode
3702 *
3703 * Note that if the runtime dependencies permit initialization with
3704 * k_work_init_delayable(), using that instead will eliminate the
3705 * initialized object in ROM that is produced by this macro and copied
3706 * in at system startup.
3707 *
3708 * @param work Symbol name for delayable work item object
3709 * @param work_handler Function to invoke each time work item is processed.
3710 */
3711#define K_WORK_DELAYABLE_DEFINE(work, work_handler) \
3712 struct k_work_delayable work \
3713 = Z_WORK_DELAYABLE_INITIALIZER(work_handler)
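/* Illustrative sketch (assumed names): a self-rescheduling periodic item.
 * The handler recovers its containing k_work_delayable with
 * k_work_delayable_from_work(), whose implementation appears later in
 * this header.
 *
 * @code
 * static void periodic_handler(struct k_work *work)
 * {
 *         struct k_work_delayable *dwork = k_work_delayable_from_work(work);
 *
 *         // ... periodic processing ...
 *         (void)k_work_schedule(dwork, K_SECONDS(1));
 * }
 * static K_WORK_DELAYABLE_DEFINE(periodic_work, periodic_handler);
 * @endcode
 */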
3714
3715/**
3716 * @cond INTERNAL_HIDDEN
3717 */
3718
3719/* Record used to wait for work to flush.
3720 *
3721 * The work item is inserted into the queue that will process (or is
3722 * processing) the item, and will be processed as soon as the item
3723 * completes. When the flusher is processed the semaphore will be
3724 * signaled, releasing the thread waiting for the flush.
3725 */
3726struct z_work_flusher {
3727 struct k_work work;
3728 struct k_sem sem;
3729};
3730
3731/* Record used to wait for work to complete a cancellation.
3732 *
3733 * The work item is inserted into a global queue of pending cancels.
3734 * When a cancelling work item goes idle any matching waiters are
3735 * removed from pending_cancels and are woken.
3736 */
3737struct z_work_canceller {
3738 sys_snode_t node;
3739 struct k_work *work;
3740 struct k_sem sem;
3741};
3742
3743/**
3744 * INTERNAL_HIDDEN @endcond
3745 */
3746
3747/** @brief A structure holding internal state for a pending synchronous
3748 * operation on a work item or queue.
3749 *
3750 * Instances of this type are provided by the caller for invocation of
3751 * k_work_flush(), k_work_cancel_sync() and sibling flush and cancel APIs. A
3752 * referenced object must persist until the call returns, and be accessible
3753 * from both the caller thread and the work queue thread.
3754 *
3755 * @note If CONFIG_KERNEL_COHERENCE is enabled the object must be allocated in
3756 * coherent memory; see arch_mem_coherent(). The stack on these architectures
3757 * is generally not coherent, so the object cannot be stack-allocated.
3758 * Violations are detected by runtime assertion.
3759 */
3760struct k_work_sync {
3761 union {
3762 struct z_work_flusher flusher;
3763 struct z_work_canceller canceller;
3764 };
3765};
3766
3767/** @brief A structure holding optional configuration items for a work
3768 * queue.
3769 *
3770 * This structure, and values it references, are not retained by
3771 * k_work_queue_start().
3772 */
3773struct k_work_queue_config {
3774 /** The name to be given to the work queue thread.
3775 *
3776 * If left null the thread will not have a name.
3777 */
3778 const char *name;
3779
3780 /** Control whether the work queue thread should yield between
3781 * items.
3782 *
3783 * Yielding between items helps guarantee the work queue
3784 * thread does not starve other threads, including cooperative
3785 * ones released by a work item. This is the default behavior.
3786 *
3787 * Set this to @c true to prevent the work queue thread from
3788 * yielding between items. This may be appropriate when a
3789 * sequence of items should complete without yielding
3790 * control.
3791 */
3792 bool no_yield;
3793};
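/* Illustrative sketch (assumed names, stack size, and priority): passing a
 * configuration to k_work_queue_start(), which is declared earlier in this
 * header.
 *
 * @code
 * static K_THREAD_STACK_DEFINE(my_wq_stack, 1024);
 * static struct k_work_q my_wq;
 *
 * static const struct k_work_queue_config my_wq_cfg = {
 *         .name = "my_wq",
 *         .no_yield = false,    // default: yield between items
 * };
 *
 * k_work_queue_start(&my_wq, my_wq_stack,
 *                    K_THREAD_STACK_SIZEOF(my_wq_stack), 5, &my_wq_cfg);
 * @endcode
 */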
3794
3795/** @brief A structure used to hold work until it can be processed. */
3796struct k_work_q {
3797 /* The thread that animates the work. */
3798 struct k_thread thread;
3799
3800 /* All the following fields must be accessed only while the
3801 * work module spinlock is held.
3802 */
3803
3804 /* List of k_work items to be worked. */
3805 sys_slist_t pending;
3806
3807 /* Wait queue for idle work thread. */
3808 _wait_q_t notifyq;
3809
3810 /* Wait queue for threads waiting for the queue to drain. */
3811 _wait_q_t drainq;
3812
3813 /* Flags describing queue state. */
3814 uint32_t flags;
3815};
3816
3817/* Provide the implementation for inline functions declared above */
3818
3819static inline bool k_work_is_pending(const struct k_work *work)
3820{
3821 return k_work_busy_get(work) != 0;
3822}
3823
3824static inline struct k_work_delayable *
3825k_work_delayable_from_work(struct k_work *work)
3826{
3827 return CONTAINER_OF(work, struct k_work_delayable, work);
3828}
3829
3830static inline bool k_work_delayable_is_pending(
3831 const struct k_work_delayable *dwork)
3832{
3833 return k_work_delayable_busy_get(dwork) != 0;
3834}
3835
3836static inline k_ticks_t k_work_delayable_expires_get(
3837 const struct k_work_delayable *dwork)
3838{
3839 return z_timeout_expires(&dwork->timeout);
3840}
3841
3842static inline k_ticks_t k_work_delayable_remaining_get(
3843 const struct k_work_delayable *dwork)
3844{
3845 return z_timeout_remaining(&dwork->timeout);
3846}
3847
3848static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
3849{
3850 return &queue->thread;
3851}
3852
3853/* Legacy wrappers */
3854
Peter Bigot09a31ce2021-03-04 11:21:46 -06003855__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003856static inline bool k_work_pending(const struct k_work *work)
3857{
3858 return k_work_is_pending(work);
3859}
3860
Peter Bigot09a31ce2021-03-04 11:21:46 -06003861__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003862static inline void k_work_q_start(struct k_work_q *work_q,
3863 k_thread_stack_t *stack,
3864 size_t stack_size, int prio)
3865{
3866 k_work_queue_start(work_q, stack, stack_size, prio, NULL);
3867}
3868
Peter Bigot09a31ce2021-03-04 11:21:46 -06003869/* deprecated, remove when corresponding deprecated API is removed. */
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003870struct k_delayed_work {
3871 struct k_work_delayable work;
3872};
3873
Peter Bigot09a31ce2021-03-04 11:21:46 -06003874#define Z_DELAYED_WORK_INITIALIZER(work_handler) __DEPRECATED_MACRO { \
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003875 .work = Z_WORK_DELAYABLE_INITIALIZER(work_handler), \
3876}
3877
Peter Bigot09a31ce2021-03-04 11:21:46 -06003878__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003879static inline void k_delayed_work_init(struct k_delayed_work *work,
3880 k_work_handler_t handler)
3881{
3882 k_work_init_delayable(&work->work, handler);
3883}
3884
Peter Bigot09a31ce2021-03-04 11:21:46 -06003885__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003886static inline int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
3887 struct k_delayed_work *work,
3888 k_timeout_t delay)
3889{
3890 int rc = k_work_reschedule_for_queue(work_q, &work->work, delay);
3891
3892 /* Legacy API doesn't distinguish success cases. */
3893 return (rc >= 0) ? 0 : rc;
3894}
3895
Peter Bigot09a31ce2021-03-04 11:21:46 -06003896__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003897static inline int k_delayed_work_submit(struct k_delayed_work *work,
3898 k_timeout_t delay)
3899{
3900 int rc = k_work_reschedule(&work->work, delay);
3901
3902 /* Legacy API doesn't distinguish success cases. */
3903 return (rc >= 0) ? 0 : rc;
3904}
3905
Peter Bigot09a31ce2021-03-04 11:21:46 -06003906__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003907static inline int k_delayed_work_cancel(struct k_delayed_work *work)
3908{
3909 bool pending = k_work_delayable_is_pending(&work->work);
3910 int rc = k_work_cancel_delayable(&work->work);
3911
3912 /* Old return value rules:
3913 *
3914 * 0 if:
3915 * * Work item countdown cancelled before the item was submitted to
3916 * its queue; or
3917 * * Work item was removed from its queue before it was processed.
3918 *
3919 * -EINVAL if:
3920 * * Work item has never been submitted; or
3921 * * Work item has been successfully cancelled; or
3922 * * Timeout handler is in the process of submitting the work item to
3923 * its queue; or
3924 * * Work queue thread has removed the work item from the queue but
3925 * has not called its handler.
3926 *
3927 * -EALREADY if:
3928 * * Work queue thread has removed the work item from the queue and
3929 * cleared its pending flag; or
3930 * * Work queue thread is invoking the item handler; or
3931 * * Work item handler has completed.
3932 *
3933
3934 *
3935 * We can't reconstruct those states, so call it successful only when
3936 * a pending item is no longer pending, -EINVAL if it was pending and
3937 * still is, and -EALREADY if it wasn't pending (so
3938 * didn't hit a race condition).
3939 */
3940 if (pending) {
3941 return (rc == 0) ? 0 : -EINVAL;
3942 }
3943
3944 return -EALREADY;
3945}
3946
Peter Bigot09a31ce2021-03-04 11:21:46 -06003947__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003948static inline bool k_delayed_work_pending(struct k_delayed_work *work)
3949{
3950 return k_work_delayable_is_pending(&work->work);
3951}
3952
Peter Bigot09a31ce2021-03-04 11:21:46 -06003953__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003954static inline int32_t k_delayed_work_remaining_get(struct k_delayed_work *work)
3955{
3956 k_ticks_t rem = k_work_delayable_remaining_get(&work->work);
3957
3958 /* Probably should be ceil32, but was floor32 */
3959 return k_ticks_to_ms_floor32(rem);
3960}
3961
Peter Bigot09a31ce2021-03-04 11:21:46 -06003962__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003963static inline k_ticks_t k_delayed_work_expires_ticks(
3964 struct k_delayed_work *work)
3965{
3966 return k_work_delayable_expires_get(&work->work);
3967}
3968
Peter Bigot09a31ce2021-03-04 11:21:46 -06003969__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003970static inline k_ticks_t k_delayed_work_remaining_ticks(
3971 struct k_delayed_work *work)
3972{
3973 return k_work_delayable_remaining_get(&work->work);
3974}
3975
3976/** @} */
3977
Peter Bigot4e3b9262021-01-15 10:52:38 -06003978struct k_work_user;
3979
3980/**
Anas Nashifc355b7e2021-04-14 08:49:05 -04003981 * @addtogroup workqueue_apis
Peter Bigot4e3b9262021-01-15 10:52:38 -06003982 * @{
3983 */
3984
3985/**
3986 * @typedef k_work_user_handler_t
3987 * @brief Work item handler function type for user work queues.
3988 *
3989 * A work item's handler function is executed by a user workqueue's thread
3990 * when the work item is processed by the workqueue.
3991 *
3992 * @param work Address of the work item.
3993 *
3994 * @return N/A
3995 */
3996typedef void (*k_work_user_handler_t)(struct k_work_user *work);
3997
3998/**
3999 * @cond INTERNAL_HIDDEN
4000 */
4001
4002struct k_work_user_q {
4003 struct k_queue queue;
4004 struct k_thread thread;
4005};
4006
4007enum {
4008 K_WORK_USER_STATE_PENDING, /* Work item pending state */
4009};
4010
4011struct k_work_user {
4012 void *_reserved; /* Used by k_queue implementation. */
4013 k_work_user_handler_t handler;
4014 atomic_t flags;
4015};
4016
4017/**
4018 * INTERNAL_HIDDEN @endcond
4019 */
4020
4021#define Z_WORK_USER_INITIALIZER(work_handler) \
4022 { \
Fredrik Gihl67295be2021-06-11 12:31:58 +02004023 ._reserved = NULL, \
Peter Bigot4e3b9262021-01-15 10:52:38 -06004024 .handler = work_handler, \
Fredrik Gihl67295be2021-06-11 12:31:58 +02004025 .flags = 0 \
Peter Bigot4e3b9262021-01-15 10:52:38 -06004026 }
4027
4028/**
4029 * @brief Initialize a statically-defined user work item.
4030 *
4031 * This macro can be used to initialize a statically-defined user work
4032 * item, prior to its first use. For example,
4033 *
4034 * @code static K_WORK_USER_DEFINE(<work>, <work_handler>); @endcode
4035 *
4036 * @param work Symbol name for work item object
4037 * @param work_handler Function to invoke each time work item is processed.
4038 */
4039#define K_WORK_USER_DEFINE(work, work_handler) \
4040 struct k_work_user work = Z_WORK_USER_INITIALIZER(work_handler)
4041
4042/**
4043 * @brief Initialize a userspace work item.
4044 *
4045 * This routine initializes a user workqueue work item, prior to its
4046 * first use.
4047 *
4048 * @param work Address of work item.
4049 * @param handler Function to invoke each time work item is processed.
4050 *
4051 * @return N/A
4052 */
4053static inline void k_work_user_init(struct k_work_user *work,
4054 k_work_user_handler_t handler)
4055{
4056 *work = (struct k_work_user)Z_WORK_USER_INITIALIZER(handler);
4057}
4058
4059/**
4060 * @brief Check if a userspace work item is pending.
4061 *
4062 * This routine indicates if user work item @a work is pending in a workqueue's
4063 * queue.
4064 *
4065 * @note Checking if the work is pending gives no guarantee that the
4066 * work will still be pending when this information is used. It is up to
4067 * the caller to make sure that this information is used in a safe manner.
4068 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004069 * @funcprops \isr_ok
Peter Bigot4e3b9262021-01-15 10:52:38 -06004070 *
4071 * @param work Address of work item.
4072 *
4073 * @return true if work item is pending, or false if it is not pending.
4074 */
4075static inline bool k_work_user_is_pending(struct k_work_user *work)
4076{
4077 return atomic_test_bit(&work->flags, K_WORK_USER_STATE_PENDING);
4078}
4079
4080/**
4081 * @brief Submit a work item to a user mode workqueue
4082 *
4083 * Submits a work item to a workqueue that runs in user mode. A temporary
4084 * memory allocation is made from the caller's resource pool which is freed
4085 * once the worker thread consumes the k_work item. The workqueue
4086 * thread must have memory access to the k_work item being submitted. The caller
4087 * must have permission granted on the work_q parameter's queue object.
4088 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004089 * @funcprops \isr_ok
Peter Bigot4e3b9262021-01-15 10:52:38 -06004090 *
4091 * @param work_q Address of workqueue.
4092 * @param work Address of work item.
4093 *
4094 * @retval -EBUSY if the work item was already in some workqueue
4095 * @retval -ENOMEM if no memory for thread resource pool allocation
4096 * @retval 0 Success
4097 */
4098static inline int k_work_user_submit_to_queue(struct k_work_user_q *work_q,
4099 struct k_work_user *work)
4100{
4101 int ret = -EBUSY;
4102
4103 if (!atomic_test_and_set_bit(&work->flags,
4104 K_WORK_USER_STATE_PENDING)) {
4105 ret = k_queue_alloc_append(&work_q->queue, work);
4106
4107 /* Couldn't insert into the queue. Clear the pending bit
4108 * so the work item can be submitted again
4109 */
4110 if (ret != 0) {
4111 atomic_clear_bit(&work->flags,
4112 K_WORK_USER_STATE_PENDING);
4113 }
4114 }
4115
4116 return ret;
4117}
4118
4119/**
4120 * @brief Start a workqueue in user mode
4121 *
4122 * This works identically to k_work_queue_start() except it is callable from
4123 * user mode, and the worker thread created will run in user mode. The caller
4124 * must have permissions granted on both the work_q parameter's thread and
4125 * queue objects, and the same restrictions on priority apply as
4126 * k_thread_create().
4127 *
4128 * @param work_q Address of workqueue.
4129 * @param stack Pointer to work queue thread's stack space, as defined by
4130 * K_THREAD_STACK_DEFINE()
4131 * @param stack_size Size of the work queue thread's stack (in bytes), which
4132 * should either be the same constant passed to
4133 * K_THREAD_STACK_DEFINE() or the value of K_THREAD_STACK_SIZEOF().
4134 * @param prio Priority of the work queue's thread.
4135 * @param name optional thread name. If not null a copy is made into the
4136 * thread's name buffer.
4137 *
4138 * @return N/A
4139 */
4140extern void k_work_user_queue_start(struct k_work_user_q *work_q,
4141 k_thread_stack_t *stack,
4142 size_t stack_size, int prio,
4143 const char *name);
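/* Illustrative sketch (assumed names, stack size, and priority): starting
 * a user-mode work queue and submitting a statically defined item. The
 * caller must hold the permissions described above.
 *
 * @code
 * static K_THREAD_STACK_DEFINE(user_wq_stack, 1024);
 * static struct k_work_user_q user_wq;
 *
 * static void user_handler(struct k_work_user *work)
 * {
 *         // runs in the user work queue thread
 * }
 * static K_WORK_USER_DEFINE(user_item, user_handler);
 *
 * k_work_user_queue_start(&user_wq, user_wq_stack,
 *                         K_THREAD_STACK_SIZEOF(user_wq_stack), 10,
 *                         "user_wq");
 * int rc = k_work_user_submit_to_queue(&user_wq, &user_item);
 * @endcode
 */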
4144
4145/** @} */
4146
Allan Stephensc98da842016-11-11 15:45:03 -05004147/**
Peter Bigot3d583982020-11-18 08:55:32 -06004148 * @cond INTERNAL_HIDDEN
4149 */
4150
4151struct k_work_poll {
4152 struct k_work work;
4153 struct k_work_q *workq;
4154 struct z_poller poller;
4155 struct k_poll_event *events;
4156 int num_events;
4157 k_work_handler_t real_handler;
4158 struct _timeout timeout;
4159 int poll_result;
4160};
4161
4162/**
4163 * INTERNAL_HIDDEN @endcond
4164 */
4165
4166/**
Anas Nashifc355b7e2021-04-14 08:49:05 -04004167 * @addtogroup workqueue_apis
Peter Bigot3d583982020-11-18 08:55:32 -06004168 * @{
4169 */
4170
4171/**
Peter Bigotdc34e7c2020-10-28 11:24:05 -05004172 * @brief Initialize a statically-defined work item.
4173 *
4174 * This macro can be used to initialize a statically-defined workqueue work
4175 * item, prior to its first use. For example,
4176 *
4177 * @code static K_WORK_DEFINE(<work>, <work_handler>); @endcode
4178 *
4179 * @param work Symbol name for work item object
4180 * @param work_handler Function to invoke each time work item is processed.
4181 */
4182#define K_WORK_DEFINE(work, work_handler) \
4183 struct k_work work = Z_WORK_INITIALIZER(work_handler)
4184
4185/**
4186 * @brief Initialize a statically-defined delayed work item.
4187 *
4188 * This macro can be used to initialize a statically-defined workqueue
4189 * delayed work item, prior to its first use. For example,
4190 *
4191 * @code static K_DELAYED_WORK_DEFINE(<work>, <work_handler>); @endcode
4192 *
4193 * @param work Symbol name for delayed work item object
4194 * @param work_handler Function to invoke each time work item is processed.
4195 */
Peter Bigot09a31ce2021-03-04 11:21:46 -06004196#define K_DELAYED_WORK_DEFINE(work, work_handler) __DEPRECATED_MACRO \
Peter Bigotdc34e7c2020-10-28 11:24:05 -05004197 struct k_delayed_work work = Z_DELAYED_WORK_INITIALIZER(work_handler)
4198
4199/**
Peter Bigot3d583982020-11-18 08:55:32 -06004200 * @brief Initialize a triggered work item.
4201 *
4202 * This routine initializes a workqueue triggered work item, prior to
4203 * its first use.
4204 *
4205 * @param work Address of triggered work item.
4206 * @param handler Function to invoke each time work item is processed.
4207 *
4208 * @return N/A
4209 */
4210extern void k_work_poll_init(struct k_work_poll *work,
4211 k_work_handler_t handler);
4212
4213/**
4214 * @brief Submit a triggered work item.
4215 *
4216 * This routine schedules work item @a work to be processed by workqueue
4217 * @a work_q when one of the given @a events is signaled. The routine
4218 * initiates an internal poller for the work item and then returns to the
4219 * caller. Only when one of the watched events happens is the work item
4220 * actually submitted to the workqueue, where it becomes pending.
4221 *
4222 * Submitting a previously submitted triggered work item that is still
4223 * waiting for the event cancels the existing submission and reschedules it
4224 * using the new event list. Note that this behavior is inherently subject
4225 * to race conditions with the pre-existing triggered work item and work queue,
4226 * so care must be taken to synchronize such resubmissions externally.
4227 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004228 * @funcprops \isr_ok
Peter Bigot3d583982020-11-18 08:55:32 -06004229 *
4230 * @warning
4231 * Provided array of events as well as a triggered work item must be placed
4232 * in persistent memory (valid until work handler execution or work
4233 * cancellation) and cannot be modified after submission.
4234 *
4235 * @param work_q Address of workqueue.
4236 * @param work Address of triggered work item.
4237 * @param events An array of events which trigger the work.
4238 * @param num_events The number of events in the array.
4239 * @param timeout Timeout after which the work will be scheduled
4240 * for execution even if not triggered.
4241 *
4242 *
4243 * @retval 0 Work item started watching for events.
4244 * @retval -EINVAL Work item is being processed or has completed its work.
4245 * @retval -EADDRINUSE Work item is pending on a different workqueue.
4246 */
4247extern int k_work_poll_submit_to_queue(struct k_work_q *work_q,
4248 struct k_work_poll *work,
4249 struct k_poll_event *events,
4250 int num_events,
4251 k_timeout_t timeout);
4252
4253/**
4254 * @brief Submit a triggered work item to the system workqueue.
4255 *
4256 * This routine schedules work item @a work to be processed by the system
4257 * workqueue when one of the given @a events is signaled. The routine
4258 * initiates an internal poller for the work item and then returns to the
4259 * caller. Only when one of the watched events happens is the work item
4260 * actually submitted to the workqueue, where it becomes pending.
4261 *
4262 * Submitting a previously submitted triggered work item that is still
4263 * waiting for the event cancels the existing submission and reschedules it
4264 * using the new event list. Note that this behavior is inherently subject
4265 * to race conditions with the pre-existing triggered work item and work queue,
4266 * so care must be taken to synchronize such resubmissions externally.
4267 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004268 * @funcprops \isr_ok
Peter Bigot3d583982020-11-18 08:55:32 -06004269 *
4270 * @warning
4271 * Provided array of events as well as a triggered work item must not be
4272 * modified until the item has been processed by the workqueue.
4273 *
4274 * @param work Address of triggered work item.
4275 * @param events An array of events which trigger the work.
4276 * @param num_events The number of events in the array.
4277 * @param timeout Timeout after which the work will be scheduled
4278 * for execution even if not triggered.
4279 *
4280 * @retval 0 Work item started watching for events.
4281 * @retval -EINVAL Work item is being processed or has completed its work.
4282 * @retval -EADDRINUSE Work item is pending on a different workqueue.
4283 */
Torbjörn Leksellcae9a902021-03-26 14:20:05 +01004284extern int k_work_poll_submit(struct k_work_poll *work,
Peter Bigot3d583982020-11-18 08:55:32 -06004285 struct k_poll_event *events,
4286 int num_events,
Torbjörn Leksellcae9a902021-03-26 14:20:05 +01004287 k_timeout_t timeout);
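/* Illustrative sketch (assumed names): triggered work that runs when a
 * semaphore becomes available, or after one second regardless. The event
 * array and work item are static, per the persistence warning above;
 * K_POLL_EVENT_STATIC_INITIALIZER() comes from the poll API.
 *
 * @code
 * static struct k_sem trigger_sem;
 * static struct k_work_poll triggered_item;
 * static struct k_poll_event trigger_events[] = {
 *         K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 *                                         K_POLL_MODE_NOTIFY_ONLY,
 *                                         &trigger_sem, 0),
 * };
 *
 * static void triggered_handler(struct k_work *work)
 * {
 *         // the semaphore was signaled, or the timeout expired
 * }
 *
 * (void)k_sem_init(&trigger_sem, 0, 1);
 * k_work_poll_init(&triggered_item, triggered_handler);
 * (void)k_work_poll_submit(&triggered_item, trigger_events,
 *                          ARRAY_SIZE(trigger_events), K_SECONDS(1));
 * @endcode
 */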
Peter Bigot3d583982020-11-18 08:55:32 -06004288
4289/**
4290 * @brief Cancel a triggered work item.
4291 *
4292 * This routine cancels the submission of triggered work item @a work.
4293 * A triggered work item can only be canceled while no event has yet
4294 * triggered its submission to the workqueue.
4295 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004296 * @funcprops \isr_ok
Peter Bigot3d583982020-11-18 08:55:32 -06004297 *
4298 * @param work Address of triggered work item.
4299 *
4300 * @retval 0 Work item canceled.
4301 * @retval -EINVAL Work item is being processed or has completed its work.
4302 */
4303extern int k_work_poll_cancel(struct k_work_poll *work);
4304
4305/** @} */
4306
4307/**
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004308 * @defgroup msgq_apis Message Queue APIs
4309 * @ingroup kernel_apis
4310 * @{
Allan Stephensc98da842016-11-11 15:45:03 -05004311 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004312
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004313/**
4314 * @brief Message Queue Structure
4315 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004316struct k_msgq {
Anas Nashife71293e2019-12-04 20:00:14 -05004317 /** Message queue wait queue */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004318 _wait_q_t wait_q;
Anas Nashife71293e2019-12-04 20:00:14 -05004319 /** Lock */
Andy Rossbe03dbd2018-07-26 10:23:02 -07004320 struct k_spinlock lock;
Anas Nashife71293e2019-12-04 20:00:14 -05004321 /** Message size */
Peter Mitsis026b4ed2016-10-13 11:41:45 -04004322 size_t msg_size;
Anas Nashife71293e2019-12-04 20:00:14 -05004323 /** Maximal number of messages */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004324 uint32_t max_msgs;
Anas Nashife71293e2019-12-04 20:00:14 -05004325 /** Start of message buffer */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004326 char *buffer_start;
Anas Nashife71293e2019-12-04 20:00:14 -05004327 /** End of message buffer */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004328 char *buffer_end;
Anas Nashife71293e2019-12-04 20:00:14 -05004329 /** Read pointer */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004330 char *read_ptr;
Anas Nashife71293e2019-12-04 20:00:14 -05004331 /** Write pointer */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004332 char *write_ptr;
Anas Nashife71293e2019-12-04 20:00:14 -05004333 /** Number of used messages */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004334 uint32_t used_msgs;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004335
Nick Gravesb445f132021-04-12 12:35:18 -07004336 _POLL_EVENT;
4337
Anas Nashife71293e2019-12-04 20:00:14 -05004338	/** Message queue flags */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004339 uint8_t flags;
Ederson de Souzabdaac352021-11-22 14:46:19 -08004340
4341 SYS_PORT_TRACING_TRACKING_FIELD(k_msgq)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004342};
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004343/**
4344 * @cond INTERNAL_HIDDEN
4345 */
4346
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004347
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004348#define Z_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004349 { \
Patrik Flykt4344e272019-03-08 14:19:05 -07004350 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
Peter Mitsis1da807e2016-10-06 11:36:59 -04004351 .msg_size = q_msg_size, \
Charles E. Youse6d01f672019-03-18 10:27:34 -07004352 .max_msgs = q_max_msgs, \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004353 .buffer_start = q_buffer, \
Peter Mitsis1da807e2016-10-06 11:36:59 -04004354 .buffer_end = q_buffer + (q_max_msgs * q_msg_size), \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004355 .read_ptr = q_buffer, \
4356 .write_ptr = q_buffer, \
4357 .used_msgs = 0, \
Nick Gravesb445f132021-04-12 12:35:18 -07004358 _POLL_EVENT_OBJ_INIT(obj) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004359 }
Kumar Galac8b94f42020-09-29 09:52:23 -05004360
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004361/**
4362 * INTERNAL_HIDDEN @endcond
4363 */
4364
Andrew Boie65a9d2a2017-06-27 10:51:23 -07004365
Andrew Boie0fe789f2018-04-12 18:35:56 -07004366#define K_MSGQ_FLAG_ALLOC BIT(0)
4367
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004368/**
4369 * @brief Message Queue Attributes
4370 */
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304371struct k_msgq_attrs {
Anas Nashife71293e2019-12-04 20:00:14 -05004372 /** Message Size */
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304373 size_t msg_size;
Anas Nashife71293e2019-12-04 20:00:14 -05004374 /** Maximal number of messages */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004375 uint32_t max_msgs;
Anas Nashife71293e2019-12-04 20:00:14 -05004376 /** Used messages */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004377 uint32_t used_msgs;
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304378};
4379
Allan Stephensc98da842016-11-11 15:45:03 -05004380
4381/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004382 * @brief Statically define and initialize a message queue.
Peter Mitsis1da807e2016-10-06 11:36:59 -04004383 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004384 * The message queue's ring buffer contains space for @a q_max_msgs messages,
4385 * each of which is @a q_msg_size bytes long. The buffer is aligned to a
Allan Stephensda827222016-11-09 14:23:58 -06004386 * @a q_align -byte boundary, which must be a power of 2. To ensure that each
4387 * message is similarly aligned to this boundary, @a q_msg_size must also be
4388 * a multiple of @a q_align.
Peter Mitsis1da807e2016-10-06 11:36:59 -04004389 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004390 * The message queue can be accessed outside the module where it is defined
4391 * using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004392 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05004393 * @code extern struct k_msgq <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004394 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004395 * @param q_name Name of the message queue.
4396 * @param q_msg_size Message size (in bytes).
4397 * @param q_max_msgs Maximum number of messages that can be queued.
Allan Stephensda827222016-11-09 14:23:58 -06004398 * @param q_align Alignment of the message queue's ring buffer.
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004399 *
Peter Mitsis1da807e2016-10-06 11:36:59 -04004400 */
Nicolas Pitreb1d37422019-06-03 10:51:32 -04004401#define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align) \
4402 static char __noinit __aligned(q_align) \
4403 _k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)]; \
Fabio Baltierif88a4202021-08-04 23:05:54 +01004404 STRUCT_SECTION_ITERABLE(k_msgq, q_name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004405 Z_MSGQ_INITIALIZER(q_name, _k_fifo_buf_##q_name, \
Peter Mitsis1da807e2016-10-06 11:36:59 -04004406 q_msg_size, q_max_msgs)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004407
Peter Mitsisd7a37502016-10-13 11:37:40 -04004408/**
4409 * @brief Initialize a message queue.
4410 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004411 * This routine initializes a message queue object, prior to its first use.
4412 *
Allan Stephensda827222016-11-09 14:23:58 -06004413 * The message queue's ring buffer must contain space for @a max_msgs messages,
4414 * each of which is @a msg_size bytes long. The buffer must be aligned to an
4415 * N-byte boundary, where N is a power of 2 (i.e. 1, 2, 4, ...). To ensure
4416 * that each message is similarly aligned to this boundary, @a q_msg_size
4417 * must also be a multiple of N.
4418 *
Anas Nashif25c87db2021-03-29 10:54:23 -04004419 * @param msgq Address of the message queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004420 * @param buffer Pointer to ring buffer that holds queued messages.
4421 * @param msg_size Message size (in bytes).
Peter Mitsisd7a37502016-10-13 11:37:40 -04004422 * @param max_msgs Maximum number of messages that can be queued.
4423 *
4424 * @return N/A
4425 */
Anas Nashif25c87db2021-03-29 10:54:23 -04004426void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004427 uint32_t max_msgs);
Andrew Boie0fe789f2018-04-12 18:35:56 -07004428
4429/**
4430 * @brief Initialize a message queue.
4431 *
4432 * This routine initializes a message queue object, prior to its first use,
4433 * allocating its internal ring buffer from the calling thread's resource
4434 * pool.
4435 *
4436 * Memory allocated for the ring buffer can be released by calling
4437 * k_msgq_cleanup(), or if userspace is enabled and the msgq object loses
4438 * all of its references.
4439 *
Anas Nashif4b386592019-11-25 09:30:47 -05004440 * @param msgq Address of the message queue.
Andrew Boie0fe789f2018-04-12 18:35:56 -07004441 * @param msg_size Message size (in bytes).
4442 * @param max_msgs Maximum number of messages that can be queued.
4443 *
4444 * @return 0 on success, -ENOMEM if there was insufficient memory in the
4445 * thread's resource pool, or -EINVAL if the size parameters cause
4446 * an integer overflow.
4447 */
Anas Nashif4b386592019-11-25 09:30:47 -05004448__syscall int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004449 uint32_t max_msgs);
Andrew Boie0fe789f2018-04-12 18:35:56 -07004450
Anas Nashife71293e2019-12-04 20:00:14 -05004451/**
Anas Nashif4b386592019-11-25 09:30:47 -05004452 * @brief Release allocated buffer for a queue
Anas Nashife71293e2019-12-04 20:00:14 -05004453 *
4454 * Releases memory allocated for the ring buffer.
Anas Nashif4b386592019-11-25 09:30:47 -05004455 *
4456 * @param msgq message queue to cleanup
4457 *
Anas Nashif11b93652019-06-16 08:43:48 -04004458 * @retval 0 on success
4459 * @retval -EBUSY Queue not empty
Anas Nashife71293e2019-12-04 20:00:14 -05004460 */
Anas Nashif11b93652019-06-16 08:43:48 -04004461int k_msgq_cleanup(struct k_msgq *msgq);
Peter Mitsisd7a37502016-10-13 11:37:40 -04004462
4463/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004464 * @brief Send a message to a message queue.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004465 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004466 * This routine sends a message to message queue @a q.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004467 *
Lauren Murphyf29a2d12020-09-16 21:13:40 -05004468 * @note The message content is copied from @a data into @a msgq and the @a data
4469 * pointer is not retained, so the message content will not be modified
4470 * by this function.
Benjamin Walsh8215ce12016-11-09 19:45:19 -05004471 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004472 * @funcprops \isr_ok
4473 *
Anas Nashif4b386592019-11-25 09:30:47 -05004474 * @param msgq Address of the message queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004475 * @param data Pointer to the message.
Andy Ross78327382020-03-05 15:18:14 -08004476 * @param timeout Non-negative waiting period to add the message,
4477 * or one of the special values K_NO_WAIT and
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01004478 * K_FOREVER.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004479 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004480 * @retval 0 Message sent.
4481 * @retval -ENOMSG Returned without waiting or queue purged.
4482 * @retval -EAGAIN Waiting period timed out.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004483 */
Lauren Murphyf29a2d12020-09-16 21:13:40 -05004484__syscall int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
Peter Mitsisd7a37502016-10-13 11:37:40 -04004485
4486/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004487 * @brief Receive a message from a message queue.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004488 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004489 * This routine receives a message from message queue @a q in a "first in,
4490 * first out" manner.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004491 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004492 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
4493 *
4494 * @funcprops \isr_ok
Benjamin Walsh8215ce12016-11-09 19:45:19 -05004495 *
Anas Nashif4b386592019-11-25 09:30:47 -05004496 * @param msgq Address of the message queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004497 * @param data Address of area to hold the received message.
Andy Ross78327382020-03-05 15:18:14 -08004498 * @param timeout Waiting period to receive the message,
4499 * or one of the special values K_NO_WAIT and
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01004500 * K_FOREVER.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004501 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004502 * @retval 0 Message received.
4503 * @retval -ENOMSG Returned without waiting.
4504 * @retval -EAGAIN Waiting period timed out.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004505 */
Andy Ross78327382020-03-05 15:18:14 -08004506__syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout);
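/* Illustrative sketch (assumed names and sizes): a queue of ten 4-byte
 * messages with a blocking producer and consumer. The message size is a
 * multiple of the 4-byte alignment, as K_MSGQ_DEFINE() requires.
 *
 * @code
 * struct msg {
 *         uint32_t value;
 * };
 * K_MSGQ_DEFINE(sample_msgq, sizeof(struct msg), 10, 4);
 *
 * // producer
 * struct msg tx = { .value = 42 };
 * (void)k_msgq_put(&sample_msgq, &tx, K_FOREVER);
 *
 * // consumer
 * struct msg rx;
 * if (k_msgq_get(&sample_msgq, &rx, K_FOREVER) == 0) {
 *         // rx.value now holds 42
 * }
 * @endcode
 */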
Peter Mitsisd7a37502016-10-13 11:37:40 -04004507
4508/**
Sathish Kuttan3efd8e12018-11-09 21:03:10 -08004509 * @brief Peek/read a message from a message queue.
4510 *
4511 * This routine reads a message from message queue @a msgq in a "first in,
4512 * first out" manner and leaves the message in the queue.
4513 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004514 * @funcprops \isr_ok
Sathish Kuttan3efd8e12018-11-09 21:03:10 -08004515 *
Anas Nashif4b386592019-11-25 09:30:47 -05004516 * @param msgq Address of the message queue.
Sathish Kuttan3efd8e12018-11-09 21:03:10 -08004517 * @param data Address of area to hold the message read from the queue.
4518 *
4519 * @retval 0 Message read.
4520 * @retval -ENOMSG Returned when the queue has no message.
Sathish Kuttan3efd8e12018-11-09 21:03:10 -08004521 */
Anas Nashif4b386592019-11-25 09:30:47 -05004522__syscall int k_msgq_peek(struct k_msgq *msgq, void *data);
Sathish Kuttan3efd8e12018-11-09 21:03:10 -08004523
4524/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004525 * @brief Purge a message queue.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004526 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004527 * This routine discards all unreceived messages in a message queue's ring
4528 * buffer. Any threads that are blocked waiting to send a message to the
4529 * message queue are unblocked and see an -ENOMSG error code.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004530 *
Anas Nashif4b386592019-11-25 09:30:47 -05004531 * @param msgq Address of the message queue.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004532 *
4533 * @return N/A
4534 */
Anas Nashif4b386592019-11-25 09:30:47 -05004535__syscall void k_msgq_purge(struct k_msgq *msgq);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004536
Peter Mitsis67be2492016-10-07 11:44:34 -04004537/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004538 * @brief Get the amount of free space in a message queue.
Peter Mitsis67be2492016-10-07 11:44:34 -04004539 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004540 * This routine returns the number of unused entries in a message queue's
4541 * ring buffer.
Peter Mitsis67be2492016-10-07 11:44:34 -04004542 *
Anas Nashif4b386592019-11-25 09:30:47 -05004543 * @param msgq Address of the message queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004544 *
4545 * @return Number of unused ring buffer entries.
Peter Mitsis67be2492016-10-07 11:44:34 -04004546 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004547__syscall uint32_t k_msgq_num_free_get(struct k_msgq *msgq);
Andrew Boie82edb6e2017-10-02 10:53:06 -07004548
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304549/**
4550 * @brief Get basic attributes of a message queue.
4551 *
4552 * This routine fetches basic attributes of message queue into attr argument.
4553 *
Anas Nashif4b386592019-11-25 09:30:47 -05004554 * @param msgq Address of the message queue.
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304555 * @param attrs pointer to message queue attribute structure.
4556 *
4557 * @return N/A
4558 */
Anas Nashif4b386592019-11-25 09:30:47 -05004559__syscall void k_msgq_get_attrs(struct k_msgq *msgq,
4560 struct k_msgq_attrs *attrs);
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304561
4562
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004563static inline uint32_t z_impl_k_msgq_num_free_get(struct k_msgq *msgq)
Peter Mitsis67be2492016-10-07 11:44:34 -04004564{
Anas Nashif4b386592019-11-25 09:30:47 -05004565 return msgq->max_msgs - msgq->used_msgs;
Peter Mitsis67be2492016-10-07 11:44:34 -04004566}
4567
Peter Mitsisd7a37502016-10-13 11:37:40 -04004568/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004569 * @brief Get the number of messages in a message queue.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004570 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004571 * This routine returns the number of messages in a message queue's ring buffer.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004572 *
Anas Nashif4b386592019-11-25 09:30:47 -05004573 * @param msgq Address of the message queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004574 *
4575 * @return Number of messages.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004576 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004577__syscall uint32_t k_msgq_num_used_get(struct k_msgq *msgq);
Andrew Boie82edb6e2017-10-02 10:53:06 -07004578
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004579static inline uint32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004580{
Anas Nashif4b386592019-11-25 09:30:47 -05004581 return msgq->used_msgs;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004582}
4583
Anas Nashif166f5192018-02-25 08:02:36 -06004584/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05004585
4586/**
Allan Stephensc98da842016-11-11 15:45:03 -05004587 * @defgroup mailbox_apis Mailbox APIs
4588 * @ingroup kernel_apis
4589 * @{
4590 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004591
Anas Nashife71293e2019-12-04 20:00:14 -05004592/**
4593 * @brief Mailbox Message Structure
4594 *
4595 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004596struct k_mbox_msg {
4597 /** internal use only - needed for legacy API support */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004598 uint32_t _mailbox;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004599 /** size of message (in bytes) */
Peter Mitsisd93078c2016-10-14 12:59:37 -04004600 size_t size;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004601 /** application-defined information value */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004602 uint32_t info;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004603 /** sender's message data buffer */
4604 void *tx_data;
4605 /** internal use only - needed for legacy API support */
4606 void *_rx_data;
4607 /** message data block descriptor */
4608 struct k_mem_block tx_block;
4609 /** source thread id */
4610 k_tid_t rx_source_thread;
4611 /** target thread id */
4612 k_tid_t tx_target_thread;
4613 /** internal use only - thread waiting on send (may be a dummy) */
4614 k_tid_t _syncing_thread;
4615#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
4616 /** internal use only - semaphore used during asynchronous send */
4617 struct k_sem *_async_sem;
4618#endif
4619};
Anas Nashife71293e2019-12-04 20:00:14 -05004620/**
4621 * @brief Mailbox Structure
4622 *
4623 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004624struct k_mbox {
Anas Nashife71293e2019-12-04 20:00:14 -05004625 /** Transmit messages queue */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004626 _wait_q_t tx_msg_queue;
Anas Nashife71293e2019-12-04 20:00:14 -05004627 /** Receive message queue */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004628 _wait_q_t rx_msg_queue;
Andy Ross9eeb6b82018-07-25 15:06:24 -07004629 struct k_spinlock lock;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004630
Ederson de Souzabdaac352021-11-22 14:46:19 -08004631 SYS_PORT_TRACING_TRACKING_FIELD(k_mbox)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004632};
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004633/**
4634 * @cond INTERNAL_HIDDEN
4635 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004636
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004637#define Z_MBOX_INITIALIZER(obj) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004638 { \
Patrik Flykt4344e272019-03-08 14:19:05 -07004639 .tx_msg_queue = Z_WAIT_Q_INIT(&obj.tx_msg_queue), \
4640 .rx_msg_queue = Z_WAIT_Q_INIT(&obj.rx_msg_queue), \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004641 }
4642
Peter Mitsis12092702016-10-14 12:57:23 -04004643/**
Allan Stephensc98da842016-11-11 15:45:03 -05004644 * INTERNAL_HIDDEN @endcond
4645 */
4646
4647/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004648 * @brief Statically define and initialize a mailbox.
Peter Mitsis12092702016-10-14 12:57:23 -04004649 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004650 * The mailbox is to be accessed outside the module where it is defined using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004651 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05004652 * @code extern struct k_mbox <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004653 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004654 * @param name Name of the mailbox.
Peter Mitsis12092702016-10-14 12:57:23 -04004655 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004656#define K_MBOX_DEFINE(name) \
Fabio Baltierif88a4202021-08-04 23:05:54 +01004657 STRUCT_SECTION_ITERABLE(k_mbox, name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004658 Z_MBOX_INITIALIZER(name) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004659
Peter Mitsis12092702016-10-14 12:57:23 -04004660/**
4661 * @brief Initialize a mailbox.
4662 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004663 * This routine initializes a mailbox object, prior to its first use.
4664 *
4665 * @param mbox Address of the mailbox.
Peter Mitsis12092702016-10-14 12:57:23 -04004666 *
4667 * @return N/A
4668 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004669extern void k_mbox_init(struct k_mbox *mbox);
4670
Peter Mitsis12092702016-10-14 12:57:23 -04004671/**
4672 * @brief Send a mailbox message in a synchronous manner.
4673 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004674 * This routine sends a message to @a mbox and waits for a receiver to both
4675 * receive and process it. The message data may be in a buffer, in a memory
4676 * pool block, or non-existent (i.e. an empty message).
Peter Mitsis12092702016-10-14 12:57:23 -04004677 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004678 * @param mbox Address of the mailbox.
4679 * @param tx_msg Address of the transmit message descriptor.
Andy Ross78327382020-03-05 15:18:14 -08004680 * @param timeout Waiting period for the message to be received,
4681 * or one of the special values K_NO_WAIT
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004682 * and K_FOREVER. Once the message has been received,
4683 * this routine waits as long as necessary for the message
4684 * to be completely processed.
Peter Mitsis12092702016-10-14 12:57:23 -04004685 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004686 * @retval 0 Message sent.
4687 * @retval -ENOMSG Returned without waiting.
4688 * @retval -EAGAIN Waiting period timed out.
Peter Mitsis12092702016-10-14 12:57:23 -04004689 */
Peter Mitsis40680f62016-10-14 10:04:55 -04004690extern int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
Andy Ross78327382020-03-05 15:18:14 -08004691 k_timeout_t timeout);
Peter Mitsis12092702016-10-14 12:57:23 -04004692
Peter Mitsis12092702016-10-14 12:57:23 -04004693/**
4694 * @brief Send a mailbox message in an asynchronous manner.
4695 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004696 * This routine sends a message to @a mbox without waiting for a receiver
4697 * to process it. The message data may be in a buffer, in a memory pool block,
4698 * or non-existent (i.e. an empty message). Optionally, the semaphore @a sem
4699 * will be given when the message has been both received and completely
4700 * processed by the receiver.
Peter Mitsis12092702016-10-14 12:57:23 -04004701 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004702 * @param mbox Address of the mailbox.
4703 * @param tx_msg Address of the transmit message descriptor.
4704 * @param sem Address of a semaphore, or NULL if none is needed.
Peter Mitsis12092702016-10-14 12:57:23 -04004705 *
4706 * @return N/A
4707 */
Peter Mitsis40680f62016-10-14 10:04:55 -04004708extern void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004709 struct k_sem *sem);
4710
Peter Mitsis12092702016-10-14 12:57:23 -04004711/**
4712 * @brief Receive a mailbox message.
4713 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004714 * This routine receives a message from @a mbox, then optionally retrieves
4715 * its data and disposes of the message.
Peter Mitsis12092702016-10-14 12:57:23 -04004716 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004717 * @param mbox Address of the mailbox.
4718 * @param rx_msg Address of the receive message descriptor.
4719 * @param buffer Address of the buffer to receive data, or NULL to defer data
4720 * retrieval and message disposal until later.
Andy Ross78327382020-03-05 15:18:14 -08004721 * @param timeout Waiting period for a message to be received,
4722 * or one of the special values K_NO_WAIT and K_FOREVER.
Peter Mitsis12092702016-10-14 12:57:23 -04004723 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004724 * @retval 0 Message received.
4725 * @retval -ENOMSG Returned without waiting.
4726 * @retval -EAGAIN Waiting period timed out.
Peter Mitsis12092702016-10-14 12:57:23 -04004727 */
Peter Mitsis40680f62016-10-14 10:04:55 -04004728extern int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg,
Andy Ross78327382020-03-05 15:18:14 -08004729 void *buffer, k_timeout_t timeout);
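/* Illustrative sketch (assumed names and sizes): a synchronous exchange in
 * which the sender blocks until the receiver consumes the message.
 *
 * @code
 * K_MBOX_DEFINE(sample_mbox);
 *
 * // sender thread
 * char tx_data[] = "hello";
 * struct k_mbox_msg tx_msg = {
 *         .size = sizeof(tx_data),
 *         .tx_data = tx_data,
 *         .tx_target_thread = K_ANY,
 * };
 * (void)k_mbox_put(&sample_mbox, &tx_msg, K_FOREVER);
 *
 * // receiver thread
 * char rx_buf[16];
 * struct k_mbox_msg rx_msg = {
 *         .size = sizeof(rx_buf),      // maximum bytes to accept
 *         .rx_source_thread = K_ANY,
 * };
 * (void)k_mbox_get(&sample_mbox, &rx_msg, rx_buf, K_FOREVER);
 * @endcode
 */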
Peter Mitsis12092702016-10-14 12:57:23 -04004730
4731/**
4732 * @brief Retrieve mailbox message data into a buffer.
4733 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004734 * This routine completes the processing of a received message by retrieving
4735 * its data into a buffer, then disposing of the message.
Peter Mitsis12092702016-10-14 12:57:23 -04004736 *
4737 * Alternatively, this routine can be used to dispose of a received message
4738 * without retrieving its data.
4739 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004740 * @param rx_msg Address of the receive message descriptor.
4741 * @param buffer Address of the buffer to receive data, or NULL to discard
4742 * the data.
Peter Mitsis12092702016-10-14 12:57:23 -04004743 *
4744 * @return N/A
4745 */
Peter Mitsis40680f62016-10-14 10:04:55 -04004746extern void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer);
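/*
 * Example (sketch, hypothetical names): deferred retrieval. The receiver
 * first takes only the message header, then decides whether to copy the
 * data out or to discard it.
 *
 * @code
 * void consumer(void)
 * {
 *     struct k_mbox_msg recv_msg;
 *     char buffer[100];
 *
 *     recv_msg.size = sizeof(buffer);          // maximum bytes accepted
 *     recv_msg.rx_source_thread = K_ANY;       // accept from any sender
 *
 *     // NULL buffer defers data retrieval; only the header is filled in.
 *     k_mbox_get(&my_mailbox, &recv_msg, NULL, K_FOREVER);
 *
 *     if (recv_msg.info == 123) {
 *         k_mbox_data_get(&recv_msg, buffer);  // copy data, dispose of message
 *     } else {
 *         k_mbox_data_get(&recv_msg, NULL);    // discard data, dispose of message
 *     }
 * }
 * @endcode
 */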
Peter Mitsis12092702016-10-14 12:57:23 -04004747
Anas Nashif166f5192018-02-25 08:02:36 -06004748/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05004749
4750/**
Anas Nashifce78d162018-05-24 12:43:11 -05004751 * @defgroup pipe_apis Pipe APIs
4752 * @ingroup kernel_apis
4753 * @{
Allan Stephensc98da842016-11-11 15:45:03 -05004754 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004755
Anas Nashifce78d162018-05-24 12:43:11 -05004756/** Pipe Structure */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004757struct k_pipe {
Anas Nashifce78d162018-05-24 12:43:11 -05004758 unsigned char *buffer; /**< Pipe buffer: may be NULL */
4759 size_t size; /**< Buffer size */
4760 size_t bytes_used; /**< # bytes used in buffer */
4761 size_t read_index; /**< Where in buffer to read from */
4762 size_t write_index; /**< Where in buffer to write */
Andy Rossf582b552019-02-05 16:10:18 -08004763 struct k_spinlock lock; /**< Synchronization lock */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004764
4765 struct {
Anas Nashifce78d162018-05-24 12:43:11 -05004766 _wait_q_t readers; /**< Reader wait queue */
4767 _wait_q_t writers; /**< Writer wait queue */
Anas Nashif0ff33d12020-07-13 20:21:56 -04004768 } wait_q; /**< Wait queue */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004769
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004770 uint8_t flags; /**< Flags */
Ederson de Souzabdaac352021-11-22 14:46:19 -08004771
4772 SYS_PORT_TRACING_TRACKING_FIELD(k_pipe)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004773};
4774
Anas Nashifce78d162018-05-24 12:43:11 -05004775/**
4776 * @cond INTERNAL_HIDDEN
4777 */
4778#define K_PIPE_FLAG_ALLOC BIT(0) /**< Buffer was allocated */
4779
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004780#define Z_PIPE_INITIALIZER(obj, pipe_buffer, pipe_buffer_size) \
Krzysztof Chruscinskibe063272019-02-13 11:19:54 +01004781 { \
4782 .buffer = pipe_buffer, \
4783 .size = pipe_buffer_size, \
4784 .bytes_used = 0, \
4785 .read_index = 0, \
4786 .write_index = 0, \
4787 .lock = {}, \
4788 .wait_q = { \
Patrik Flykt4344e272019-03-08 14:19:05 -07004789 .readers = Z_WAIT_Q_INIT(&obj.wait_q.readers), \
4790 .writers = Z_WAIT_Q_INIT(&obj.wait_q.writers) \
Krzysztof Chruscinskibe063272019-02-13 11:19:54 +01004791 }, \
Krzysztof Chruscinskibe063272019-02-13 11:19:54 +01004792 .flags = 0 \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004793 }
4794
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004795/**
Allan Stephensc98da842016-11-11 15:45:03 -05004796 * INTERNAL_HIDDEN @endcond
4797 */
4798
4799/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004800 * @brief Statically define and initialize a pipe.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004801 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004802 * The pipe can be accessed outside the module where it is defined using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004803 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05004804 * @code extern struct k_pipe <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004805 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004806 * @param name Name of the pipe.
4807 * @param pipe_buffer_size Size of the pipe's ring buffer (in bytes),
4808 * or zero if no ring buffer is used.
4809 * @param pipe_align Alignment of the pipe's ring buffer (power of 2).
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004810 *
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004811 */
Andrew Boie44fe8122018-04-12 17:38:12 -07004812#define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align) \
Fabio Baltierif88a4202021-08-04 23:05:54 +01004813 static unsigned char __noinit __aligned(pipe_align) \
Andrew Boie44fe8122018-04-12 17:38:12 -07004814 _k_pipe_buf_##name[pipe_buffer_size]; \
Fabio Baltierif88a4202021-08-04 23:05:54 +01004815 STRUCT_SECTION_ITERABLE(k_pipe, name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004816 Z_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004817
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004818/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004819 * @brief Initialize a pipe.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004820 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004821 * This routine initializes a pipe object, prior to its first use.
4822 *
4823 * @param pipe Address of the pipe.
4824 * @param buffer Address of the pipe's ring buffer, or NULL if no ring buffer
4825 * is used.
4826 * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
4827 * buffer is used.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004828 *
4829 * @return N/A
4830 */
Andrew Boie44fe8122018-04-12 17:38:12 -07004831void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size);
4832
4833/**
4834 * @brief Release a pipe's allocated buffer
4835 *
4836 * If a pipe object was given a dynamically allocated buffer via
4837 * k_pipe_alloc_init(), this will free it. This function does nothing
4838 * if the buffer wasn't dynamically allocated.
4839 *
4840 * @param pipe Address of the pipe.
Anas Nashif361a84d2019-06-16 08:22:08 -04004841 * @retval 0 on success
4842 * @retval -EAGAIN nothing to clean up
Andrew Boie44fe8122018-04-12 17:38:12 -07004843 */
Anas Nashif361a84d2019-06-16 08:22:08 -04004844int k_pipe_cleanup(struct k_pipe *pipe);
Andrew Boie44fe8122018-04-12 17:38:12 -07004845
4846/**
4847 * @brief Initialize a pipe and allocate a buffer for it
4848 *
4849 * Storage for the buffer region will be allocated from the calling thread's
4850 * resource pool. This memory will be released if k_pipe_cleanup() is called,
4851 * or userspace is enabled and the pipe object loses all references to it.
4852 *
4853 * This function should only be called on uninitialized pipe objects.
4854 *
4855 * @param pipe Address of the pipe.
4856 * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
4857 * buffer is used.
4858 * @retval 0 on success
David B. Kinderfcbd8fb2018-05-23 12:06:24 -07004859 * @retval -ENOMEM if memory couldn't be allocated
Andrew Boie44fe8122018-04-12 17:38:12 -07004860 */
4861__syscall int k_pipe_alloc_init(struct k_pipe *pipe, size_t size);
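/*
 * Example (sketch): dynamic pipe setup and teardown, assuming the calling
 * thread has a resource pool to allocate the 256-byte ring buffer from.
 *
 * @code
 * struct k_pipe dyn_pipe;                      // hypothetical object
 *
 * int pipe_setup_teardown(void)
 * {
 *     int ret = k_pipe_alloc_init(&dyn_pipe, 256);
 *
 *     if (ret != 0) {
 *         return ret;                          // typically -ENOMEM
 *     }
 *     // ... use the pipe ...
 *     return k_pipe_cleanup(&dyn_pipe);        // release the buffer
 * }
 * @endcode
 */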
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004862
4863/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004864 * @brief Write data to a pipe.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004865 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004866 * This routine writes up to @a bytes_to_write bytes of data to @a pipe.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004867 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004868 * @param pipe Address of the pipe.
4869 * @param data Address of data to write.
4870 * @param bytes_to_write Size of data (in bytes).
4871 * @param bytes_written Address of area to hold the number of bytes written.
4872 * @param min_xfer Minimum number of bytes to write.
Andy Ross78327382020-03-05 15:18:14 -08004873 * @param timeout Waiting period to wait for the data to be written,
4874 * or one of the special values K_NO_WAIT and K_FOREVER.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004875 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004876 * @retval 0 At least @a min_xfer bytes of data were written.
4877 * @retval -EIO Returned without waiting; zero data bytes were written.
4878 * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004879 * minus one data bytes were written.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004880 */
Andrew Boieb9a05782017-09-29 16:05:32 -07004881__syscall int k_pipe_put(struct k_pipe *pipe, void *data,
4882 size_t bytes_to_write, size_t *bytes_written,
Andy Ross78327382020-03-05 15:18:14 -08004883 size_t min_xfer, k_timeout_t timeout);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004884
4885/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004886 * @brief Read data from a pipe.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004887 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004888 * This routine reads up to @a bytes_to_read bytes of data from @a pipe.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004889 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004890 * @param pipe Address of the pipe.
4891 * @param data Address to place the data read from pipe.
4892 * @param bytes_to_read Maximum number of data bytes to read.
4893 * @param bytes_read Address of area to hold the number of bytes read.
4894 * @param min_xfer Minimum number of data bytes to read.
Andy Ross78327382020-03-05 15:18:14 -08004895 * @param timeout Waiting period to wait for the data to be read,
4896 * or one of the special values K_NO_WAIT and K_FOREVER.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004897 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004898 * @retval 0 At least @a min_xfer bytes of data were read.
Anas Nashif361a84d2019-06-16 08:22:08 -04004899 * @retval -EINVAL invalid parameters supplied
Allan Stephens9ef50f42016-11-16 15:33:31 -05004900 * @retval -EIO Returned without waiting; zero data bytes were read.
4901 * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004902 * minus one data bytes were read.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004903 */
Andrew Boieb9a05782017-09-29 16:05:32 -07004904__syscall int k_pipe_get(struct k_pipe *pipe, void *data,
4905 size_t bytes_to_read, size_t *bytes_read,
Andy Ross78327382020-03-05 15:18:14 -08004906 size_t min_xfer, k_timeout_t timeout);
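/*
 * Example (sketch, hypothetical names): an all-or-nothing write followed
 * by a best-effort read, illustrating the @a min_xfer semantics.
 *
 * @code
 * K_PIPE_DEFINE(my_pipe, 64, 4);               // 64-byte buffer, 4-byte aligned
 *
 * void pipe_demo(void)
 * {
 *     char out[16] = "fifteen bytes..";
 *     char in[16];
 *     size_t written, bytes_read;
 *
 *     // min_xfer == bytes_to_write: succeed only if all 16 bytes fit now.
 *     if (k_pipe_put(&my_pipe, out, sizeof(out), &written,
 *                    sizeof(out), K_NO_WAIT) != 0) {
 *         return;                              // -EIO: pipe full, nothing written
 *     }
 *
 *     // min_xfer == 1: return once at least one byte has been read.
 *     k_pipe_get(&my_pipe, in, sizeof(in), &bytes_read, 1, K_FOREVER);
 * }
 * @endcode
 */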
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004907
4908/**
Christopher Friedt3315f8f2020-05-06 18:43:58 -04004909 * @brief Query the number of bytes that may be read from @a pipe.
4910 *
4911 * @param pipe Address of the pipe.
4912 *
4913 * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
4914 * result is zero for unbuffered pipes.
4915 */
4916__syscall size_t k_pipe_read_avail(struct k_pipe *pipe);
4917
4918/**
4919 * @brief Query the number of bytes that may be written to @a pipe
4920 *
4921 * @param pipe Address of the pipe.
4922 *
4923 * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
4924 * result is zero for unbuffered pipes.
4925 */
4926__syscall size_t k_pipe_write_avail(struct k_pipe *pipe);
4927
Anas Nashif166f5192018-02-25 08:02:36 -06004928/** @} */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004929
Allan Stephensc98da842016-11-11 15:45:03 -05004930/**
4931 * @cond INTERNAL_HIDDEN
4932 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004933
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004934struct k_mem_slab {
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004935 _wait_q_t wait_q;
Nicolas Pitre2bed37e2021-04-13 11:10:22 -04004936 struct k_spinlock lock;
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004937 uint32_t num_blocks;
Peter Mitsisfb02d572016-10-13 16:55:45 -04004938 size_t block_size;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004939 char *buffer;
4940 char *free_list;
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004941 uint32_t num_used;
Kamil Lazowski104f1002020-09-11 14:27:55 +02004942#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
4943 uint32_t max_used;
4944#endif
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004945
Ederson de Souzabdaac352021-11-22 14:46:19 -08004946 SYS_PORT_TRACING_TRACKING_FIELD(k_mem_slab)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004947};
4948
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004949#define Z_MEM_SLAB_INITIALIZER(obj, slab_buffer, slab_block_size, \
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004950 slab_num_blocks) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004951 { \
Patrik Flykt4344e272019-03-08 14:19:05 -07004952 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
Lucas Dietrich36db3862021-09-01 17:11:22 +02004953 .lock = {}, \
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004954 .num_blocks = slab_num_blocks, \
4955 .block_size = slab_block_size, \
4956 .buffer = slab_buffer, \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004957 .free_list = NULL, \
4958 .num_used = 0, \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004959 }
4960
Andrew Boie65a9d2a2017-06-27 10:51:23 -07004961
Peter Mitsis578f9112016-10-07 13:50:31 -04004962/**
Allan Stephensc98da842016-11-11 15:45:03 -05004963 * INTERNAL_HIDDEN @endcond
4964 */
4965
4966/**
4967 * @defgroup mem_slab_apis Memory Slab APIs
4968 * @ingroup kernel_apis
4969 * @{
4970 */
4971
4972/**
Pavel Hübner10471432021-10-24 18:00:08 +02004973 * @brief Statically define and initialize a memory slab in a public (non-static) scope.
Peter Mitsis578f9112016-10-07 13:50:31 -04004974 *
Allan Stephensda827222016-11-09 14:23:58 -06004975 * The memory slab's buffer contains @a slab_num_blocks memory blocks
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004976 * that are @a slab_block_size bytes long. The buffer is aligned to a
Allan Stephensda827222016-11-09 14:23:58 -06004977 * @a slab_align -byte boundary. To ensure that each memory block is similarly
4978 * aligned to this boundary, @a slab_block_size must also be a multiple of
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004979 * @a slab_align.
Peter Mitsis578f9112016-10-07 13:50:31 -04004980 *
Allan Stephensda827222016-11-09 14:23:58 -06004981 * The memory slab can be accessed outside the module where it is defined
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004982 * using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004983 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05004984 * @code extern struct k_mem_slab <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004985 *
Pavel Hübner10471432021-10-24 18:00:08 +02004986 * @note This macro cannot be used together with a static keyword.
4987 * If such a use-case is desired, use @ref K_MEM_SLAB_DEFINE_STATIC
4988 * instead.
4989 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004990 * @param name Name of the memory slab.
4991 * @param slab_block_size Size of each memory block (in bytes).
4992 * @param slab_num_blocks Number of memory blocks.
4993 * @param slab_align Alignment of the memory slab's buffer (power of 2).
Peter Mitsis578f9112016-10-07 13:50:31 -04004994 */
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004995#define K_MEM_SLAB_DEFINE(name, slab_block_size, slab_num_blocks, slab_align) \
Daniel Leunge6f168c2021-07-19 12:10:54 -07004996 char __noinit_named(k_mem_slab_buf_##name) \
4997 __aligned(WB_UP(slab_align)) \
Nicolas Pitre46cd5a02019-05-21 21:40:38 -04004998 _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
Fabio Baltierif88a4202021-08-04 23:05:54 +01004999 STRUCT_SECTION_ITERABLE(k_mem_slab, name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04005000 Z_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
Nicolas Pitre46cd5a02019-05-21 21:40:38 -04005001 WB_UP(slab_block_size), slab_num_blocks)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04005002
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04005003/**
Pavel Hübner10471432021-10-24 18:00:08 +02005004 * @brief Statically define and initialize a memory slab in a private (static) scope.
5005 *
5006 * The memory slab's buffer contains @a slab_num_blocks memory blocks
5007 * that are @a slab_block_size bytes long. The buffer is aligned to a
5008 * @a slab_align -byte boundary. To ensure that each memory block is similarly
5009 * aligned to this boundary, @a slab_block_size must also be a multiple of
5010 * @a slab_align.
5011 *
5012 * @param name Name of the memory slab.
5013 * @param slab_block_size Size of each memory block (in bytes).
5014 * @param slab_num_blocks Number of memory blocks.
5015 * @param slab_align Alignment of the memory slab's buffer (power of 2).
5016 */
5017#define K_MEM_SLAB_DEFINE_STATIC(name, slab_block_size, slab_num_blocks, slab_align) \
5018 static char __noinit_named(k_mem_slab_buf_##name) \
5019 __aligned(WB_UP(slab_align)) \
5020 _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
5021 static STRUCT_SECTION_ITERABLE(k_mem_slab, name) = \
5022 Z_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
5023 WB_UP(slab_block_size), slab_num_blocks)
5024
5025/**
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04005026 * @brief Initialize a memory slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04005027 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005028 * Initializes a memory slab, prior to its first use.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04005029 *
Allan Stephensda827222016-11-09 14:23:58 -06005030 * The memory slab's buffer contains @a slab_num_blocks memory blocks
5031 * that are @a slab_block_size bytes long. The buffer must be aligned to an
Nicolas Pitre46cd5a02019-05-21 21:40:38 -04005032 * N-byte boundary matching a word boundary, where N is a power of 2
5033 * (e.g. 4 on 32-bit systems, 8 on 64-bit systems).
Allan Stephensda827222016-11-09 14:23:58 -06005034 * To ensure that each memory block is similarly aligned to this boundary,
5035 * @a slab_block_size must also be a multiple of N.
5036 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005037 * @param slab Address of the memory slab.
5038 * @param buffer Pointer to buffer used for the memory blocks.
5039 * @param block_size Size of each memory block (in bytes).
5040 * @param num_blocks Number of memory blocks.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04005041 *
Anas Nashifdfc2bbc2019-06-16 09:22:21 -04005042 * @retval 0 on success
5043 * @retval -EINVAL invalid data supplied
5044 *
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04005045 */
Anas Nashifdfc2bbc2019-06-16 09:22:21 -04005046extern int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005047 size_t block_size, uint32_t num_blocks);
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04005048
5049/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005050 * @brief Allocate memory from a memory slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04005051 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005052 * This routine allocates a memory block from a memory slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04005053 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01005054 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
Krzysztof Chruscinskic482a572021-04-19 10:52:34 +02005055 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01005056 *
5057 * @funcprops \isr_ok
Spoorthy Priya Yerabolu04d3c3c2020-09-17 02:54:50 -07005058 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005059 * @param slab Address of the memory slab.
5060 * @param mem Pointer to block address area.
Andy Ross78327382020-03-05 15:18:14 -08005061 * @param timeout Non-negative waiting period to wait for operation to complete.
5062 * Use K_NO_WAIT to return without waiting,
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005063 * or K_FOREVER to wait as long as necessary.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04005064 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05005065 * @retval 0 Memory allocated. The block address area pointed at by @a mem
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005066 * is set to the starting address of the memory block.
Allan Stephens9ef50f42016-11-16 15:33:31 -05005067 * @retval -ENOMEM Returned without waiting.
5068 * @retval -EAGAIN Waiting period timed out.
Anas Nashifdfc2bbc2019-06-16 09:22:21 -04005069 * @retval -EINVAL Invalid data supplied
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04005070 */
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04005071extern int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
Andy Ross78327382020-03-05 15:18:14 -08005072 k_timeout_t timeout);
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04005073
5074/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005075 * @brief Free memory allocated from a memory slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04005076 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005077 * This routine releases a previously allocated memory block back to its
5078 * associated memory slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04005079 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005080 * @param slab Address of the memory slab.
5081 * @param mem Pointer to block address area (as set by k_mem_slab_alloc()).
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04005082 *
5083 * @return N/A
5084 */
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04005085extern void k_mem_slab_free(struct k_mem_slab *slab, void **mem);
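/*
 * Example (sketch): a slab of four 64-byte blocks; allocate one with a
 * bounded wait, then return it.
 *
 * @code
 * K_MEM_SLAB_DEFINE(my_slab, 64, 4, 4);        // 4 blocks of 64 bytes each
 *
 * void slab_demo(void)
 * {
 *     void *block;
 *
 *     if (k_mem_slab_alloc(&my_slab, &block, K_MSEC(100)) == 0) {
 *         // ... use the 64-byte block ...
 *         k_mem_slab_free(&my_slab, &block);   // pass the address of the pointer
 *     }
 *     // else: no block became free within 100 ms (-EAGAIN)
 * }
 * @endcode
 */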
Benjamin Walsh456c6da2016-09-02 18:55:39 -04005086
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04005087/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005088 * @brief Get the number of used blocks in a memory slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04005089 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005090 * This routine gets the number of memory blocks that are currently
5091 * allocated in @a slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04005092 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005093 * @param slab Address of the memory slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04005094 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005095 * @return Number of allocated memory blocks.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04005096 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005097static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04005098{
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04005099 return slab->num_used;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04005100}
5101
Peter Mitsisc001aa82016-10-13 13:53:37 -04005102/**
Kamil Lazowski104f1002020-09-11 14:27:55 +02005103 * @brief Get the maximum number of blocks used so far in a memory slab.
5104 *
5105 * This routine gets the maximum number of memory blocks that were
5106 * allocated in @a slab.
5107 *
5108 * @param slab Address of the memory slab.
5109 *
5110 * @return Maximum number of allocated memory blocks.
5111 */
5112static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
5113{
5114#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
5115 return slab->max_used;
5116#else
5117 ARG_UNUSED(slab);
5118 return 0;
5119#endif
5120}
5121
5122/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005123 * @brief Get the number of unused blocks in a memory slab.
Peter Mitsisc001aa82016-10-13 13:53:37 -04005124 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005125 * This routine gets the number of memory blocks that are currently
5126 * unallocated in @a slab.
Peter Mitsisc001aa82016-10-13 13:53:37 -04005127 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005128 * @param slab Address of the memory slab.
Peter Mitsisc001aa82016-10-13 13:53:37 -04005129 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005130 * @return Number of unallocated memory blocks.
Peter Mitsisc001aa82016-10-13 13:53:37 -04005131 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005132static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
Peter Mitsisc001aa82016-10-13 13:53:37 -04005133{
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04005134 return slab->num_blocks - slab->num_used;
Peter Mitsisc001aa82016-10-13 13:53:37 -04005135}
5136
Anas Nashif166f5192018-02-25 08:02:36 -06005137/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05005138
5139/**
Anas Nashifdbac76f2020-12-09 12:04:53 -05005140 * @addtogroup heap_apis
Allan Stephensc98da842016-11-11 15:45:03 -05005141 * @{
5142 */
5143
Andrew Boieb95e9602020-09-28 13:26:38 -07005144/* kernel synchronized heap struct */
5145
5146struct k_heap {
5147 struct sys_heap heap;
5148 _wait_q_t wait_q;
5149 struct k_spinlock lock;
5150};
5151
Allan Stephensc98da842016-11-11 15:45:03 -05005152/**
Andy Ross0dd83b82020-04-03 10:01:03 -07005153 * @brief Initialize a k_heap
5154 *
5155 * This constructs a synchronized k_heap object over a memory region
5156 * specified by the user. Note that while any alignment and size can
5157 * be passed as valid parameters, internal alignment restrictions
5158 * inside the inner sys_heap mean that not all bytes may be usable as
5159 * allocated memory.
5160 *
5161 * @param h Heap struct to initialize
5162 * @param mem Pointer to memory.
5163 * @param bytes Size of memory region, in bytes
5164 */
5165void k_heap_init(struct k_heap *h, void *mem, size_t bytes);
5166
Maximilian Bachmann34d7c782020-11-13 15:12:31 +01005167/** @brief Allocate aligned memory from a k_heap
5168 *
5169 * Behaves in all ways like k_heap_alloc(), except that the returned
5170 * memory (if available) will have a starting address in memory which
5171 * is a multiple of the specified power-of-two alignment value in
5172 * bytes. The resulting memory can be returned to the heap using
5173 * k_heap_free().
5174 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01005175 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
Krzysztof Chruscinskic482a572021-04-19 10:52:34 +02005176 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01005177 *
5178 * @funcprops \isr_ok
Maximilian Bachmann34d7c782020-11-13 15:12:31 +01005179 *
5180 * @param h Heap from which to allocate
5181 * @param align Alignment in bytes, must be a power of two
5182 * @param bytes Number of bytes requested
5183 * @param timeout How long to wait, or K_NO_WAIT
5184 * @return Pointer to memory the caller can now use
5185 */
5186void *k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes,
5187 k_timeout_t timeout);
5188
Andy Ross0dd83b82020-04-03 10:01:03 -07005189/**
5190 * @brief Allocate memory from a k_heap
5191 *
5192 * Allocates and returns a memory buffer from the memory region owned
5193 * by the heap. If no memory is available immediately, the call will
5194 * block for the specified timeout (constructed via the standard
5195 * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
5196 * freed. If the allocation cannot be performed by the expiration of
5197 * the timeout, NULL will be returned.
5198 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01005199 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
Krzysztof Chruscinskic482a572021-04-19 10:52:34 +02005200 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01005201 *
5202 * @funcprops \isr_ok
Spoorthy Priya Yerabolu04d3c3c2020-09-17 02:54:50 -07005203 *
Andy Ross0dd83b82020-04-03 10:01:03 -07005204 * @param h Heap from which to allocate
5205 * @param bytes Desired size of block to allocate
5206 * @param timeout How long to wait, or K_NO_WAIT
5207 * @return A pointer to valid heap memory, or NULL
5208 */
Torbjörn Leksell80cd9da2021-03-26 13:42:25 +01005209void *k_heap_alloc(struct k_heap *h, size_t bytes,
5210 k_timeout_t timeout);
Andy Ross0dd83b82020-04-03 10:01:03 -07005211
5212/**
5213 * @brief Free memory allocated by k_heap_alloc()
5214 *
5215 * Returns the specified memory block, which must have been returned
5216 * from k_heap_alloc(), to the heap for use by other callers. Passing
5217 * a NULL block is legal, and has no effect.
5218 *
5219 * @param h Heap to which to return the memory
5220 * @param mem A valid memory block, or NULL
5221 */
5222void k_heap_free(struct k_heap *h, void *mem);
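/*
 * Example (sketch): a synchronized heap over a user-supplied region, with
 * a blocking allocation and an aligned allocation.
 *
 * @code
 * static char heap_mem[1024] __aligned(8);     // hypothetical backing store
 * static struct k_heap my_heap;
 *
 * void heap_demo(void)
 * {
 *     k_heap_init(&my_heap, heap_mem, sizeof(heap_mem));
 *
 *     void *p = k_heap_alloc(&my_heap, 128, K_MSEC(50));      // may block
 *     void *q = k_heap_aligned_alloc(&my_heap, 32, 64, K_NO_WAIT);
 *
 *     k_heap_free(&my_heap, q);                // passing NULL would be a no-op
 *     k_heap_free(&my_heap, p);
 * }
 * @endcode
 */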
5223
Andy Rossd3737032021-05-19 09:50:17 -07005224/* Hand-calculated minimum heap sizes needed to return a successful
5225 * 1-byte allocation. See details in lib/os/heap.[ch]
5226 */
5227#define Z_HEAP_MIN_SIZE (sizeof(void *) > 4 ? 56 : 44)
5228
Andy Ross0dd83b82020-04-03 10:01:03 -07005229/**
Daniel Leung10490382021-08-30 10:36:59 -07005230 * @brief Define a static k_heap in the specified linker section
5231 *
5232 * This macro defines and initializes a static memory region and
5233 * k_heap of the requested size in the specified linker section.
5234 * After kernel start, &name can be used as if k_heap_init() had
5235 * been called.
5236 *
5237 * Note that this macro enforces a minimum size on the memory region
5238 * to accommodate metadata requirements. Very small heaps will be
5239 * padded to fit.
5240 *
5241 * @param name Symbol name for the struct k_heap object
5242 * @param bytes Size of memory region, in bytes
5243 * @param in_section __attribute__((section(name)))
5244 */
5245#define Z_HEAP_DEFINE_IN_SECT(name, bytes, in_section) \
5246 char in_section \
5247 __aligned(8) /* CHUNK_UNIT */ \
5248 kheap_##name[MAX(bytes, Z_HEAP_MIN_SIZE)]; \
5249 STRUCT_SECTION_ITERABLE(k_heap, name) = { \
5250 .heap = { \
5251 .init_mem = kheap_##name, \
5252 .init_bytes = MAX(bytes, Z_HEAP_MIN_SIZE), \
5253 }, \
5254 }
5255
5256/**
Andy Ross0dd83b82020-04-03 10:01:03 -07005257 * @brief Define a static k_heap
5258 *
5259 * This macro defines and initializes a static memory region and
5260 * k_heap of the requested size. After kernel start, &name can be
5261 * used as if k_heap_init() had been called.
5262 *
Andy Rossd3737032021-05-19 09:50:17 -07005263 * Note that this macro enforces a minimum size on the memory region
5264 * to accommodate metadata requirements. Very small heaps will be
5265 * padded to fit.
5266 *
Andy Ross0dd83b82020-04-03 10:01:03 -07005267 * @param name Symbol name for the struct k_heap object
5268 * @param bytes Size of memory region, in bytes
5269 */
5270#define K_HEAP_DEFINE(name, bytes) \
Daniel Leung10490382021-08-30 10:36:59 -07005271 Z_HEAP_DEFINE_IN_SECT(name, bytes, \
5272 __noinit_named(kheap_buf_##name))
5273
5274/**
5275 * @brief Define a static k_heap in uncached memory
5276 *
5277 * This macro defines and initializes a static memory region and
5278 * k_heap of the requested size in uncached memory. After kernel
5279 * start, &name can be used as if k_heap_init() had been called.
5280 *
5281 * Note that this macro enforces a minimum size on the memory region
5282 * to accommodate metadata requirements. Very small heaps will be
5283 * padded to fit.
5284 *
5285 * @param name Symbol name for the struct k_heap object
5286 * @param bytes Size of memory region, in bytes
5287 */
5288#define K_HEAP_DEFINE_NOCACHE(name, bytes) \
5289 Z_HEAP_DEFINE_IN_SECT(name, bytes, __nocache)
Andy Ross0dd83b82020-04-03 10:01:03 -07005290
Johan Hedberg7d887cb2018-01-11 20:45:27 +02005291/**
Anas Nashif166f5192018-02-25 08:02:36 -06005292 * @}
Allan Stephensc98da842016-11-11 15:45:03 -05005293 */
5294
5295/**
Anas Nashifdbac76f2020-12-09 12:04:53 -05005296 * @defgroup heap_apis Heap APIs
Allan Stephensc98da842016-11-11 15:45:03 -05005297 * @ingroup kernel_apis
5298 * @{
5299 */
5300
5301/**
Christopher Friedt135ffaf2020-11-26 08:19:10 -05005302 * @brief Allocate memory from the heap with a specified alignment.
5303 *
5304 * This routine provides semantics similar to aligned_alloc(); memory is
5305 * allocated from the heap with a specified alignment. However, one minor
5306 * difference is that k_aligned_alloc() accepts any non-zero @p size,
5307 * whereas aligned_alloc() only accepts a @p size that is an integral
5308 * multiple of @p align.
5309 *
5310 * Above, aligned_alloc() refers to:
5311 * C11 standard (ISO/IEC 9899:2011): 7.22.3.1
5312 * The aligned_alloc function (p: 347-348)
5313 *
5314 * @param align Alignment of memory requested (in bytes).
5315 * @param size Amount of memory requested (in bytes).
5316 *
5317 * @return Address of the allocated memory if successful; otherwise NULL.
5318 */
5319extern void *k_aligned_alloc(size_t align, size_t size);
5320
5321/**
5322 * @brief Allocate memory from the heap.
Peter Mitsis937042c2016-10-13 13:18:26 -04005323 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005324 * This routine provides traditional malloc() semantics. Memory is
Allan Stephens480a1312016-10-13 15:44:48 -05005325 * allocated from the heap memory pool.
Peter Mitsis937042c2016-10-13 13:18:26 -04005326 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005327 * @param size Amount of memory requested (in bytes).
Peter Mitsis937042c2016-10-13 13:18:26 -04005328 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005329 * @return Address of the allocated memory if successful; otherwise NULL.
Peter Mitsis937042c2016-10-13 13:18:26 -04005330 */
Torbjörn Leksell80cd9da2021-03-26 13:42:25 +01005331extern void *k_malloc(size_t size);
Peter Mitsis937042c2016-10-13 13:18:26 -04005332
5333/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005334 * @brief Free memory allocated from heap.
Allan Stephens480a1312016-10-13 15:44:48 -05005335 *
5336 * This routine provides traditional free() semantics. The memory being
Andrew Boiea2480bd2018-04-12 16:59:02 -07005337 * returned must have been allocated from the heap memory pool or
5338 * k_mem_pool_malloc().
Peter Mitsis937042c2016-10-13 13:18:26 -04005339 *
Anas Nashif345fdd52016-12-20 08:36:04 -05005340 * If @a ptr is NULL, no operation is performed.
5341 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005342 * @param ptr Pointer to previously allocated memory.
Peter Mitsis937042c2016-10-13 13:18:26 -04005343 *
5344 * @return N/A
5345 */
5346extern void k_free(void *ptr);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04005347
Allan Stephensc98da842016-11-11 15:45:03 -05005348/**
Andrew Boie7f95e832017-11-08 14:40:01 -08005349 * @brief Allocate memory from heap, array style
5350 *
5351 * This routine provides traditional calloc() semantics. Memory is
5352 * allocated from the heap memory pool and zeroed.
5353 *
5354 * @param nmemb Number of elements in the requested array
5355 * @param size Size of each array element (in bytes).
5356 *
5357 * @return Address of the allocated memory if successful; otherwise NULL.
5358 */
5359extern void *k_calloc(size_t nmemb, size_t size);
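/*
 * Example (sketch): traditional allocation from the kernel heap. This
 * assumes the system heap is configured (CONFIG_HEAP_MEM_POOL_SIZE > 0);
 * otherwise these calls return NULL.
 *
 * @code
 * uint32_t *vec = k_calloc(16, sizeof(uint32_t));  // zero-filled array
 * void *buf = k_aligned_alloc(64, 200);            // 64-byte-aligned block
 *
 * if (vec != NULL && buf != NULL) {
 *     // ... use the memory ...
 * }
 * k_free(buf);                                     // k_free(NULL) is harmless
 * k_free(vec);
 * @endcode
 */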
5360
Anas Nashif166f5192018-02-25 08:02:36 -06005361/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05005362
Benjamin Walshacc68c12017-01-29 18:57:45 -05005363/* polling API - PRIVATE */
5364
Benjamin Walshb0179862017-02-02 16:39:57 -05005365#ifdef CONFIG_POLL
Flavio Ceolin6fdc56d2018-09-18 12:32:27 -07005366#define _INIT_OBJ_POLL_EVENT(obj) do { (obj)->poll_event = NULL; } while (false)
Benjamin Walshb0179862017-02-02 16:39:57 -05005367#else
Flavio Ceolin6fdc56d2018-09-18 12:32:27 -07005368#define _INIT_OBJ_POLL_EVENT(obj) do { } while (false)
Benjamin Walshb0179862017-02-02 16:39:57 -05005369#endif
5370
Benjamin Walshacc68c12017-01-29 18:57:45 -05005371/* private - types bit positions */
5372enum _poll_types_bits {
5373 /* can be used to ignore an event */
5374 _POLL_TYPE_IGNORE,
5375
Flavio Ceolinaecd4ec2018-11-02 12:35:30 -07005376 /* to be signaled by k_poll_signal_raise() */
Benjamin Walshacc68c12017-01-29 18:57:45 -05005377 _POLL_TYPE_SIGNAL,
5378
5379 /* semaphore availability */
5380 _POLL_TYPE_SEM_AVAILABLE,
5381
Anas Nashif56821172020-07-08 14:14:25 -04005382 /* queue/FIFO/LIFO data availability */
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02005383 _POLL_TYPE_DATA_AVAILABLE,
Benjamin Walshacc68c12017-01-29 18:57:45 -05005384
Nick Gravesb445f132021-04-12 12:35:18 -07005385 /* msgq data availability */
5386 _POLL_TYPE_MSGQ_DATA_AVAILABLE,
5387
Benjamin Walshacc68c12017-01-29 18:57:45 -05005388 _POLL_NUM_TYPES
5389};
5390
Aastha Grover83b9f692020-08-20 16:47:11 -07005391#define Z_POLL_TYPE_BIT(type) (1U << ((type) - 1U))
Benjamin Walshacc68c12017-01-29 18:57:45 -05005392
5393/* private - states bit positions */
5394enum _poll_states_bits {
5395 /* default state when creating event */
5396 _POLL_STATE_NOT_READY,
5397
Flavio Ceolinaecd4ec2018-11-02 12:35:30 -07005398 /* signaled by k_poll_signal_raise() */
Benjamin Walshacc68c12017-01-29 18:57:45 -05005399 _POLL_STATE_SIGNALED,
5400
5401 /* semaphore is available */
5402 _POLL_STATE_SEM_AVAILABLE,
5403
Anas Nashif56821172020-07-08 14:14:25 -04005404 /* data is available to read on queue/FIFO/LIFO */
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02005405 _POLL_STATE_DATA_AVAILABLE,
Benjamin Walshacc68c12017-01-29 18:57:45 -05005406
Anas Nashif56821172020-07-08 14:14:25 -04005407 /* queue/FIFO/LIFO wait was cancelled */
Paul Sokolovsky45c0b202018-08-21 23:29:11 +03005408 _POLL_STATE_CANCELLED,
5409
Nick Gravesb445f132021-04-12 12:35:18 -07005410 /* data is available to read on a message queue */
5411 _POLL_STATE_MSGQ_DATA_AVAILABLE,
5412
Benjamin Walshacc68c12017-01-29 18:57:45 -05005413 _POLL_NUM_STATES
5414};
5415
Aastha Grover83b9f692020-08-20 16:47:11 -07005416#define Z_POLL_STATE_BIT(state) (1U << ((state) - 1U))
Benjamin Walshacc68c12017-01-29 18:57:45 -05005417
5418#define _POLL_EVENT_NUM_UNUSED_BITS \
Benjamin Walsh969d4a72017-02-02 11:25:11 -05005419 (32 - (0 \
5420 + 8 /* tag */ \
5421 + _POLL_NUM_TYPES \
5422 + _POLL_NUM_STATES \
5423 + 1 /* modes */ \
5424 ))
Benjamin Walshacc68c12017-01-29 18:57:45 -05005425
Benjamin Walshacc68c12017-01-29 18:57:45 -05005426/* end of polling API - PRIVATE */
5427
5428
5429/**
5430 * @defgroup poll_apis Async polling APIs
5431 * @ingroup kernel_apis
5432 * @{
5433 */
5434
5435/* Public polling API */
5436
5437/* public - values for k_poll_event.type bitfield */
5438#define K_POLL_TYPE_IGNORE 0
Patrik Flykt4344e272019-03-08 14:19:05 -07005439#define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
5440#define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
5441#define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02005442#define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE
Nick Gravesb445f132021-04-12 12:35:18 -07005443#define K_POLL_TYPE_MSGQ_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MSGQ_DATA_AVAILABLE)
Benjamin Walshacc68c12017-01-29 18:57:45 -05005444
5445/* public - polling modes */
5446enum k_poll_modes {
5447 /* polling thread does not take ownership of objects when available */
5448 K_POLL_MODE_NOTIFY_ONLY = 0,
5449
5450 K_POLL_NUM_MODES
5451};
5452
5453/* public - values for k_poll_event.state bitfield */
5454#define K_POLL_STATE_NOT_READY 0
Patrik Flykt4344e272019-03-08 14:19:05 -07005455#define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED)
5456#define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
5457#define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02005458#define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE
Nick Gravesb445f132021-04-12 12:35:18 -07005459#define K_POLL_STATE_MSGQ_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MSGQ_DATA_AVAILABLE)
Patrik Flykt4344e272019-03-08 14:19:05 -07005460#define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED)
Benjamin Walshacc68c12017-01-29 18:57:45 -05005461
5462/* public - poll signal object */
5463struct k_poll_signal {
Anas Nashife71293e2019-12-04 20:00:14 -05005464 /** PRIVATE - DO NOT TOUCH */
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +03005465 sys_dlist_t poll_events;
Benjamin Walshacc68c12017-01-29 18:57:45 -05005466
Anas Nashife71293e2019-12-04 20:00:14 -05005467 /**
Benjamin Walshacc68c12017-01-29 18:57:45 -05005468 * 1 if the event has been signaled, 0 otherwise. Stays set to 1 until
5469 * user resets it to 0.
5470 */
5471 unsigned int signaled;
5472
Anas Nashife71293e2019-12-04 20:00:14 -05005473 /** custom result value passed to k_poll_signal_raise() if needed */
Benjamin Walshacc68c12017-01-29 18:57:45 -05005474 int result;
5475};
5476
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +03005477#define K_POLL_SIGNAL_INITIALIZER(obj) \
Benjamin Walshacc68c12017-01-29 18:57:45 -05005478 { \
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +03005479 .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
Benjamin Walshacc68c12017-01-29 18:57:45 -05005480 .signaled = 0, \
5481 .result = 0, \
5482 }
Anas Nashife71293e2019-12-04 20:00:14 -05005483/**
5484 * @brief Poll Event
5485 *
5486 */
Benjamin Walshacc68c12017-01-29 18:57:45 -05005487struct k_poll_event {
Anas Nashife71293e2019-12-04 20:00:14 -05005488 /** PRIVATE - DO NOT TOUCH */
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +03005489 sys_dnode_t _node;
5490
Anas Nashife71293e2019-12-04 20:00:14 -05005491 /** PRIVATE - DO NOT TOUCH */
Andy Ross202adf52020-11-10 09:54:49 -08005492 struct z_poller *poller;
Benjamin Walshacc68c12017-01-29 18:57:45 -05005493
Anas Nashife71293e2019-12-04 20:00:14 -05005494 /** optional user-specified tag, opaque, untouched by the API */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005495 uint32_t tag:8;
Benjamin Walsh969d4a72017-02-02 11:25:11 -05005496
Anas Nashife71293e2019-12-04 20:00:14 -05005497 /** bitfield of event types (bitwise-ORed K_POLL_TYPE_xxx values) */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005498 uint32_t type:_POLL_NUM_TYPES;
Benjamin Walshacc68c12017-01-29 18:57:45 -05005499
Anas Nashife71293e2019-12-04 20:00:14 -05005500 /** bitfield of event states (bitwise-ORed K_POLL_STATE_xxx values) */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005501 uint32_t state:_POLL_NUM_STATES;
Benjamin Walshacc68c12017-01-29 18:57:45 -05005502
Anas Nashife71293e2019-12-04 20:00:14 -05005503 /** mode of operation, from enum k_poll_modes */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005504 uint32_t mode:1;
Benjamin Walshacc68c12017-01-29 18:57:45 -05005505
Anas Nashife71293e2019-12-04 20:00:14 -05005506 /** unused bits in 32-bit word */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005507 uint32_t unused:_POLL_EVENT_NUM_UNUSED_BITS;
Benjamin Walshacc68c12017-01-29 18:57:45 -05005508
Anas Nashife71293e2019-12-04 20:00:14 -05005509 /** per-type data */
Benjamin Walshacc68c12017-01-29 18:57:45 -05005510 union {
5511 void *obj;
5512 struct k_poll_signal *signal;
5513 struct k_sem *sem;
5514 struct k_fifo *fifo;
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02005515 struct k_queue *queue;
Nick Gravesb445f132021-04-12 12:35:18 -07005516 struct k_msgq *msgq;
Benjamin Walshacc68c12017-01-29 18:57:45 -05005517 };
5518};
5519
Spoorthy Priya Yerabolu9247e8b2020-08-25 03:11:16 -07005520#define K_POLL_EVENT_INITIALIZER(_event_type, _event_mode, _event_obj) \
Benjamin Walshacc68c12017-01-29 18:57:45 -05005521 { \
5522 .poller = NULL, \
Spoorthy Priya Yerabolu9247e8b2020-08-25 03:11:16 -07005523 .type = _event_type, \
Benjamin Walshacc68c12017-01-29 18:57:45 -05005524 .state = K_POLL_STATE_NOT_READY, \
Spoorthy Priya Yerabolu9247e8b2020-08-25 03:11:16 -07005525 .mode = _event_mode, \
Benjamin Walshacc68c12017-01-29 18:57:45 -05005526 .unused = 0, \
Daniel Leung087fb942021-03-24 12:45:01 -07005527 { \
5528 .obj = _event_obj, \
5529 }, \
Benjamin Walsh969d4a72017-02-02 11:25:11 -05005530 }
5531
Spoorthy Priya Yerabolu9247e8b2020-08-25 03:11:16 -07005532#define K_POLL_EVENT_STATIC_INITIALIZER(_event_type, _event_mode, _event_obj, \
Benjamin Walsh969d4a72017-02-02 11:25:11 -05005533 event_tag) \
5534 { \
Benjamin Walsh969d4a72017-02-02 11:25:11 -05005535 .tag = event_tag, \
Spoorthy Priya Yerabolu9247e8b2020-08-25 03:11:16 -07005536 .type = _event_type, \
Benjamin Walsh969d4a72017-02-02 11:25:11 -05005537 .state = K_POLL_STATE_NOT_READY, \
Spoorthy Priya Yerabolu9247e8b2020-08-25 03:11:16 -07005538 .mode = _event_mode, \
Benjamin Walsh969d4a72017-02-02 11:25:11 -05005539 .unused = 0, \
Daniel Leung087fb942021-03-24 12:45:01 -07005540 { \
5541 .obj = _event_obj, \
5542 }, \
Benjamin Walshacc68c12017-01-29 18:57:45 -05005543 }
5544
5545/**
5546 * @brief Initialize one struct k_poll_event instance
5547 *
5548 * After this routine is called on a poll event, the event is ready to be
5549 * placed in an event array to be passed to k_poll().
5550 *
5551 * @param event The event to initialize.
5552 * @param type A bitfield of the types of event, from the K_POLL_TYPE_xxx
5553 * values. Only values that apply to the same object being polled
5554 * can be used together. Choosing K_POLL_TYPE_IGNORE disables the
5555 * event.
Paul Sokolovskycfef9792017-07-18 11:53:06 +03005556 * @param mode Future. Use K_POLL_MODE_NOTIFY_ONLY.
Benjamin Walshacc68c12017-01-29 18:57:45 -05005557 * @param obj Kernel object or poll signal.
5558 *
5559 * @return N/A
5560 */
5561
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005562extern void k_poll_event_init(struct k_poll_event *event, uint32_t type,
Benjamin Walshacc68c12017-01-29 18:57:45 -05005563 int mode, void *obj);
5564
5565/**
5566 * @brief Wait for one or many of multiple poll events to occur
5567 *
5568 * This routine allows a thread to wait concurrently for one or many of
5569 * multiple poll events to have occurred. Such events can be a kernel object
5570 * being available, like a semaphore, or a poll signal event.
5571 *
5572 * When an event notifies that a kernel object is available, the kernel object
5573 * is not "given" to the thread calling k_poll(): it merely signals the fact
5574 * that the object was available when the k_poll() call was in effect. Also,
5575 * all threads trying to acquire an object the regular way, i.e. by pending on
5576 * the object, have precedence over the thread polling on the object. This
5577 * means that the polling thread will never get the poll event on an object
5578 * until the object becomes available and its pend queue is empty. For this
5579 * reason, the k_poll() call is more effective when the objects being polled
5580 * only have one thread, the polling thread, trying to acquire them.
5581 *
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +03005582 * When k_poll() returns 0, the caller should loop on all the events that were
5583 * passed to k_poll() and check the state field for the values that were
5584 * expected and take the associated actions.
Benjamin Walshacc68c12017-01-29 18:57:45 -05005585 *
5586 * Before being reused for another call to k_poll(), the user has to reset the
5587 * state field to K_POLL_STATE_NOT_READY.
5588 *
Andrew Boie3772f772018-05-07 16:52:57 -07005589 * When called from user mode, a temporary memory allocation is required from
5590 * the caller's resource pool.
5591 *
Christian Taedcke7a7c4202020-06-30 12:02:14 +02005592 * @param events An array of events to be polled for.
Benjamin Walshacc68c12017-01-29 18:57:45 -05005593 * @param num_events The number of events in the array.
Andy Ross78327382020-03-05 15:18:14 -08005594 * @param timeout Waiting period for an event to be ready,
5595 * or one of the special values K_NO_WAIT and K_FOREVER.
Benjamin Walshacc68c12017-01-29 18:57:45 -05005596 *
5597 * @retval 0 One or more events are ready.
Benjamin Walshacc68c12017-01-29 18:57:45 -05005598 * @retval -EAGAIN Waiting period timed out.
Paul Sokolovsky45c0b202018-08-21 23:29:11 +03005599 * @retval -EINTR Polling has been interrupted, e.g. with
5600 * k_queue_cancel_wait(). All output events are still set and valid,
5601 * cancelled event(s) will be set to K_POLL_STATE_CANCELLED. In other
5602 * words, -EINTR status means that at least one of output events is
5603 * K_POLL_STATE_CANCELLED.
Andrew Boie3772f772018-05-07 16:52:57 -07005604 * @retval -ENOMEM Thread resource pool insufficient memory (user mode only)
5605 * @retval -EINVAL Bad parameters (user mode only)
Benjamin Walshacc68c12017-01-29 18:57:45 -05005606 */
5607
Andrew Boie3772f772018-05-07 16:52:57 -07005608__syscall int k_poll(struct k_poll_event *events, int num_events,
Andy Ross78327382020-03-05 15:18:14 -08005609 k_timeout_t timeout);
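/*
 * Example (sketch, hypothetical objects): wait for either a semaphore or
 * FIFO data, then re-arm the events before the next call.
 *
 * @code
 * extern struct k_sem my_sem;
 * extern struct k_fifo my_fifo;
 *
 * struct k_poll_event events[2] = {
 *     K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 *                                     K_POLL_MODE_NOTIFY_ONLY,
 *                                     &my_sem, 0),
 *     K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
 *                                     K_POLL_MODE_NOTIFY_ONLY,
 *                                     &my_fifo, 0),
 * };
 *
 * void poll_demo(void)
 * {
 *     if (k_poll(events, 2, K_FOREVER) == 0) {
 *         if (events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
 *             k_sem_take(&my_sem, K_NO_WAIT);   // actually acquire it
 *         }
 *         if (events[1].state == K_POLL_STATE_FIFO_DATA_AVAILABLE) {
 *             void *item = k_fifo_get(&my_fifo, K_NO_WAIT);
 *             // ... process item ...
 *         }
 *     }
 *     events[0].state = K_POLL_STATE_NOT_READY; // reset before reuse
 *     events[1].state = K_POLL_STATE_NOT_READY;
 * }
 * @endcode
 */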
Benjamin Walshacc68c12017-01-29 18:57:45 -05005610
5611/**
Benjamin Walsha304f162017-02-02 16:46:09 -05005612 * @brief Initialize a poll signal object.
5613 *
Flavio Ceolinaecd4ec2018-11-02 12:35:30 -07005614 * Ready a poll signal object to be signaled via k_poll_signal_raise().
Benjamin Walsha304f162017-02-02 16:46:09 -05005615 *
Anas Nashifb503be22021-03-22 08:09:55 -04005616 * @param sig A poll signal.
Benjamin Walsha304f162017-02-02 16:46:09 -05005617 *
5618 * @return N/A
5619 */
5620
Anas Nashifb503be22021-03-22 08:09:55 -04005621__syscall void k_poll_signal_init(struct k_poll_signal *sig);
Andrew Boie3772f772018-05-07 16:52:57 -07005622
5623/**
5624 * @brief Reset a poll signal object's state to unsignaled.
5625 *
Anas Nashifb503be22021-03-22 08:09:55 -04005626 * @param sig A poll signal object
Andrew Boie3772f772018-05-07 16:52:57 -07005627 */
Anas Nashifb503be22021-03-22 08:09:55 -04005628__syscall void k_poll_signal_reset(struct k_poll_signal *sig);
Andrew Boie3772f772018-05-07 16:52:57 -07005629
Andrew Boie3772f772018-05-07 16:52:57 -07005630/**
David B. Kinderfcbd8fb2018-05-23 12:06:24 -07005631 * @brief Fetch the signaled state and result value of a poll signal
Andrew Boie3772f772018-05-07 16:52:57 -07005632 *
Anas Nashifb503be22021-03-22 08:09:55 -04005633 * @param sig A poll signal object
Andrew Boie3772f772018-05-07 16:52:57 -07005634 * @param signaled An integer buffer which will be written nonzero if the
5635 * object was signaled
5636 * @param result An integer destination buffer which will be written with the
David B. Kinderfcbd8fb2018-05-23 12:06:24 -07005637 * result value if the object was signaled, or an undefined
Andrew Boie3772f772018-05-07 16:52:57 -07005638 * value if it was not.
5639 */
Anas Nashifb503be22021-03-22 08:09:55 -04005640__syscall void k_poll_signal_check(struct k_poll_signal *sig,
Andrew Boie3772f772018-05-07 16:52:57 -07005641 unsigned int *signaled, int *result);
Benjamin Walsha304f162017-02-02 16:46:09 -05005642
5643/**
Benjamin Walshacc68c12017-01-29 18:57:45 -05005644 * @brief Signal a poll signal object.
5645 *
5646 * This routine makes ready a poll signal, which is basically a poll event of
5647 * type K_POLL_TYPE_SIGNAL. If a thread was polling on that event, it will be
5648 * made ready to run. A @a result value can be specified.
5649 *
5650 * The poll signal contains a 'signaled' field that, when set by
Flavio Ceolinaecd4ec2018-11-02 12:35:30 -07005651 * k_poll_signal_raise(), stays set until the user sets it back to 0 with
Andrew Boie3772f772018-05-07 16:52:57 -07005652 * k_poll_signal_reset(). It thus has to be reset by the user before being
5653 * passed again to k_poll() or k_poll() will consider it being signaled, and
5654 * will return immediately.
Benjamin Walshacc68c12017-01-29 18:57:45 -05005655 *
Peter A. Bigot773bd982019-04-30 07:06:39 -05005656 * @note The result is stored and the 'signaled' field is set even if
5657 * this function returns an error indicating that an expiring poll was
5658 * not notified. The next k_poll() will detect the missed raise.
5659 *
Anas Nashifb503be22021-03-22 08:09:55 -04005660 * @param sig A poll signal.
Benjamin Walshacc68c12017-01-29 18:57:45 -05005661 * @param result The value to store in the result field of the signal.
5662 *
5663 * @retval 0 The signal was delivered successfully.
5664 * @retval -EAGAIN The polling thread's timeout is in the process of expiring.
5665 */
5666
Anas Nashifb503be22021-03-22 08:09:55 -04005667__syscall int k_poll_signal_raise(struct k_poll_signal *sig, int result);
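/*
 * Example (sketch): one context raises the signal, another polls for it,
 * then rearms both the signal and the event.
 *
 * @code
 * struct k_poll_signal sig;
 * struct k_poll_event evt;
 *
 * void signal_demo(void)
 * {
 *     unsigned int signaled;
 *     int result;
 *
 *     k_poll_signal_init(&sig);
 *     k_poll_event_init(&evt, K_POLL_TYPE_SIGNAL, K_POLL_MODE_NOTIFY_ONLY, &sig);
 *
 *     k_poll_signal_raise(&sig, 0x1337);       // e.g. from an ISR
 *
 *     k_poll(&evt, 1, K_FOREVER);
 *     k_poll_signal_check(&sig, &signaled, &result);
 *     if (signaled != 0) {
 *         k_poll_signal_reset(&sig);           // rearm before the next k_poll()
 *         evt.state = K_POLL_STATE_NOT_READY;
 *     }
 * }
 * @endcode
 */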
Benjamin Walshacc68c12017-01-29 18:57:45 -05005668
Anas Nashif954d5502018-02-25 08:37:28 -06005669/**
5670 * @internal
5671 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005672extern void z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state);
Benjamin Walshacc68c12017-01-29 18:57:45 -05005673
Anas Nashif166f5192018-02-25 08:02:36 -06005674/** @} */
Benjamin Walshacc68c12017-01-29 18:57:45 -05005675
Benjamin Walshc3a2bbb2016-12-14 13:04:36 -05005676/**
Anas Nashif30c3cff2019-01-22 08:18:13 -05005677 * @defgroup cpu_idle_apis CPU Idling APIs
5678 * @ingroup kernel_apis
5679 * @{
5680 */
Anas Nashif30c3cff2019-01-22 08:18:13 -05005681/**
Benjamin Walshc3a2bbb2016-12-14 13:04:36 -05005682 * @brief Make the CPU idle.
5683 *
5684 * This function makes the CPU idle until an event wakes it up.
5685 *
5686 * In a regular system, the idle thread should be the only thread responsible
5687 * for making the CPU idle and triggering any type of power management.
5688 * However, in some more constrained systems, such as a single-threaded system,
5689 * the only thread would be responsible for this if needed.
5690 *
Ioannis Glaropoulos91f6d982020-03-18 23:56:56 +01005691 * @note In some architectures, before returning, the function unmasks interrupts
5692 * unconditionally.
5693 *
Benjamin Walshc3a2bbb2016-12-14 13:04:36 -05005694 * @return N/A
5695 */
Andrew Boie07525a32019-09-21 16:17:23 -07005696static inline void k_cpu_idle(void)
5697{
Andrew Boie4f77c2a2019-11-07 12:43:29 -08005698 arch_cpu_idle();
Andrew Boie07525a32019-09-21 16:17:23 -07005699}
Benjamin Walshc3a2bbb2016-12-14 13:04:36 -05005700
5701/**
5702 * @brief Make the CPU idle in an atomic fashion.
5703 *
Peter Bigot88e756e2020-09-29 10:43:10 -05005704 * Similar to k_cpu_idle(), but must be called with interrupts locked.
5705 *
5706 * Enabling interrupts and entering a low-power mode will be atomic,
5707 * i.e. there will be no period of time where interrupts are enabled before
5708 * the processor enters a low-power mode.
5709 *
5710 * After waking up from the low-power mode, the interrupt lockout state will
5711 * be restored as if by irq_unlock(key).
Benjamin Walshc3a2bbb2016-12-14 13:04:36 -05005712 *
5713 * @param key Interrupt locking key obtained from irq_lock().
5714 *
5715 * @return N/A
5716 */
Andrew Boie07525a32019-09-21 16:17:23 -07005717static inline void k_cpu_atomic_idle(unsigned int key)
5718{
Andrew Boie4f77c2a2019-11-07 12:43:29 -08005719 arch_cpu_atomic_idle(key);
Andrew Boie07525a32019-09-21 16:17:23 -07005720}
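/*
 * Example (sketch): the race-free idle pattern this API enables. The
 * wakeup condition is tested with interrupts locked; if there is nothing
 * to do, re-enabling interrupts and sleeping happen atomically, so a
 * wakeup interrupt arriving in between cannot be lost.
 *
 * @code
 * extern volatile bool work_pending;       // hypothetical wakeup condition
 *
 * unsigned int key = irq_lock();
 *
 * if (!work_pending) {
 *     k_cpu_atomic_idle(key);              // unlock + idle, atomically
 * } else {
 *     irq_unlock(key);
 * }
 * @endcode
 */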
Benjamin Walshc3a2bbb2016-12-14 13:04:36 -05005721
Anas Nashif30c3cff2019-01-22 08:18:13 -05005722/**
5723 * @}
5724 */
Anas Nashif954d5502018-02-25 08:37:28 -06005725
5726/**
5727 * @internal
5728 */
Andrew Boie4f77c2a2019-11-07 12:43:29 -08005729#ifdef ARCH_EXCEPT
Ioannis Glaropoulosdf029232019-10-07 11:24:36 +02005730/* This architecture has direct support for triggering a CPU exception */
Andrew Boie4f77c2a2019-11-07 12:43:29 -08005731#define z_except_reason(reason) ARCH_EXCEPT(reason)
Andrew Boiecdb94d62017-04-18 15:22:05 -07005732#else
5733
Joakim Anderssone04e4c22019-12-20 15:42:38 +01005734#if !defined(CONFIG_ASSERT_NO_FILE_INFO)
5735#define __EXCEPT_LOC() __ASSERT_PRINT("@ %s:%d\n", __FILE__, __LINE__)
5736#else
5737#define __EXCEPT_LOC()
5738#endif
5739
Andrew Boiecdb94d62017-04-18 15:22:05 -07005740/* NOTE: This is the implementation for arches that do not implement
Andrew Boie4f77c2a2019-11-07 12:43:29 -08005741 * ARCH_EXCEPT() to generate a real CPU exception.
Andrew Boiecdb94d62017-04-18 15:22:05 -07005742 *
5743 * We won't have a real exception frame to determine the PC value when
5744 * the oops occurred, so print file and line number before we jump into
5745 * the fatal error handler.
5746 */
Patrik Flykt4344e272019-03-08 14:19:05 -07005747#define z_except_reason(reason) do { \
Joakim Anderssone04e4c22019-12-20 15:42:38 +01005748 __EXCEPT_LOC(); \
Andrew Boie56236372019-07-15 15:22:29 -07005749 z_fatal_error(reason, NULL); \
Flavio Ceolin6fdc56d2018-09-18 12:32:27 -07005750 } while (false)
Andrew Boiecdb94d62017-04-18 15:22:05 -07005751
5752#endif /* ARCH_EXCEPT */
5753
5754/**
5755 * @brief Fatally terminate a thread
5756 *
5757 * This should be called when a thread has encountered an unrecoverable
5758 * runtime condition and needs to terminate. What this ultimately
5759 * means is determined by the _fatal_error_handler() implementation, which
Andrew Boie71ce8ce2019-07-11 14:18:28 -07005760 * will be called with reason code K_ERR_KERNEL_OOPS.
Andrew Boiecdb94d62017-04-18 15:22:05 -07005761 *
5762 * If this is called from ISR context, the default system fatal error handler
5763 * will treat it as an unrecoverable system error, just like k_panic().
5764 */
Andrew Boie71ce8ce2019-07-11 14:18:28 -07005765#define k_oops() z_except_reason(K_ERR_KERNEL_OOPS)
Andrew Boiecdb94d62017-04-18 15:22:05 -07005766
5767/**
5768 * @brief Fatally terminate the system
5769 *
5770 * This should be called when the Zephyr kernel has encountered an
5771 * unrecoverable runtime condition and needs to terminate. What this ultimately
5772 * means is determined by the _fatal_error_handler() implementation, which
Andrew Boie71ce8ce2019-07-11 14:18:28 -07005773 * will be called with reason code K_ERR_KERNEL_PANIC.
Andrew Boiecdb94d62017-04-18 15:22:05 -07005774 */
Andrew Boie71ce8ce2019-07-11 14:18:28 -07005775#define k_panic() z_except_reason(K_ERR_KERNEL_PANIC)
Andrew Boiecdb94d62017-04-18 15:22:05 -07005776
/*
 * private APIs that are utilized by one or more public APIs
 */

/**
 * @internal
 */
extern void z_init_thread_base(struct _thread_base *thread_base,
			       int priority, uint32_t initial_state,
			       unsigned int options);

#ifdef CONFIG_MULTITHREADING
/**
 * @internal
 */
extern void z_init_static_threads(void);
#else
/**
 * @internal
 */
#define z_init_static_threads() do { } while (false)
#endif

/**
 * @internal
 */
extern bool z_is_thread_essential(void);

#ifdef CONFIG_SMP
void z_smp_thread_init(void *arg, struct k_thread *thread);
void z_smp_thread_swap(void);
#endif

/**
 * @internal
 */
extern void z_timer_expiration_handler(struct _timeout *t);
#ifdef CONFIG_PRINTK
/**
 * @brief Emit a character buffer to the console device
 *
 * @param c String of characters to print
 * @param n The length of the string
 *
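 * A minimal usage sketch (illustrative only; this low-level syscall is
 * normally invoked by printk() internals rather than called directly):
 *
 * @code{.c}
 * char msg[] = "hello\n";
 *
 * k_str_out(msg, sizeof(msg) - 1);
 * @endcode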
 */
__syscall void k_str_out(char *c, size_t n);
#endif

/**
 * @brief Disable preservation of floating point context information.
 *
 * This routine informs the kernel that the specified thread
 * will no longer be using the floating point registers.
 *
 * @warning
 * Some architectures apply restrictions on how the disabling of floating
 * point preservation may be requested, see arch_float_disable.
 *
 * @warning
 * This routine should only be used to disable floating point support for
 * a thread that currently has such support enabled.
 *
 * @param thread ID of thread.
 *
 * @retval 0 On success.
 * @retval -ENOTSUP If the floating point disabling is not implemented.
 * @retval -EINVAL If the floating point disabling could not be performed.
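 *
 * A minimal usage sketch (illustrative; assumes @a tid identifies a
 * thread that currently has floating point support enabled):
 *
 * @code{.c}
 * int ret = k_float_disable(tid);
 *
 * if (ret != 0) {
 *         printk("FP context preservation not disabled: %d\n", ret);
 * }
 * @endcode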
 */
__syscall int k_float_disable(struct k_thread *thread);

/**
 * @brief Enable preservation of floating point context information.
 *
 * This routine informs the kernel that the specified thread
 * will use the floating point registers.
 *
 * Invoking this routine initializes the thread's floating point context info
 * to that of an FPU that has been reset. The next time the thread is scheduled
 * by z_swap() it will either inherit an FPU that is guaranteed to be in a
 * "sane" state (if the most recent user of the FPU was cooperatively swapped
 * out) or the thread's own floating point context will be loaded (if the most
 * recent user of the FPU was preempted, or if this thread is the first user
 * of the FPU). Thereafter, the kernel will protect the thread's FP context
 * so that it is not altered during a preemptive context switch.
 *
 * The @a options parameter indicates which floating point register sets will
 * be used by the specified thread.
 *
 * For x86 options:
 *
 * - K_FP_REGS indicates x87 FPU and MMX registers only
 * - K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
 *
 * @warning
 * Some architectures apply restrictions on how the enabling of floating
 * point preservation may be requested, see arch_float_enable.
 *
 * @warning
 * This routine should only be used to enable floating point support for
 * a thread that currently does not have such support enabled.
 *
 * @param thread ID of thread.
 * @param options architecture dependent options
 *
 * @retval 0 On success.
 * @retval -ENOTSUP If the floating point enabling is not implemented.
 * @retval -EINVAL If the floating point enabling could not be performed.
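 *
 * A minimal usage sketch (illustrative; on x86, request SSE context
 * preservation for thread @a tid before it begins floating point work):
 *
 * @code{.c}
 * int ret = k_float_enable(tid, K_SSE_REGS);
 *
 * if (ret != 0) {
 *         printk("FP context preservation not enabled: %d\n", ret);
 * }
 * @endcode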
 */
__syscall int k_float_enable(struct k_thread *thread, unsigned int options);

/**
 * @brief Get the runtime statistics of a thread
 *
 * @param thread ID of thread.
 * @param stats Pointer to struct to copy statistics into.
 * @return -EINVAL if @a thread or @a stats is NULL, otherwise 0
 */
int k_thread_runtime_stats_get(k_tid_t thread,
			       k_thread_runtime_stats_t *stats);

/**
 * @brief Get the runtime statistics of all threads
 *
 * @param stats Pointer to struct to copy statistics into.
 * @return -EINVAL if @a stats is NULL, otherwise 0
 */
int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats);

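/*
 * A minimal usage sketch for the runtime statistics APIs (illustrative
 * only; assumes CONFIG_THREAD_RUNTIME_STATS=y and a valid thread ID in
 * "tid"):
 *
 *	k_thread_runtime_stats_t thread_stats, all_stats;
 *
 *	if (k_thread_runtime_stats_get(tid, &thread_stats) == 0 &&
 *	    k_thread_runtime_stats_all_get(&all_stats) == 0) {
 *		printk("thread cycles: %llu of %llu total\n",
 *		       thread_stats.execution_cycles,
 *		       all_stats.execution_cycles);
 *	}
 */
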
#ifdef __cplusplus
}
#endif

#include <tracing/tracing.h>
#include <syscalls/kernel.h>

#endif /* !_ASMLANGUAGE */

#endif /* ZEPHYR_INCLUDE_KERNEL_H_ */