/*
 * Copyright (c) 2016, Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Public kernel APIs.
 */

#ifndef ZEPHYR_INCLUDE_KERNEL_H_
#define ZEPHYR_INCLUDE_KERNEL_H_

#if !defined(_ASMLANGUAGE)
#include <kernel_includes.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <toolchain.h>
#include <tracing/tracing_macros.h>

#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
#include <timing/timing.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief Kernel APIs
 * @defgroup kernel_apis Kernel APIs
 * @{
 * @}
 */

#define K_ANY NULL
#define K_END NULL

#if CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES == 0
#error Zero available thread priorities defined!
#endif

#define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
#define K_PRIO_PREEMPT(x) (x)

#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
#define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
#define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
#define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
#define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)

#ifdef CONFIG_POLL
#define _POLL_EVENT_OBJ_INIT(obj) \
	.poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
#define _POLL_EVENT sys_dlist_t poll_events
#else
#define _POLL_EVENT_OBJ_INIT(obj)
#define _POLL_EVENT
#endif

struct k_thread;
struct k_mutex;
struct k_sem;
struct k_msgq;
struct k_mbox;
struct k_pipe;
struct k_queue;
struct k_fifo;
struct k_lifo;
struct k_stack;
struct k_mem_slab;
struct k_mem_pool;
struct k_timer;
struct k_poll_event;
struct k_poll_signal;
struct k_mem_domain;
struct k_mem_partition;
struct k_futex;

enum execution_context_types {
	K_ISR = 0,
	K_COOP_THREAD,
	K_PREEMPT_THREAD,
};

/* private, used by k_poll and k_work_poll */
struct k_work_poll;
typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);

/**
 * @addtogroup thread_apis
 * @{
 */

typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
				   void *user_data);

/**
 * @brief Iterate over all the threads in the system.
 *
 * This routine iterates over all the threads in the system and
 * calls the user_cb function for each thread.
 *
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock to protect the _kernel.threads
 * list, which means creation of new threads and termination of existing
 * threads are blocked until this API returns.
 *
 * @return N/A
 */
extern void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);

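/*
 * A minimal usage sketch (not part of the API): count every thread in the
 * system, assuming @kconfig{CONFIG_THREAD_MONITOR} is enabled. The callback
 * and counter names are hypothetical.
 *
 *	static void count_cb(const struct k_thread *thread, void *user_data)
 *	{
 *		int *count = user_data;
 *
 *		ARG_UNUSED(thread);
 *		(*count)++;
 *	}
 *
 *	int count_threads(void)
 *	{
 *		int count = 0;
 *
 *		k_thread_foreach(count_cb, &count);
 *		return count;
 *	}
 */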
/**
 * @brief Iterate over all the threads in the system without locking.
 *
 * This routine works exactly the same as @ref k_thread_foreach
 * but unlocks interrupts when user_cb is executed.
 *
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
 * queue elements. It unlocks it during user callback function processing.
 * If a new task is created while this @c foreach function is in progress,
 * the new task will not be included in the enumeration.
 * If a task is aborted during this enumeration, there is a race and the
 * aborted task may still be included in the enumeration.
 * @note If a task is aborted and the memory occupied by its @c k_thread
 * structure is reused while this @c k_thread_foreach_unlocked is in progress,
 * the system may behave unstably: this function may never return, as it
 * would follow some @c next task pointers, treating the given pointer as a
 * pointer to a k_thread structure when it is now something different.
 * Do not reuse the memory that was occupied by the k_thread structure of an
 * aborted task if it was aborted after this function was called in any
 * context.
 */
extern void k_thread_foreach_unlocked(
	k_thread_user_cb_t user_cb, void *user_data);

/** @} */

/**
 * @defgroup thread_apis Thread APIs
 * @ingroup kernel_apis
 * @{
 */

#endif /* !_ASMLANGUAGE */


/*
 * Thread user options. May be needed by assembly code. Common part uses low
 * bits, arch-specific use high bits.
 */

/**
 * @brief system thread that must not abort
 */
#define K_ESSENTIAL (BIT(0))

#if defined(CONFIG_FPU_SHARING)
/**
 * @brief FPU registers are managed by context switch
 *
 * @details
 * This option indicates that the thread uses the CPU's floating point
 * registers. This instructs the kernel to take additional steps to save
 * and restore the contents of these registers when scheduling the thread.
 * No effect if @kconfig{CONFIG_FPU_SHARING} is not enabled.
 */
#define K_FP_REGS (BIT(1))
#endif

/**
 * @brief user mode thread
 *
 * This thread has dropped from supervisor mode to user mode and consequently
 * has additional restrictions.
 */
#define K_USER (BIT(2))

/**
 * @brief Inherit Permissions
 *
 * @details
 * Indicates that the thread being created should inherit all kernel object
 * permissions from the thread that created it. No effect if
 * @kconfig{CONFIG_USERSPACE} is not enabled.
 */
#define K_INHERIT_PERMS (BIT(3))

/**
 * @brief Callback item state
 *
 * @details
 * This is a single bit of state reserved for "callback manager"
 * utilities (p4wq initially) that need to track whether they are
 * currently running inside a user-provided callback. Effectively it
 * serves as a tiny bit of zero-overhead TLS data.
 */
#define K_CALLBACK_STATE (BIT(4))

#ifdef CONFIG_X86
/* x86 Bitmask definitions for threads user options */

#if defined(CONFIG_FPU_SHARING) && defined(CONFIG_X86_SSE)
/* thread uses SSEx (and also FP) registers */
#define K_SSE_REGS (BIT(7))
#endif
#endif

/* end - thread options */

#if !defined(_ASMLANGUAGE)
/**
 * @brief Create a thread.
 *
 * This routine initializes a thread, then schedules it for execution.
 *
 * The new thread may be scheduled for immediate execution or a delayed start.
 * If the newly spawned thread does not have a delayed start the kernel
 * scheduler may preempt the current thread to allow the new thread to
 * execute.
 *
 * Thread options are architecture-specific, and can include K_ESSENTIAL,
 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
 * them using "|" (the logical OR operator).
 *
 * Stack objects passed to this function must be originally defined with
 * either of these macros in order to be portable:
 *
 * - K_THREAD_STACK_DEFINE() - For stacks that may support either user or
 *   supervisor threads.
 * - K_KERNEL_STACK_DEFINE() - For stacks that may support supervisor
 *   threads only. These stacks use less memory if CONFIG_USERSPACE is
 *   enabled.
 *
 * The stack_size parameter has constraints. It must either be:
 *
 * - The original size value passed to K_THREAD_STACK_DEFINE() or
 *   K_KERNEL_STACK_DEFINE()
 * - The return value of K_THREAD_STACK_SIZEOF(stack) if the stack was
 *   defined with K_THREAD_STACK_DEFINE()
 * - The return value of K_KERNEL_STACK_SIZEOF(stack) if the stack was
 *   defined with K_KERNEL_STACK_DEFINE().
 *
 * Using other values, or sizeof(stack), may produce undefined behavior.
 *
 * @param new_thread Pointer to uninitialized struct k_thread
 * @param stack Pointer to the stack space.
 * @param stack_size Stack size in bytes.
 * @param entry Thread entry function.
 * @param p1 1st entry point parameter.
 * @param p2 2nd entry point parameter.
 * @param p3 3rd entry point parameter.
 * @param prio Thread priority.
 * @param options Thread options.
 * @param delay Scheduling delay, or K_NO_WAIT (for no delay).
 *
 * @return ID of new thread.
 */
__syscall k_tid_t k_thread_create(struct k_thread *new_thread,
				  k_thread_stack_t *stack,
				  size_t stack_size,
				  k_thread_entry_t entry,
				  void *p1, void *p2, void *p3,
				  int prio, uint32_t options, k_timeout_t delay);

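/*
 * A minimal usage sketch: spawn a preemptible worker thread with a statically
 * defined stack and no start delay. The names, stack size, and priority are
 * hypothetical placeholders.
 *
 *	#define MY_STACK_SIZE 1024
 *	#define MY_PRIORITY 5
 *
 *	K_THREAD_STACK_DEFINE(my_stack_area, MY_STACK_SIZE);
 *	static struct k_thread my_thread_data;
 *
 *	static void my_entry_point(void *p1, void *p2, void *p3)
 *	{
 *		// thread body
 *	}
 *
 *	k_tid_t my_tid = k_thread_create(&my_thread_data, my_stack_area,
 *					 K_THREAD_STACK_SIZEOF(my_stack_area),
 *					 my_entry_point, NULL, NULL, NULL,
 *					 MY_PRIORITY, 0, K_NO_WAIT);
 */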
/**
 * @brief Drop a thread's privileges permanently to user mode
 *
 * This allows a supervisor thread to be re-used as a user thread.
 * This function does not return, but control will transfer to the provided
 * entry point as if this was a new user thread.
 *
 * The implementation ensures that the stack buffer contents are erased.
 * Any thread-local storage will be reverted to a pristine state.
 *
 * Memory domain membership, resource pool assignment, kernel object
 * permissions, priority, and thread options are preserved.
 *
 * A common use of this function is to re-use the main thread as a user thread
 * once all supervisor mode-only tasks have been completed.
 *
 * @param entry Function to start executing from
 * @param p1 1st entry point parameter
 * @param p2 2nd entry point parameter
 * @param p3 3rd entry point parameter
 */
extern FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
						   void *p1, void *p2,
						   void *p3);

/**
 * @brief Grant a thread access to a set of kernel objects
 *
 * This is a convenience function. For the provided thread, grant access to
 * the remaining arguments, which must be pointers to kernel objects.
 *
 * The thread object must be initialized (i.e. running). The objects don't
 * need to be.
 * Note that NULL shouldn't be passed as an argument.
 *
 * @param thread Thread to grant access to objects
 * @param ... list of kernel object pointers
 */
#define k_thread_access_grant(thread, ...) \
	FOR_EACH_FIXED_ARG(k_object_access_grant, (;), thread, __VA_ARGS__)

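/*
 * A minimal usage sketch: grant a user-mode thread access to the kernel
 * objects it will use. "worker_tid", "my_sem", and "my_mutex" are
 * hypothetical objects defined elsewhere.
 *
 *	k_thread_access_grant(worker_tid, &my_sem, &my_mutex);
 */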
/**
 * @brief Assign a resource memory pool to a thread
 *
 * By default, threads have no resource pool assigned unless their parent
 * thread has a resource pool, in which case it is inherited. Multiple
 * threads may be assigned to the same memory pool.
 *
 * Changing a thread's resource pool will not migrate allocations from the
 * previous pool.
 *
 * @param thread Target thread to assign a memory pool for resource requests.
 * @param heap Heap object to use for resources,
 *             or NULL if the thread should no longer have a memory pool.
 */
static inline void k_thread_heap_assign(struct k_thread *thread,
					struct k_heap *heap)
{
	thread->resource_pool = heap;
}

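/*
 * A minimal usage sketch: give a thread a private heap to draw resource
 * allocations from. The heap name and size are hypothetical.
 *
 *	K_HEAP_DEFINE(my_resource_heap, 2048);
 *
 *	k_thread_heap_assign(&my_thread_data, &my_resource_heap);
 */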
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
/**
 * @brief Obtain stack usage information for the specified thread
 *
 * User threads will need to have permission on the target thread object.
 *
 * Some hardware may prevent inspection of a stack buffer currently in use.
 * If this API is called from supervisor mode, on the currently running thread,
 * on a platform which selects @kconfig{CONFIG_NO_UNUSED_STACK_INSPECTION}, an
 * error will be generated.
 *
 * @param thread Thread to inspect stack information
 * @param unused_ptr Output parameter, filled in with the unused stack space
 *	of the target thread in bytes.
 * @return 0 on success
 * @return -EBADF Bad thread object (user mode only)
 * @return -EPERM No permissions on thread object (user mode only)
 * @return -ENOTSUP Forbidden by hardware policy
 * @return -EINVAL Thread is uninitialized or exited (user mode only)
 * @return -EFAULT Bad memory address for unused_ptr (user mode only)
 */
__syscall int k_thread_stack_space_get(const struct k_thread *thread,
				       size_t *unused_ptr);
#endif

#if (CONFIG_HEAP_MEM_POOL_SIZE > 0)
/**
 * @brief Assign the system heap as a thread's resource pool
 *
 * Similar to k_thread_heap_assign(), but the thread will use
 * the kernel heap to draw memory.
 *
 * Use with caution, as a malicious thread could perform DoS attacks on the
 * kernel heap.
 *
 * @param thread Target thread to assign the system heap for resource requests
 */
void k_thread_system_pool_assign(struct k_thread *thread);
#endif /* (CONFIG_HEAP_MEM_POOL_SIZE > 0) */

/**
 * @brief Sleep until a thread exits
 *
 * The caller will be put to sleep until the target thread exits, either due
 * to being aborted, self-exiting, or taking a fatal error. This API returns
 * immediately if the thread isn't running.
 *
 * This API may only be called from ISRs with a K_NO_WAIT timeout,
 * where it can be useful as a predicate to detect when a thread has
 * aborted.
 *
 * @param thread Thread to wait to exit
 * @param timeout upper bound time to wait for the thread to exit.
 * @retval 0 success, target thread has exited or wasn't running
 * @retval -EBUSY returned without waiting
 * @retval -EAGAIN waiting period timed out
 * @retval -EDEADLK target thread is joining on the caller, or target thread
 *	is the caller
 */
__syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);

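/*
 * A minimal usage sketch: wait up to one second for a worker thread to exit.
 * "worker" is a hypothetical thread object; a return of -EAGAIN means the
 * thread was still running when the timeout expired.
 *
 *	int ret = k_thread_join(&worker, K_MSEC(1000));
 */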
/**
 * @brief Put the current thread to sleep.
 *
 * This routine puts the current thread to sleep for @a duration,
 * specified as a k_timeout_t object.
 *
 * @note If @a timeout is set to K_FOREVER then the thread is suspended.
 *
 * @param timeout Desired duration of sleep.
 *
 * @return Zero if the requested time has elapsed or the number of milliseconds
 * left to sleep, if the thread was woken up by a \ref k_wakeup call.
 */
__syscall int32_t k_sleep(k_timeout_t timeout);

/**
 * @brief Put the current thread to sleep.
 *
 * This routine puts the current thread to sleep for @a duration milliseconds.
 *
 * @param ms Number of milliseconds to sleep.
 *
 * @return Zero if the requested time has elapsed or the number of milliseconds
 * left to sleep, if the thread was woken up by a \ref k_wakeup call.
 */
static inline int32_t k_msleep(int32_t ms)
{
	return k_sleep(Z_TIMEOUT_MS(ms));
}

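/*
 * A minimal usage sketch: a periodic loop that does work roughly every
 * 100 milliseconds. "do_work" is a hypothetical application function.
 *
 *	while (true) {
 *		do_work();
 *		k_msleep(100);
 *	}
 */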
/**
 * @brief Put the current thread to sleep with microsecond resolution.
 *
 * This function is unlikely to work as expected without kernel tuning.
 * In particular, because the lower bound on the duration of a sleep is
 * the duration of a tick, @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} must be
 * adjusted to achieve the resolution desired. The implications of doing
 * this must be understood before attempting to use k_usleep(). Use with
 * caution.
 *
 * @param us Number of microseconds to sleep.
 *
 * @return Zero if the requested time has elapsed or the number of microseconds
 * left to sleep, if the thread was woken up by a \ref k_wakeup call.
 */
__syscall int32_t k_usleep(int32_t us);

/**
 * @brief Cause the current thread to busy wait.
 *
 * This routine causes the current thread to execute a "do nothing" loop for
 * @a usec_to_wait microseconds.
 *
 * @note The clock used for the microsecond-resolution delay here may
 * be skewed relative to the clock used for system timeouts like
 * k_sleep(). For example k_busy_wait(1000) may take slightly more or
 * less time than k_sleep(K_MSEC(1)), with the offset dependent on
 * clock tolerances.
 *
 * @return N/A
 */
__syscall void k_busy_wait(uint32_t usec_to_wait);

/**
 * @brief Yield the current thread.
 *
 * This routine causes the current thread to yield execution to another
 * thread of the same or higher priority. If there are no other ready threads
 * of the same or higher priority, the routine returns immediately.
 *
 * @return N/A
 */
__syscall void k_yield(void);

/**
 * @brief Wake up a sleeping thread.
 *
 * This routine prematurely wakes up @a thread from sleeping.
 *
 * If @a thread is not currently sleeping, the routine has no effect.
 *
 * @param thread ID of thread to wake.
 *
 * @return N/A
 */
__syscall void k_wakeup(k_tid_t thread);

/**
 * @brief Get thread ID of the current thread.
 *
 * This unconditionally queries the kernel via a system call.
 *
 * @return ID of current thread.
 */
__attribute_const__
__syscall k_tid_t z_current_get(void);

#ifdef CONFIG_THREAD_LOCAL_STORAGE
/* Thread-local cache of current thread ID, set in z_thread_entry() */
extern __thread k_tid_t z_tls_current;
#endif

/**
 * @brief Get thread ID of the current thread.
 *
 * @return ID of current thread.
 */
__attribute_const__
static inline k_tid_t k_current_get(void)
{
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	return z_tls_current;
#else
	return z_current_get();
#endif
}

/**
 * @brief Abort a thread.
 *
 * This routine permanently stops execution of @a thread. The thread is taken
 * off all kernel queues it is part of (i.e. the ready queue, the timeout
 * queue, or a kernel object wait queue). However, any kernel resources the
 * thread might currently own (such as mutexes or memory blocks) are not
 * released. It is the responsibility of the caller of this routine to ensure
 * all necessary cleanup is performed.
 *
 * After k_thread_abort() returns, the thread is guaranteed not to be
 * running or to become runnable anywhere on the system. Normally
 * this is done via blocking the caller (in the same manner as
 * k_thread_join()), but in interrupt context on SMP systems the
 * implementation is required to spin for threads that are running on
 * other CPUs. Note that as specified, this means that on SMP
 * platforms it is possible for application code to create a deadlock
 * condition by simultaneously aborting a cycle of threads using at
 * least one termination from interrupt context. Zephyr cannot detect
 * all such conditions.
 *
 * @param thread ID of thread to abort.
 *
 * @return N/A
 */
__syscall void k_thread_abort(k_tid_t thread);


/**
 * @brief Start an inactive thread
 *
 * If a thread was created with K_FOREVER in the delay parameter, it will
 * not be added to the scheduling queue until this function is called
 * on it.
 *
 * @param thread thread to start
 */
__syscall void k_thread_start(k_tid_t thread);

extern k_ticks_t z_timeout_expires(const struct _timeout *timeout);
extern k_ticks_t z_timeout_remaining(const struct _timeout *timeout);

#ifdef CONFIG_SYS_CLOCK_EXISTS

/**
 * @brief Get time when a thread wakes up, in system ticks
 *
 * This routine computes the system uptime when a waiting thread next
 * executes, in units of system ticks. If the thread is not waiting,
 * it returns current system time.
 */
__syscall k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *t);

static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
						const struct k_thread *t)
{
	return z_timeout_expires(&t->base.timeout);
}

/**
 * @brief Get time remaining before a thread wakes up, in system ticks
 *
 * This routine computes the time remaining before a waiting thread
 * next executes, in units of system ticks. If the thread is not
 * waiting, it returns zero.
 */
__syscall k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *t);

static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
						const struct k_thread *t)
{
	return z_timeout_remaining(&t->base.timeout);
}

#endif /* CONFIG_SYS_CLOCK_EXISTS */

/**
 * @cond INTERNAL_HIDDEN
 */

/* timeout has timed out and is not on _timeout_q anymore */
#define _EXPIRED (-2)

struct _static_thread_data {
	struct k_thread *init_thread;
	k_thread_stack_t *init_stack;
	unsigned int init_stack_size;
	k_thread_entry_t init_entry;
	void *init_p1;
	void *init_p2;
	void *init_p3;
	int init_prio;
	uint32_t init_options;
	int32_t init_delay;
	void (*init_abort)(void);
	const char *init_name;
};

#define Z_THREAD_INITIALIZER(thread, stack, stack_size,          \
			     entry, p1, p2, p3,                  \
			     prio, options, delay, abort, tname) \
	{                                                        \
	.init_thread = (thread),                                 \
	.init_stack = (stack),                                   \
	.init_stack_size = (stack_size),                         \
	.init_entry = (k_thread_entry_t)entry,                   \
	.init_p1 = (void *)p1,                                   \
	.init_p2 = (void *)p2,                                   \
	.init_p3 = (void *)p3,                                   \
	.init_prio = (prio),                                     \
	.init_options = (options),                               \
	.init_delay = (delay),                                   \
	.init_abort = (abort),                                   \
	.init_name = STRINGIFY(tname),                           \
	}

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @brief Statically define and initialize a thread.
 *
 * The thread may be scheduled for immediate execution or a delayed start.
 *
 * Thread options are architecture-specific, and can include K_ESSENTIAL,
 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
 * them using "|" (the logical OR operator).
 *
 * The ID of the thread can be accessed using:
 *
 * @code extern const k_tid_t <name>; @endcode
 *
 * @param name Name of the thread.
 * @param stack_size Stack size in bytes.
 * @param entry Thread entry function.
 * @param p1 1st entry point parameter.
 * @param p2 2nd entry point parameter.
 * @param p3 3rd entry point parameter.
 * @param prio Thread priority.
 * @param options Thread options.
 * @param delay Scheduling delay (in milliseconds), zero for no delay.
 *
 * @internal It has been observed that the x86 compiler by default aligns
 * these _static_thread_data structures to 32-byte boundaries, thereby
 * wasting space. To work around this, force a 4-byte alignment.
 */
#define K_THREAD_DEFINE(name, stack_size,                                \
			entry, p1, p2, p3,                               \
			prio, options, delay)                            \
	K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size);       \
	struct k_thread _k_thread_obj_##name;                            \
	STRUCT_SECTION_ITERABLE(_static_thread_data,                     \
				_k_thread_data_##name) =                 \
		Z_THREAD_INITIALIZER(&_k_thread_obj_##name,              \
				     _k_thread_stack_##name, stack_size, \
				entry, p1, p2, p3, prio, options, delay, \
				NULL, name);                             \
	const k_tid_t name = (k_tid_t)&_k_thread_obj_##name

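/*
 * A minimal usage sketch: statically define a thread that starts at boot
 * with no delay. The entry point, stack size, and priority are hypothetical.
 *
 *	static void blink_entry(void *p1, void *p2, void *p3)
 *	{
 *		// thread body
 *	}
 *
 *	K_THREAD_DEFINE(blink, 512, blink_entry, NULL, NULL, NULL, 5, 0, 0);
 *
 * The generated thread ID can then be used with other thread APIs, e.g.
 * k_thread_priority_set(blink, 4).
 */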
/**
 * @brief Get a thread's priority.
 *
 * This routine gets the priority of @a thread.
 *
 * @param thread ID of thread whose priority is needed.
 *
 * @return Priority of @a thread.
 */
__syscall int k_thread_priority_get(k_tid_t thread);

/**
 * @brief Set a thread's priority.
 *
 * This routine immediately changes the priority of @a thread.
 *
 * Rescheduling can occur immediately depending on the priority @a thread is
 * set to:
 *
 * - If its priority is raised above the priority of the caller of this
 * function, and the caller is preemptible, @a thread will be scheduled in.
 *
 * - If the caller operates on itself, it lowers its priority below that of
 * other threads in the system, and the caller is preemptible, the thread of
 * highest priority will be scheduled in.
 *
 * Priority can be assigned in the range of -CONFIG_NUM_COOP_PRIORITIES to
 * CONFIG_NUM_PREEMPT_PRIORITIES-1, where -CONFIG_NUM_COOP_PRIORITIES is the
 * highest priority.
 *
 * @param thread ID of thread whose priority is to be set.
 * @param prio New priority.
 *
 * @warning Changing the priority of a thread currently involved in mutex
 * priority inheritance may result in undefined behavior.
 *
 * @return N/A
 */
__syscall void k_thread_priority_set(k_tid_t thread, int prio);

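/*
 * A minimal usage sketch: temporarily raise the current thread to a
 * cooperative priority around a latency-sensitive region, then restore the
 * old priority. Assumes CONFIG_NUM_COOP_PRIORITIES > 2;
 * "do_time_critical_work" is a hypothetical application function.
 *
 *	int old_prio = k_thread_priority_get(k_current_get());
 *
 *	k_thread_priority_set(k_current_get(), K_PRIO_COOP(2));
 *	do_time_critical_work();
 *	k_thread_priority_set(k_current_get(), old_prio);
 */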
#ifdef CONFIG_SCHED_DEADLINE
/**
 * @brief Set deadline expiration time for scheduler
 *
 * This sets the "deadline" expiration as a time delta from the
 * current time, in the same units used by k_cycle_get_32(). The
 * scheduler (when deadline scheduling is enabled) will choose the
 * next expiring thread when selecting between threads at the same
 * static priority. Threads at different priorities will be scheduled
 * according to their static priority.
 *
 * @note Deadlines are stored internally using 32 bit unsigned
 * integers. The number of cycles between the "first" deadline in the
 * scheduler queue and the "last" deadline must be less than 2^31 (i.e
 * a signed non-negative quantity). Failure to adhere to this rule
 * may result in scheduled threads running in an incorrect deadline
 * order.
 *
 * @note Despite the API naming, the scheduler makes no guarantees that
 * the thread WILL be scheduled within that deadline, nor does it take
 * extra metadata (like e.g. the "runtime" and "period" parameters in
 * Linux sched_setattr()) that allows the kernel to validate the
 * scheduling for achievability. Such features could be implemented
 * above this call, which is simply input to the priority selection
 * logic.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_DEADLINE} in your project
 * configuration.
 *
 * @param thread A thread on which to set the deadline
 * @param deadline A time delta, in cycle units
 */
__syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
#endif

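/*
 * A minimal usage sketch, assuming CONFIG_SCHED_DEADLINE=y: give the current
 * thread a deadline 10 milliseconds from now, converted to the cycle units
 * this API expects.
 *
 *	k_thread_deadline_set(k_current_get(), k_ms_to_cyc_ceil32(10));
 */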
#ifdef CONFIG_SCHED_CPU_MASK
/**
 * @brief Sets all CPU enable masks to zero
 *
 * After this returns, the thread will no longer be schedulable on any
 * CPUs. The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_clear(k_tid_t thread);

/**
 * @brief Sets all CPU enable masks to one
 *
 * After this returns, the thread will be schedulable on any CPU. The
 * thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_enable_all(k_tid_t thread);

/**
 * @brief Enable thread to run on specified CPU
 *
 * The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @param cpu CPU index
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_enable(k_tid_t thread, int cpu);

/**
 * @brief Prevent thread from running on specified CPU
 *
 * The thread must not be currently runnable.
 *
 * @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @param cpu CPU index
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_disable(k_tid_t thread, int cpu);
#endif

Allan Stephens5a7a86c2016-11-04 13:53:19 -0500818 * @brief Suspend a thread.
Peter Mitsis348eb4c2016-10-26 11:22:14 -0400819 *
Andy Ross50d09422019-11-19 11:20:07 -0800820 * This routine prevents the kernel scheduler from making @a thread
821 * the current thread. All other internal operations on @a thread are
822 * still performed; for example, kernel objects it is waiting on are
823 * still handed to it. Note that any existing timeouts
824 * (e.g. k_sleep(), or a timeout argument to k_sem_take() et. al.)
825 * will be canceled. On resume, the thread will begin running
826 * immediately and return from the blocked call.
Peter Mitsis348eb4c2016-10-26 11:22:14 -0400827 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -0500828 * If @a thread is already suspended, the routine has no effect.
829 *
830 * @param thread ID of thread to suspend.
Peter Mitsis348eb4c2016-10-26 11:22:14 -0400831 *
832 * @return N/A
833 */
Andrew Boie468190a2017-09-29 14:00:48 -0700834__syscall void k_thread_suspend(k_tid_t thread);
Peter Mitsis348eb4c2016-10-26 11:22:14 -0400835
836/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -0500837 * @brief Resume a suspended thread.
Peter Mitsis348eb4c2016-10-26 11:22:14 -0400838 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -0500839 * This routine allows the kernel scheduler to make @a thread the current
840 * thread, when it is next eligible for that role.
Peter Mitsis348eb4c2016-10-26 11:22:14 -0400841 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -0500842 * If @a thread is not currently suspended, the routine has no effect.
843 *
844 * @param thread ID of thread to resume.
Peter Mitsis348eb4c2016-10-26 11:22:14 -0400845 *
846 * @return N/A
847 */
Andrew Boie468190a2017-09-29 14:00:48 -0700848__syscall void k_thread_resume(k_tid_t thread);
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400849
Peter Mitsis348eb4c2016-10-26 11:22:14 -0400850/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -0500851 * @brief Set time-slicing period and scope.
Peter Mitsis348eb4c2016-10-26 11:22:14 -0400852 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -0500853 * This routine specifies how the scheduler will perform time slicing of
854 * preemptible threads.
Peter Mitsis348eb4c2016-10-26 11:22:14 -0400855 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -0500856 * To enable time slicing, @a slice must be non-zero. The scheduler
857 * ensures that no thread runs for more than the specified time limit
858 * before other threads of that priority are given a chance to execute.
859 * Any thread whose priority is higher than @a prio is exempted, and may
David B. Kinder8b986d72017-04-18 15:56:26 -0700860 * execute as long as desired without being preempted due to time slicing.
Peter Mitsis348eb4c2016-10-26 11:22:14 -0400861 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -0500862 * Time slicing only limits the maximum amount of time a thread may continuously
Peter Mitsis348eb4c2016-10-26 11:22:14 -0400863 * execute. Once the scheduler selects a thread for execution, there is no
864 * minimum guaranteed time the thread will execute before threads of greater or
865 * equal priority are scheduled.
866 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -0500867 * When the current thread is the only one of that priority eligible
Peter Mitsis348eb4c2016-10-26 11:22:14 -0400868 * for execution, this routine has no effect; the thread is immediately
869 * rescheduled after the slice period expires.
870 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -0500871 * To disable timeslicing, set both @a slice and @a prio to zero.
872 *
873 * @param slice Maximum time slice length (in milliseconds).
874 * @param prio Highest thread priority level eligible for time slicing.
Peter Mitsis348eb4c2016-10-26 11:22:14 -0400875 *
876 * @return N/A
877 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -0500878extern void k_sched_time_slice_set(int32_t slice, int prio);
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400879
/** @} */

/**
 * @addtogroup isr_apis
 * @{
 */

/**
 * @brief Determine if code is running at interrupt level.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it is a thread or an ISR.
 *
 * @funcprops \isr_ok
 *
 * @return false if invoked by a thread.
 * @return true if invoked by an ISR.
 */
extern bool k_is_in_isr(void);

/**
 * @brief Determine if code is running in a preemptible thread.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it can be preempted by another thread. The routine returns a 'true'
 * value if all of the following conditions are met:
 *
 * - The code is running in a thread, not in an ISR.
 * - The thread's priority is in the preemptible range.
 * - The thread has not locked the scheduler.
 *
 * @funcprops \isr_ok
 *
 * @return 0 if invoked by an ISR or by a cooperative thread.
 * @return Non-zero if invoked by a preemptible thread.
 */
__syscall int k_is_preempt_thread(void);

/**
 * @brief Test whether startup is in the before-main-task phase.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it is being invoked before the kernel is fully active.
 *
 * @funcprops \isr_ok
 *
 * @return true if invoked before post-kernel initialization
 * @return false if invoked during/after post-kernel initialization
 */
static inline bool k_is_pre_kernel(void)
{
	extern bool z_sys_post_kernel; /* in init.c */

	return !z_sys_post_kernel;
}

/**
 * @}
 */

/**
 * @addtogroup thread_apis
 * @{
 */

/**
 * @brief Lock the scheduler.
 *
 * This routine prevents the current thread from being preempted by another
 * thread by instructing the scheduler to treat it as a cooperative thread.
 * If the thread subsequently performs an operation that makes it unready,
 * it will be context switched out in the normal manner. When the thread
 * again becomes the current thread, its non-preemptible status is maintained.
 *
 * This routine can be called recursively.
 *
 * @note k_sched_lock() and k_sched_unlock() should normally be used
 * when the operation being performed can be safely interrupted by ISRs.
 * However, if the amount of processing involved is very small, better
 * performance may be obtained by using irq_lock() and irq_unlock().
 *
 * @return N/A
 */
extern void k_sched_lock(void);

/**
 * @brief Unlock the scheduler.
 *
 * This routine reverses the effect of a previous call to k_sched_lock().
 * A thread must call the routine once for each time it called k_sched_lock()
 * before the thread becomes preemptible.
 *
 * @return N/A
 */
extern void k_sched_unlock(void);

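/*
 * A minimal usage sketch: protect a short, non-atomic update against
 * preemption by other threads (note that ISRs can still run).
 * "shared_counter" is a hypothetical variable shared only between threads.
 *
 *	k_sched_lock();
 *	shared_counter++;
 *	k_sched_unlock();
 */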
/**
 * @brief Set current thread's custom data.
 *
 * This routine sets the custom data for the current thread to @a value.
 *
 * Custom data is not used by the kernel itself, and is freely available
 * for a thread to use as it sees fit. It can be used as a framework
 * upon which to build thread-local storage.
 *
 * @param value New custom data value.
 *
 * @return N/A
 */
__syscall void k_thread_custom_data_set(void *value);

/**
 * @brief Get current thread's custom data.
 *
 * This routine returns the custom data for the current thread.
 *
 * @return Current custom data value.
 */
__syscall void *k_thread_custom_data_get(void);

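/*
 * A minimal usage sketch, assuming @kconfig{CONFIG_THREAD_CUSTOM_DATA} is
 * enabled: use the custom data slot to carry a per-thread integer value.
 *
 *	void set_my_errno(int err)
 *	{
 *		k_thread_custom_data_set((void *)(intptr_t)err);
 *	}
 *
 *	int get_my_errno(void)
 *	{
 *		return (int)(intptr_t)k_thread_custom_data_get();
 *	}
 */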
/**
 * @brief Set current thread name
 *
 * Set the name of the thread to be used when @kconfig{CONFIG_THREAD_MONITOR}
 * is enabled for tracing and debugging.
 *
 * @param thread Thread to set name, or NULL to set the current thread
 * @param str Name string
 * @retval 0 on success
 * @retval -EFAULT Memory access error with supplied string
 * @retval -ENOSYS Thread name configuration option not enabled
 * @retval -EINVAL Thread name too long
 */
__syscall int k_thread_name_set(k_tid_t thread, const char *str);

/**
 * @brief Get thread name
 *
 * Get the name of a thread
 *
 * @param thread Thread ID
 * @retval Thread name, or NULL if configuration not enabled
 */
const char *k_thread_name_get(k_tid_t thread);

/**
 * @brief Copy the thread name into a supplied buffer
 *
 * @param thread Thread to obtain name information
 * @param buf Destination buffer
 * @param size Destination buffer size
 * @retval -ENOSPC Destination buffer too small
 * @retval -EFAULT Memory access error
 * @retval -ENOSYS Thread name feature not enabled
 * @retval 0 Success
 */
__syscall int k_thread_name_copy(k_tid_t thread, char *buf,
				 size_t size);

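/*
 * A minimal usage sketch, assuming @kconfig{CONFIG_THREAD_NAME} is enabled:
 * name the current thread and read the name back into a local buffer.
 *
 *	char buf[CONFIG_THREAD_MAX_NAME_LEN];
 *
 *	k_thread_name_set(NULL, "sensor_poller");
 *	k_thread_name_copy(k_current_get(), buf, sizeof(buf));
 */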
1040/**
Pavlo Hamov8076c802019-07-31 12:43:54 +03001041 * @brief Get thread state string
1042 *
1043 * Get the human friendly thread state string
1044 *
1045 * @param thread_id Thread ID
1046 * @retval Thread state string, empty if no state flag is set
1047 */
1048const char *k_thread_state_str(k_tid_t thread_id);
1049
1050/**
Andy Rosscfe62032018-09-29 07:34:55 -07001051 * @}
1052 */
1053
1054/**
Allan Stephensc2f15a42016-11-17 12:24:22 -05001055 * @addtogroup clock_apis
1056 * @{
1057 */
1058
1059/**
1060 * @brief Generate null timeout delay.
1061 *
Maksim Masalskife1ff2f2019-10-29 16:50:44 +08001062 * This macro generates a timeout delay that instructs a kernel API
Allan Stephensc2f15a42016-11-17 12:24:22 -05001063 * not to wait if the requested operation cannot be performed immediately.
1064 *
1065 * @return Timeout delay value.
1066 */
Andy Ross78327382020-03-05 15:18:14 -08001067#define K_NO_WAIT Z_TIMEOUT_NO_WAIT
Allan Stephensc2f15a42016-11-17 12:24:22 -05001068
1069/**
Andy Rosse1bc5952020-03-09 12:19:54 -07001070 * @brief Generate timeout delay from nanoseconds.
1071 *
1072 * This macro generates a timeout delay that instructs a kernel API to
1073 * wait up to @a t nanoseconds to perform the requested operation.
1074 * Note that timer precision is limited to the tick rate, not the
1075 * requested value.
1076 *
Andy Rosse39bf292020-03-19 10:30:33 -07001077 * @param t Duration in nanoseconds.
Andy Rosse1bc5952020-03-09 12:19:54 -07001078 *
1079 * @return Timeout delay value.
1080 */
1081#define K_NSEC(t) Z_TIMEOUT_NS(t)
1082
1083/**
1084 * @brief Generate timeout delay from microseconds.
1085 *
1086 * This macro generates a timeout delay that instructs a kernel API
1087 * to wait up to @a t microseconds to perform the requested operation.
1088 * Note that timer precision is limited to the tick rate, not the
1089 * requested value.
1090 *
Andy Rosse39bf292020-03-19 10:30:33 -07001091 * @param t Duration in microseconds.
Andy Rosse1bc5952020-03-09 12:19:54 -07001092 *
1093 * @return Timeout delay value.
1094 */
1095#define K_USEC(t) Z_TIMEOUT_US(t)
1096
1097/**
1098 * @brief Generate timeout delay from cycles.
1099 *
1100 * This macro generates a timeout delay that instructs a kernel API
1101 * to wait up to @a t cycles to perform the requested operation.
1102 *
Andy Rosse39bf292020-03-19 10:30:33 -07001103 * @param t Duration in cycles.
Andy Rosse1bc5952020-03-09 12:19:54 -07001104 *
1105 * @return Timeout delay value.
1106 */
1107#define K_CYC(t) Z_TIMEOUT_CYC(t)
1108
1109/**
1110 * @brief Generate timeout delay from system ticks.
1111 *
1112 * This macro generates a timeout delay that instructs a kernel API
1113 * to wait up to @a t ticks to perform the requested operation.
1114 *
Andy Rosse39bf292020-03-19 10:30:33 -07001115 * @param t Duration in system ticks.
Andy Rosse1bc5952020-03-09 12:19:54 -07001116 *
1117 * @return Timeout delay value.
1118 */
1119#define K_TICKS(t) Z_TIMEOUT_TICKS(t)
1120
1121/**
Allan Stephensc2f15a42016-11-17 12:24:22 -05001122 * @brief Generate timeout delay from milliseconds.
1123 *
Maksim Masalskife1ff2f2019-10-29 16:50:44 +08001124 * This macro generates a timeout delay that instructs a kernel API
Allan Stephensc2f15a42016-11-17 12:24:22 -05001125 * to wait up to @a ms milliseconds to perform the requested operation.
1126 *
1127 * @param ms Duration in milliseconds.
1128 *
1129 * @return Timeout delay value.
1130 */
Andy Ross78327382020-03-05 15:18:14 -08001131#define K_MSEC(ms) Z_TIMEOUT_MS(ms)
Allan Stephensc2f15a42016-11-17 12:24:22 -05001132
1133/**
1134 * @brief Generate timeout delay from seconds.
1135 *
Maksim Masalskife1ff2f2019-10-29 16:50:44 +08001136 * This macro generates a timeout delay that instructs a kernel API
Allan Stephensc2f15a42016-11-17 12:24:22 -05001137 * to wait up to @a s seconds to perform the requested operation.
1138 *
1139 * @param s Duration in seconds.
1140 *
1141 * @return Timeout delay value.
1142 */
Johan Hedberg14471692016-11-13 10:52:15 +02001143#define K_SECONDS(s) K_MSEC((s) * MSEC_PER_SEC)
Allan Stephensc2f15a42016-11-17 12:24:22 -05001144
1145/**
1146 * @brief Generate timeout delay from minutes.
Maksim Masalskife1ff2f2019-10-29 16:50:44 +08001147 *
1148 * This macro generates a timeout delay that instructs a kernel API
Allan Stephensc2f15a42016-11-17 12:24:22 -05001149 * to wait up to @a m minutes to perform the requested operation.
1150 *
1151 * @param m Duration in minutes.
1152 *
1153 * @return Timeout delay value.
1154 */
Johan Hedberg14471692016-11-13 10:52:15 +02001155#define K_MINUTES(m) K_SECONDS((m) * 60)
Allan Stephensc2f15a42016-11-17 12:24:22 -05001156
1157/**
1158 * @brief Generate timeout delay from hours.
1159 *
Maksim Masalskife1ff2f2019-10-29 16:50:44 +08001160 * This macro generates a timeout delay that instructs a kernel API
Allan Stephensc2f15a42016-11-17 12:24:22 -05001161 * to wait up to @a h hours to perform the requested operation.
1162 *
1163 * @param h Duration in hours.
1164 *
1165 * @return Timeout delay value.
1166 */
Johan Hedberg14471692016-11-13 10:52:15 +02001167#define K_HOURS(h) K_MINUTES((h) * 60)
1168
Allan Stephensc98da842016-11-11 15:45:03 -05001169/**
Allan Stephensc2f15a42016-11-17 12:24:22 -05001170 * @brief Generate infinite timeout delay.
1171 *
Maksim Masalskife1ff2f2019-10-29 16:50:44 +08001172 * This macro generates a timeout delay that instructs a kernel API
Allan Stephensc2f15a42016-11-17 12:24:22 -05001173 * to wait as long as necessary to perform the requested operation.
1174 *
1175 * @return Timeout delay value.
1176 */
Andy Ross78327382020-03-05 15:18:14 -08001177#define K_FOREVER Z_FOREVER
Allan Stephensc2f15a42016-11-17 12:24:22 -05001178
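/*
 * Example: each of the relative timeout macros above yields a k_timeout_t
 * that can be passed directly to any blocking kernel API. A minimal
 * sketch (my_sem and consumer() are hypothetical names):
 *
 * @code
 * struct k_sem my_sem;
 *
 * void consumer(void)
 * {
 *	k_sem_init(&my_sem, 0, 1);
 *
 *	(void)k_sem_take(&my_sem, K_NO_WAIT);	// poll, never block
 *	(void)k_sem_take(&my_sem, K_MSEC(50));	// block up to 50 ms
 *	(void)k_sem_take(&my_sem, K_FOREVER);	// block until available
 * }
 * @endcode
 */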
Andy Rosse1bc5952020-03-09 12:19:54 -07001179#ifdef CONFIG_TIMEOUT_64BIT
1180
Allan Stephensc2f15a42016-11-17 12:24:22 -05001181/**
Andy Rosse39bf292020-03-19 10:30:33 -07001182 * @brief Generates an absolute/uptime timeout value from system ticks
Andy Ross4c7b77a2020-03-09 09:35:35 -07001183 *
1184 * This macro generates a timeout delay that represents an expiration
Andy Rosse39bf292020-03-19 10:30:33 -07001185 * at the absolute uptime value specified, in system ticks. That is, the
Andy Ross4c7b77a2020-03-09 09:35:35 -07001186 * timeout will expire immediately after the system uptime reaches the
1187 * specified tick count.
1188 *
1189 * @param t Tick uptime value
1190 * @return Timeout delay value
1191 */
Martin Jäger19c2f782020-11-09 10:14:53 +01001192#define K_TIMEOUT_ABS_TICKS(t) \
1193 Z_TIMEOUT_TICKS(Z_TICK_ABS((k_ticks_t)MAX(t, 0)))
Andy Ross4c7b77a2020-03-09 09:35:35 -07001194
1195/**
Andy Rosse39bf292020-03-19 10:30:33 -07001196 * @brief Generates an absolute/uptime timeout value from milliseconds
Andy Ross4c7b77a2020-03-09 09:35:35 -07001197 *
1198 * This macro generates a timeout delay that represents an expiration
1199 * at the absolute uptime value specified, in milliseconds. That is,
1200 * the timeout will expire immediately after the system uptime reaches
1201 * the specified time.
1202 *
1203 * @param t Millisecond uptime value
1204 * @return Timeout delay value
1205 */
1206#define K_TIMEOUT_ABS_MS(t) K_TIMEOUT_ABS_TICKS(k_ms_to_ticks_ceil64(t))
1207
1208/**
Andy Rosse39bf292020-03-19 10:30:33 -07001209 * @brief Generates an absolute/uptime timeout value from microseconds
Andy Rosse1bc5952020-03-09 12:19:54 -07001210 *
1211 * This macro generates a timeout delay that represents an expiration
1212 * at the absolute uptime value specified, in microseconds. That is,
1213 * the timeout will expire immediately after the system uptime reaches
1214 * the specified time. Note that timer precision is limited by the
1215 * system tick rate and not the requested timeout value.
1216 *
1217 * @param t Microsecond uptime value
1218 * @return Timeout delay value
1219 */
1220#define K_TIMEOUT_ABS_US(t) K_TIMEOUT_ABS_TICKS(k_us_to_ticks_ceil64(t))
1221
1222/**
Andy Rosse39bf292020-03-19 10:30:33 -07001223 * @brief Generates an absolute/uptime timeout value from nanoseconds
Andy Rosse1bc5952020-03-09 12:19:54 -07001224 *
1225 * This macro generates a timeout delay that represents an expiration
1226 * at the absolute uptime value specified, in nanoseconds. That is,
1227 * the timeout will expire immediately after the system uptime reaches
1228 * the specified time. Note that timer precision is limited by the
1229 * system tick rate and not the requested timeout value.
1230 *
1231 * @param t Nanosecond uptime value
1232 * @return Timeout delay value
1233 */
1234#define K_TIMEOUT_ABS_NS(t) K_TIMEOUT_ABS_TICKS(k_ns_to_ticks_ceil64(t))
1235
1236/**
Andy Rosse39bf292020-03-19 10:30:33 -07001237 * @brief Generates an absolute/uptime timeout value from system cycles
Andy Rosse1bc5952020-03-09 12:19:54 -07001238 *
1239 * This macro generates a timeout delay that represents an expiration
1240 * at the absolute uptime value specified, in cycles. That is, the
1241 * timeout will expire immediately after the system uptime reaches the
1242 * specified time. Note that timer precision is limited by the system
1243 * tick rate and not the requested timeout value.
1244 *
1245 * @param t Cycle uptime value
1246 * @return Timeout delay value
1247 */
1248#define K_TIMEOUT_ABS_CYC(t) K_TIMEOUT_ABS_TICKS(k_cyc_to_ticks_ceil64(t))
1249
1250#endif
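/*
 * Example: absolute timeouts express a deadline rather than a duration,
 * which avoids cumulative drift in periodic work. A sketch assuming
 * CONFIG_TIMEOUT_64BIT=y; do_work() is a hypothetical function:
 *
 * @code
 * void periodic_thread(void)
 * {
 *	int64_t next = k_uptime_ticks() + k_ms_to_ticks_ceil64(100);
 *
 *	for (;;) {
 *		(void)k_sleep(K_TIMEOUT_ABS_TICKS(next)); // sleep to deadline
 *		do_work();
 *		next += k_ms_to_ticks_ceil64(100); // next deadline, no drift
 *	}
 * }
 * @endcode
 */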
1251
1252/**
Anas Nashif166f5192018-02-25 08:02:36 -06001253 * @}
Allan Stephensc2f15a42016-11-17 12:24:22 -05001254 */
1255
1256/**
Allan Stephensc98da842016-11-11 15:45:03 -05001257 * @cond INTERNAL_HIDDEN
1258 */
Benjamin Walsha9604bd2016-09-21 11:05:56 -04001259
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001260struct k_timer {
1261 /*
1262 * _timeout structure must be first here if we want to use
1263 * dynamic timer allocation. timeout.node is used in the doubly-linked
1264 * list of free timers
1265 */
1266 struct _timeout timeout;
1267
Allan Stephens45bfa372016-10-12 12:39:42 -05001268 /* wait queue for the (single) thread waiting on this timer */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001269 _wait_q_t wait_q;
1270
1271 /* runs in ISR context */
Flavio Ceolin4b35dd22018-11-16 19:06:59 -08001272 void (*expiry_fn)(struct k_timer *timer);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001273
1274 /* runs in the context of the thread that calls k_timer_stop() */
Flavio Ceolin4b35dd22018-11-16 19:06:59 -08001275 void (*stop_fn)(struct k_timer *timer);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001276
1277 /* timer period */
Andy Ross78327382020-03-05 15:18:14 -08001278 k_timeout_t period;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001279
Allan Stephens45bfa372016-10-12 12:39:42 -05001280 /* timer status */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001281 uint32_t status;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001282
Benjamin Walshe4e98f92017-01-12 19:38:53 -05001283 /* user-specific data, also used to support legacy features */
1284 void *user_data;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001285
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001286};
1287
Patrik Flykt97b3bd12019-03-12 15:15:42 -06001288#define Z_TIMER_INITIALIZER(obj, expiry, stop) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001289 { \
Krzysztof Chruscinskibe063272019-02-13 11:19:54 +01001290 .timeout = { \
1291 .node = {},\
Peter Bigote37c7852020-07-07 12:34:05 -05001292 .fn = z_timer_expiration_handler, \
Krzysztof Chruscinskibe063272019-02-13 11:19:54 +01001293 .dticks = 0, \
Krzysztof Chruscinskibe063272019-02-13 11:19:54 +01001294 }, \
Patrik Flykt4344e272019-03-08 14:19:05 -07001295 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
Allan Stephens1342adb2016-11-03 13:54:53 -05001296 .expiry_fn = expiry, \
1297 .stop_fn = stop, \
1298 .status = 0, \
Benjamin Walshe4e98f92017-01-12 19:38:53 -05001299 .user_data = 0, \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001300 }
1301
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001302/**
Allan Stephensc98da842016-11-11 15:45:03 -05001303 * INTERNAL_HIDDEN @endcond
1304 */
1305
1306/**
1307 * @defgroup timer_apis Timer APIs
1308 * @ingroup kernel_apis
1309 * @{
1310 */
1311
1312/**
Allan Stephens5eceb852016-11-16 10:16:30 -05001313 * @typedef k_timer_expiry_t
1314 * @brief Timer expiry function type.
1315 *
1316 * A timer's expiry function is executed by the system clock interrupt handler
1317 * each time the timer expires. The expiry function is optional, and is only
1318 * invoked if the timer has been initialized with one.
1319 *
1320 * @param timer Address of timer.
1321 *
1322 * @return N/A
1323 */
1324typedef void (*k_timer_expiry_t)(struct k_timer *timer);
1325
1326/**
1327 * @typedef k_timer_stop_t
1328 * @brief Timer stop function type.
1329 *
1330 * A timer's stop function is executed if the timer is stopped prematurely.
Peter A. Bigot82a98d72020-09-21 05:34:56 -05001331 * The function runs in the context of the call that stops the timer. As
1332 * k_timer_stop() can be invoked from an ISR, the stop function must be
1333 * callable from interrupt context (isr-ok).
1334 *
Allan Stephens5eceb852016-11-16 10:16:30 -05001335 * The stop function is optional, and is only invoked if the timer has been
1336 * initialized with one.
1337 *
1338 * @param timer Address of timer.
1339 *
1340 * @return N/A
1341 */
1342typedef void (*k_timer_stop_t)(struct k_timer *timer);
1343
1344/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001345 * @brief Statically define and initialize a timer.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001346 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001347 * The timer can be accessed outside the module where it is defined using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001348 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05001349 * @code extern struct k_timer <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001350 *
1351 * @param name Name of the timer variable.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001352 * @param expiry_fn Function to invoke each time the timer expires.
1353 * @param stop_fn Function to invoke if the timer is stopped while running.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001354 */
Allan Stephens1342adb2016-11-03 13:54:53 -05001355#define K_TIMER_DEFINE(name, expiry_fn, stop_fn) \
Fabio Baltierif88a4202021-08-04 23:05:54 +01001356 STRUCT_SECTION_ITERABLE(k_timer, name) = \
Patrik Flykt97b3bd12019-03-12 15:15:42 -06001357 Z_TIMER_INITIALIZER(name, expiry_fn, stop_fn)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001358
Allan Stephens45bfa372016-10-12 12:39:42 -05001359/**
1360 * @brief Initialize a timer.
1361 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001362 * This routine initializes a timer, prior to its first use.
Allan Stephens45bfa372016-10-12 12:39:42 -05001363 *
1364 * @param timer Address of timer.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001365 * @param expiry_fn Function to invoke each time the timer expires.
1366 * @param stop_fn Function to invoke if the timer is stopped while running.
Allan Stephens45bfa372016-10-12 12:39:42 -05001367 *
1368 * @return N/A
1369 */
1370extern void k_timer_init(struct k_timer *timer,
Allan Stephens5eceb852016-11-16 10:16:30 -05001371 k_timer_expiry_t expiry_fn,
1372 k_timer_stop_t stop_fn);
Andy Ross8d8b2ac2016-09-23 10:08:54 -07001373
Allan Stephens45bfa372016-10-12 12:39:42 -05001374/**
1375 * @brief Start a timer.
1376 *
1377 * This routine starts a timer, and resets its status to zero. The timer
1378 * begins counting down using the specified duration and period values.
1379 *
1380 * Attempting to start a timer that is already running is permitted.
1381 * The timer's status is reset to zero and the timer begins counting down
1382 * using the new duration and period values.
1383 *
1384 * @param timer Address of timer.
Andy Ross78327382020-03-05 15:18:14 -08001385 * @param duration Initial timer duration.
1386 * @param period Timer period.
Allan Stephens45bfa372016-10-12 12:39:42 -05001387 *
1388 * @return N/A
1389 */
Andrew Boiea354d492017-09-29 16:22:28 -07001390__syscall void k_timer_start(struct k_timer *timer,
Andy Ross78327382020-03-05 15:18:14 -08001391 k_timeout_t duration, k_timeout_t period);
Allan Stephens45bfa372016-10-12 12:39:42 -05001392
1393/**
1394 * @brief Stop a timer.
1395 *
1396 * This routine stops a running timer prematurely. The timer's stop function,
1397 * if one exists, is invoked by the caller.
1398 *
1399 * Attempting to stop a timer that is not running is permitted, but has no
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001400 * effect on the timer.
Allan Stephens45bfa372016-10-12 12:39:42 -05001401 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001402 * @note The stop handler has to be callable from ISRs if @a k_timer_stop is to
1403 * be called from ISRs.
1404 *
1405 * @funcprops \isr_ok
Anas Nashif4fb12ae2017-02-01 20:06:55 -05001406 *
Allan Stephens45bfa372016-10-12 12:39:42 -05001407 * @param timer Address of timer.
1408 *
1409 * @return N/A
1410 */
Andrew Boiea354d492017-09-29 16:22:28 -07001411__syscall void k_timer_stop(struct k_timer *timer);
Allan Stephens45bfa372016-10-12 12:39:42 -05001412
1413/**
1414 * @brief Read timer status.
1415 *
1416 * This routine reads the timer's status, which indicates the number of times
1417 * it has expired since its status was last read.
1418 *
1419 * Calling this routine resets the timer's status to zero.
1420 *
1421 * @param timer Address of timer.
1422 *
1423 * @return Timer status.
1424 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001425__syscall uint32_t k_timer_status_get(struct k_timer *timer);
Allan Stephens45bfa372016-10-12 12:39:42 -05001426
1427/**
1428 * @brief Synchronize thread to timer expiration.
1429 *
1430 * This routine blocks the calling thread until the timer's status is non-zero
1431 * (indicating that it has expired at least once since it was last examined)
1432 * or the timer is stopped. If the timer status is already non-zero,
1433 * or the timer is already stopped, the caller continues without waiting.
1434 *
1435 * Calling this routine resets the timer's status to zero.
1436 *
1437 * This routine must not be used by interrupt handlers, since they are not
1438 * allowed to block.
1439 *
1440 * @param timer Address of timer.
1441 *
1442 * @return Timer status.
1443 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001444__syscall uint32_t k_timer_status_sync(struct k_timer *timer);
Allan Stephens45bfa372016-10-12 12:39:42 -05001445
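/*
 * Example: a periodic timer paired with a thread that paces itself on the
 * expirations. A minimal sketch; my_timer, my_expiry(), pacer_thread() and
 * the 100 ms period are illustrative:
 *
 * @code
 * void my_expiry(struct k_timer *timer)
 * {
 *	// runs in interrupt context on every expiration
 * }
 *
 * K_TIMER_DEFINE(my_timer, my_expiry, NULL);
 *
 * void pacer_thread(void)
 * {
 *	// fire after 100 ms, then every 100 ms thereafter
 *	k_timer_start(&my_timer, K_MSEC(100), K_MSEC(100));
 *
 *	for (;;) {
 *		uint32_t expirations = k_timer_status_sync(&my_timer);
 *
 *		// expirations > 1 means processing fell behind
 *	}
 * }
 * @endcode
 */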
Andy Ross5a5d3da2020-03-09 13:59:15 -07001446#ifdef CONFIG_SYS_CLOCK_EXISTS
1447
1448/**
Andy Rosse39bf292020-03-19 10:30:33 -07001449 * @brief Get next expiration time of a timer, in system ticks
Andy Ross5a5d3da2020-03-09 13:59:15 -07001450 *
1451 * This routine returns the future system uptime reached at the next
1452 * time of expiration of the timer, in units of system ticks. If the
1453 * timer is not running, current system time is returned.
1454 *
1455 * @param timer The timer object
1456 * @return Uptime of expiration, in ticks
1457 */
Peter Bigot0ab314f2020-11-16 15:28:59 -06001458__syscall k_ticks_t k_timer_expires_ticks(const struct k_timer *timer);
Andy Ross5a5d3da2020-03-09 13:59:15 -07001459
Peter Bigot0ab314f2020-11-16 15:28:59 -06001460static inline k_ticks_t z_impl_k_timer_expires_ticks(
1461 const struct k_timer *timer)
Andy Ross5a5d3da2020-03-09 13:59:15 -07001462{
1463 return z_timeout_expires(&timer->timeout);
1464}
1465
1466/**
Andy Rosse39bf292020-03-19 10:30:33 -07001467 * @brief Get time remaining before a timer next expires, in system ticks
Andy Ross5a5d3da2020-03-09 13:59:15 -07001468 *
1469 * This routine computes the time remaining before a running timer
1470 * next expires, in units of system ticks. If the timer is not
1471 * running, it returns zero.
 *
 * @param timer The timer object
 * @return Remaining time until expiration, in ticks
1472 */
Peter Bigot0ab314f2020-11-16 15:28:59 -06001473__syscall k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer);
Andy Ross5a5d3da2020-03-09 13:59:15 -07001474
Peter Bigot0ab314f2020-11-16 15:28:59 -06001475static inline k_ticks_t z_impl_k_timer_remaining_ticks(
1476 const struct k_timer *timer)
Andy Ross5a5d3da2020-03-09 13:59:15 -07001477{
1478 return z_timeout_remaining(&timer->timeout);
1479}
Andy Ross52e444b2018-09-28 09:06:37 -07001480
Allan Stephens45bfa372016-10-12 12:39:42 -05001481/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001482 * @brief Get time remaining before a timer next expires.
Allan Stephens45bfa372016-10-12 12:39:42 -05001483 *
1484 * This routine computes the (approximate) time remaining before a running
1485 * timer next expires. If the timer is not running, it returns zero.
1486 *
1487 * @param timer Address of timer.
1488 *
1489 * @return Remaining time (in milliseconds).
1490 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001491static inline uint32_t k_timer_remaining_get(struct k_timer *timer)
Johan Hedbergf99ad3f2016-12-09 10:39:49 +02001492{
Andy Ross5a5d3da2020-03-09 13:59:15 -07001493 return k_ticks_to_ms_floor32(k_timer_remaining_ticks(timer));
Johan Hedbergf99ad3f2016-12-09 10:39:49 +02001494}
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001495
Andy Ross5a5d3da2020-03-09 13:59:15 -07001496#endif /* CONFIG_SYS_CLOCK_EXISTS */
1497
Allan Stephensc98da842016-11-11 15:45:03 -05001498/**
Benjamin Walshe4e98f92017-01-12 19:38:53 -05001499 * @brief Associate user-specific data with a timer.
1500 *
1501 * This routine records the @a user_data with the @a timer, to be retrieved
1502 * later.
1503 *
1504 * It can be used e.g. in a timer handler shared across multiple subsystems to
1505 * retrieve data specific to the subsystem this timer is associated with.
1506 *
1507 * @param timer Address of timer.
1508 * @param user_data User data to associate with the timer.
1509 *
1510 * @return N/A
1511 */
Andrew Boiea354d492017-09-29 16:22:28 -07001512__syscall void k_timer_user_data_set(struct k_timer *timer, void *user_data);
1513
Anas Nashif954d5502018-02-25 08:37:28 -06001514/**
1515 * @internal
1516 */
Patrik Flykt4344e272019-03-08 14:19:05 -07001517static inline void z_impl_k_timer_user_data_set(struct k_timer *timer,
Andrew Boiea354d492017-09-29 16:22:28 -07001518 void *user_data)
Benjamin Walshe4e98f92017-01-12 19:38:53 -05001519{
1520 timer->user_data = user_data;
1521}
1522
1523/**
1524 * @brief Retrieve the user-specific data from a timer.
1525 *
1526 * @param timer Address of timer.
1527 *
1528 * @return The user data.
1529 */
Peter A. Bigotf1b86ca2020-09-18 16:24:57 -05001530__syscall void *k_timer_user_data_get(const struct k_timer *timer);
Andrew Boiea354d492017-09-29 16:22:28 -07001531
Peter A. Bigotf1b86ca2020-09-18 16:24:57 -05001532static inline void *z_impl_k_timer_user_data_get(const struct k_timer *timer)
Benjamin Walshe4e98f92017-01-12 19:38:53 -05001533{
1534 return timer->user_data;
1535}
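/*
 * Example: user data lets a single expiry handler serve several
 * subsystems. A sketch; struct my_ctx, my_shared_expiry() and my_setup()
 * are illustrative names:
 *
 * @code
 * struct my_ctx {
 *	int id;
 * };
 *
 * void my_shared_expiry(struct k_timer *timer)
 * {
 *	struct my_ctx *ctx = k_timer_user_data_get(timer);
 *
 *	// dispatch on ctx->id
 * }
 *
 * void my_setup(struct k_timer *timer, struct my_ctx *ctx)
 * {
 *	k_timer_init(timer, my_shared_expiry, NULL);
 *	k_timer_user_data_set(timer, ctx);
 * }
 * @endcode
 */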
1536
Anas Nashif166f5192018-02-25 08:02:36 -06001537/** @} */
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001538
Allan Stephensc98da842016-11-11 15:45:03 -05001539/**
Allan Stephensc2f15a42016-11-17 12:24:22 -05001540 * @addtogroup clock_apis
Jian Kanga3ec9b02021-07-21 09:52:14 +08001541 * @ingroup kernel_apis
Allan Stephensc98da842016-11-11 15:45:03 -05001542 * @{
1543 */
Allan Stephens45bfa372016-10-12 12:39:42 -05001544
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001545/**
Andy Rosse39bf292020-03-19 10:30:33 -07001546 * @brief Get system uptime, in system ticks.
Andy Ross914205c2020-03-10 15:26:38 -07001547 *
1548 * This routine returns the elapsed time since the system booted, in
Gerard Marull-Paretas72ab6b22021-06-28 17:13:40 +02001549 * ticks (cf. @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC}), which is the
Andy Ross914205c2020-03-10 15:26:38 -07001550 * fundamental unit of resolution of kernel timekeeping.
1551 *
1552 * @return Current uptime in ticks.
1553 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001554__syscall int64_t k_uptime_ticks(void);
Andy Ross914205c2020-03-10 15:26:38 -07001555
1556/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001557 * @brief Get system uptime.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001558 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001559 * This routine returns the elapsed time since the system booted,
1560 * in milliseconds.
1561 *
David B. Kinder00c41ea2019-06-10 11:13:33 -07001562 * @note
David B. Kinder00c41ea2019-06-10 11:13:33 -07001563 * While this function returns time in milliseconds, it does
1564 * not mean it has millisecond resolution. The actual resolution depends on
Gerard Marull-Paretas72ab6b22021-06-28 17:13:40 +02001565 * the @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
Paul Sokolovsky65d51fd2019-02-04 22:44:50 +03001566 *
1567 * @return Current uptime in milliseconds.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001568 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001569static inline int64_t k_uptime_get(void)
Andy Ross914205c2020-03-10 15:26:38 -07001570{
1571 return k_ticks_to_ms_floor64(k_uptime_ticks());
1572}
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001573
Ramesh Thomas89ffd442017-02-05 19:37:19 -08001574/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001575 * @brief Get system uptime (32-bit version).
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001576 *
Peter Bigota6067a32019-08-28 08:19:26 -05001577 * This routine returns the lower 32 bits of the system uptime in
1578 * milliseconds.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001579 *
Peter Bigota6067a32019-08-28 08:19:26 -05001580 * Because correct conversion requires full precision of the system
1581 * clock there is no benefit to using this over k_uptime_get() unless
1582 * you know the application will never run long enough for the system
1583 * clock to approach 2^32 ticks. Calls to this function may involve
1584 * interrupt blocking and 64-bit math.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001585 *
David B. Kinder00c41ea2019-06-10 11:13:33 -07001586 * @note
David B. Kinder00c41ea2019-06-10 11:13:33 -07001587 * While this function returns time in milliseconds, it does
1588 * not mean it has millisecond resolution. The actual resolution depends on
Gerard Marull-Paretas72ab6b22021-06-28 17:13:40 +02001589 * the @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
Paul Sokolovsky65d51fd2019-02-04 22:44:50 +03001590 *
Peter Bigota6067a32019-08-28 08:19:26 -05001591 * @return The low 32 bits of the current uptime, in milliseconds.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001592 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001593static inline uint32_t k_uptime_get_32(void)
Peter Bigota6067a32019-08-28 08:19:26 -05001594{
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001595 return (uint32_t)k_uptime_get();
Peter Bigota6067a32019-08-28 08:19:26 -05001596}
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001597
1598/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001599 * @brief Get elapsed time.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001600 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001601 * This routine computes the elapsed time between the current system uptime
1602 * and an earlier reference time, in milliseconds.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001603 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001604 * @param reftime Pointer to a reference time, which is updated to the current
1605 * uptime upon return.
1606 *
1607 * @return Elapsed time.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001608 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001609static inline int64_t k_uptime_delta(int64_t *reftime)
Andy Ross987c0e52018-09-27 16:50:00 -07001610{
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001611 int64_t uptime, delta;
Andy Ross987c0e52018-09-27 16:50:00 -07001612
1613 uptime = k_uptime_get();
1614 delta = uptime - *reftime;
1615 *reftime = uptime;
1616
1617 return delta;
1618}
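/*
 * Example: k_uptime_delta() measures and re-arms in one call, which suits
 * periodic accounting loops. A minimal sketch; report_loop() is an
 * illustrative name:
 *
 * @code
 * void report_loop(void)
 * {
 *	int64_t ref = k_uptime_get();
 *
 *	for (;;) {
 *		k_sleep(K_SECONDS(1));
 *		int64_t elapsed_ms = k_uptime_delta(&ref);
 *
 *		// elapsed_ms is ~1000 plus scheduling latency; ref now
 *		// holds the current uptime for the next iteration
 *	}
 * }
 * @endcode
 */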
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001619
1620/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001621 * @brief Read the hardware clock.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001622 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001623 * This routine returns the current time, as measured by the system's hardware
1624 * clock.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001625 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001626 * @return Current hardware clock up-counter (in cycles).
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001627 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001628static inline uint32_t k_cycle_get_32(void)
Andrew Boie979b17f2019-10-03 15:20:41 -07001629{
Andrew Boie4f77c2a2019-11-07 12:43:29 -08001630 return arch_k_cycle_get_32();
Andrew Boie979b17f2019-10-03 15:20:41 -07001631}
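/*
 * Example: cycle counts are the highest-resolution time source and are
 * typically used for short measurements. A sketch; do_work() is a
 * hypothetical function under test, and the conversion helper is assumed
 * to come from the time-units API:
 *
 * @code
 * void measure(void)
 * {
 *	uint32_t start = k_cycle_get_32();
 *
 *	do_work();
 *
 *	uint32_t cycles = k_cycle_get_32() - start;
 *	uint64_t ns = k_cyc_to_ns_floor64(cycles);
 * }
 * @endcode
 */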
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001632
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001633/**
Anas Nashif166f5192018-02-25 08:02:36 -06001634 * @}
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001635 */
1636
Allan Stephensc98da842016-11-11 15:45:03 -05001637/**
1638 * @cond INTERNAL_HIDDEN
1639 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001640
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001641struct k_queue {
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001642 sys_sflist_t data_q;
Andy Ross603ea422018-07-25 13:01:54 -07001643 struct k_spinlock lock;
Andy Ross99c2d2d2020-06-02 08:34:12 -07001644 _wait_q_t wait_q;
Luiz Augusto von Dentz84db6412017-07-13 12:43:59 +03001645
Andy Ross99c2d2d2020-06-02 08:34:12 -07001646 _POLL_EVENT;
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001647};
1648
Anas Nashif45a1d8a2020-04-24 11:29:17 -04001649#define Z_QUEUE_INITIALIZER(obj) \
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001650 { \
Toby Firth680ec0b2020-10-05 13:45:47 +01001651 .data_q = SYS_SFLIST_STATIC_INIT(&obj.data_q), \
Stephanos Ioannidisf628dcd2019-09-11 18:09:49 +09001652 .lock = { }, \
Andy Ross99c2d2d2020-06-02 08:34:12 -07001653 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
1654 _POLL_EVENT_OBJ_INIT(obj) \
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001655 }
1656
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001657extern void *z_queue_node_peek(sys_sfnode_t *node, bool needs_free);
1658
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001659/**
1660 * INTERNAL_HIDDEN @endcond
1661 */
1662
1663/**
1664 * @defgroup queue_apis Queue APIs
1665 * @ingroup kernel_apis
1666 * @{
1667 */
1668
1669/**
1670 * @brief Initialize a queue.
1671 *
1672 * This routine initializes a queue object, prior to its first use.
1673 *
1674 * @param queue Address of the queue.
1675 *
1676 * @return N/A
1677 */
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001678__syscall void k_queue_init(struct k_queue *queue);
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001679
1680/**
Paul Sokolovsky3f507072017-04-25 17:54:31 +03001681 * @brief Cancel waiting on a queue.
1682 *
1683 * This routine causes the first thread pending on @a queue, if any, to
1684 * return from k_queue_get() call with NULL value (as if timeout expired).
Paul Sokolovsky45c0b202018-08-21 23:29:11 +03001685 * If the queue is being waited on by k_poll(), it will return with
1686 * -EINTR and K_POLL_STATE_CANCELLED state (and per above, subsequent
1687 * k_queue_get() will return NULL).
Paul Sokolovsky3f507072017-04-25 17:54:31 +03001688 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001689 * @funcprops \isr_ok
Paul Sokolovsky3f507072017-04-25 17:54:31 +03001690 *
1691 * @param queue Address of the queue.
1692 *
1693 * @return N/A
1694 */
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001695__syscall void k_queue_cancel_wait(struct k_queue *queue);
Paul Sokolovsky3f507072017-04-25 17:54:31 +03001696
1697/**
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001698 * @brief Append an element to the end of a queue.
1699 *
1700 * This routine appends a data item to @a queue. A queue data item must be
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001701 * aligned on a word boundary, and the first word of the item is reserved
1702 * for the kernel's use.
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001703 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001704 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001705 *
1706 * @param queue Address of the queue.
1707 * @param data Address of the data item.
1708 *
1709 * @return N/A
1710 */
1711extern void k_queue_append(struct k_queue *queue, void *data);
1712
1713/**
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001714 * @brief Append an element to a queue.
1715 *
Andrew Boieac3dcc12019-04-01 12:28:03 -07001716 * This routine appends a data item to @a queue. There is an implicit memory
1717 * allocation to create an additional temporary bookkeeping data structure from
1718 * the calling thread's resource pool, which is automatically freed when the
1719 * item is removed. The data itself is not copied.
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001720 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001721 * @funcprops \isr_ok
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001722 *
1723 * @param queue Address of the queue.
1724 * @param data Address of the data item.
1725 *
1726 * @retval 0 on success
1727 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
1728 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001729__syscall int32_t k_queue_alloc_append(struct k_queue *queue, void *data);
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001730
1731/**
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001732 * @brief Prepend an element to a queue.
1733 *
1734 * This routine prepends a data item to @a queue. A queue data item must be
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001735 * aligned on a word boundary, and the first word of the item is reserved
1736 * for the kernel's use.
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001737 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001738 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001739 *
1740 * @param queue Address of the queue.
1741 * @param data Address of the data item.
1742 *
1743 * @return N/A
1744 */
1745extern void k_queue_prepend(struct k_queue *queue, void *data);
1746
1747/**
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001748 * @brief Prepend an element to a queue.
1749 *
Andrew Boieac3dcc12019-04-01 12:28:03 -07001750 * This routine prepends a data item to @a queue. There is an implicit memory
1751 * allocation to create an additional temporary bookkeeping data structure from
1752 * the calling thread's resource pool, which is automatically freed when the
1753 * item is removed. The data itself is not copied.
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001754 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001755 * @funcprops \isr_ok
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001756 *
1757 * @param queue Address of the queue.
1758 * @param data Address of the data item.
1759 *
1760 * @retval 0 on success
1761 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
1762 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001763__syscall int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data);
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001764
1765/**
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001766 * @brief Inserts an element to a queue.
1767 *
1768 * This routine inserts a data item to @a queue after the previous item. A queue
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001769 * data item must be aligned on a word boundary, and the first word of
1770 * the item is reserved for the kernel's use.
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001771 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001772 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001773 *
1774 * @param queue Address of the queue.
1775 * @param prev Address of the previous data item.
1776 * @param data Address of the data item.
1777 *
1778 * @return N/A
1779 */
1780extern void k_queue_insert(struct k_queue *queue, void *prev, void *data);
1781
1782/**
1783 * @brief Atomically append a list of elements to a queue.
1784 *
1785 * This routine adds a list of data items to @a queue in one operation.
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001786 * The data items must be in a singly-linked list, with the first word
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001787 * in each data item pointing to the next data item; the list must be
1788 * NULL-terminated.
1789 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001790 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001791 *
1792 * @param queue Address of the queue.
1793 * @param head Pointer to first node in singly-linked list.
1794 * @param tail Pointer to last node in singly-linked list.
1795 *
Anas Nashif756d8b02019-06-16 09:53:55 -04001796 * @retval 0 on success
1797 * @retval -EINVAL on invalid supplied data
1798 *
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001799 */
Anas Nashif756d8b02019-06-16 09:53:55 -04001800extern int k_queue_append_list(struct k_queue *queue, void *head, void *tail);
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001801
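/*
 * Example: items for k_queue_append_list() are chained through their
 * first (kernel-reserved) word. A self-contained sketch; struct my_item
 * and enqueue_pair() are illustrative:
 *
 * @code
 * struct my_item {
 *	void *reserved;	// first word, used for the links
 *	int payload;
 * };
 *
 * void enqueue_pair(struct k_queue *q, struct my_item *a, struct my_item *b)
 * {
 *	a->reserved = b;	// first word points at the next item
 *	b->reserved = NULL;	// list must be NULL-terminated
 *	(void)k_queue_append_list(q, a, b);
 * }
 * @endcode
 */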
1802/**
1803 * @brief Atomically add a list of elements to a queue.
1804 *
1805 * This routine adds a list of data items to @a queue in one operation.
1806 * The data items must be in a singly-linked list implemented using a
1807 * sys_slist_t object. Upon completion, the original list is empty.
1808 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001809 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001810 *
1811 * @param queue Address of the queue.
1812 * @param list Pointer to sys_slist_t object.
1813 *
Anas Nashif756d8b02019-06-16 09:53:55 -04001814 * @retval 0 on success
1815 * @retval -EINVAL on invalid data
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001816 */
Anas Nashif756d8b02019-06-16 09:53:55 -04001817extern int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001818
1819/**
1820 * @brief Get an element from a queue.
1821 *
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001822 * This routine removes the first data item from @a queue. The first word of the
1823 * data item is reserved for the kernel's use.
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001824 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001825 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
1826 *
1827 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001828 *
1829 * @param queue Address of the queue.
Andy Ross78327382020-03-05 15:18:14 -08001830 * @param timeout Non-negative waiting period to obtain a data item
1831 * or one of the special values K_NO_WAIT and
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01001832 * K_FOREVER.
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001833 *
1834 * @return Address of the data item if successful; NULL if returned
1835 * without waiting, or waiting period timed out.
1836 */
Andy Ross78327382020-03-05 15:18:14 -08001837__syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout);
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001838
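/*
 * Example: a producer/consumer pair using the reserved-first-word item
 * layout. A sketch; my_queue and struct my_item are illustrative names:
 *
 * @code
 * struct my_item {
 *	void *reserved;	// first word, reserved for the kernel
 *	int payload;
 * };
 *
 * K_QUEUE_DEFINE(my_queue);
 *
 * void producer(struct my_item *item)
 * {
 *	k_queue_append(&my_queue, item);
 * }
 *
 * void consumer(void)
 * {
 *	struct my_item *item = k_queue_get(&my_queue, K_FOREVER);
 *
 *	// item->payload is intact; only the first word was touched
 * }
 * @endcode
 */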
1839/**
Luiz Augusto von Dentz50b93772017-07-03 16:52:45 +03001840 * @brief Remove an element from a queue.
1841 *
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001842 * This routine removes a data item from @a queue. The first word of the
1843 * data item is reserved for the kernel's use. Removing elements from k_queue
Luiz Augusto von Dentz50b93772017-07-03 16:52:45 +03001844 * relies on sys_slist_find_and_remove which is not a constant-time operation.
1845 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001846 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
1847 *
1848 * @funcprops \isr_ok
Luiz Augusto von Dentz50b93772017-07-03 16:52:45 +03001849 *
1850 * @param queue Address of the queue.
1851 * @param data Address of the data item.
1852 *
1853 * @return true if data item was removed
1854 */
Torbjörn Leksellf9848232021-03-26 11:19:35 +01001855bool k_queue_remove(struct k_queue *queue, void *data);
Luiz Augusto von Dentz50b93772017-07-03 16:52:45 +03001856
1857/**
Dhananjay Gundapu Jayakrishnan24bfa402018-08-22 12:33:00 +02001858 * @brief Append an element to a queue only if it's not present already.
1859 *
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001860 * This routine appends a data item to @a queue. The first word of the data
1861 * item is reserved for the kernel's use. Appending elements to k_queue
Dhananjay Gundapu Jayakrishnan24bfa402018-08-22 12:33:00 +02001862 * relies on sys_slist_is_node_in_list which is not a constant-time operation.
1863 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001864 * @funcprops \isr_ok
Dhananjay Gundapu Jayakrishnan24bfa402018-08-22 12:33:00 +02001865 *
1866 * @param queue Address of the queue.
1867 * @param data Address of the data item.
1868 *
1869 * @return true if data item was added, false if not
1870 */
Torbjörn Leksellf9848232021-03-26 11:19:35 +01001871bool k_queue_unique_append(struct k_queue *queue, void *data);
Dhananjay Gundapu Jayakrishnan24bfa402018-08-22 12:33:00 +02001872
1873/**
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001874 * @brief Query a queue to see if it has data available.
1875 *
1876 * Note that the data might be already gone by the time this function returns
1877 * if other threads are also trying to read from the queue.
1878 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001879 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001880 *
1881 * @param queue Address of the queue.
1882 *
1883 * @return Non-zero if the queue is empty.
1884 * @return 0 if data is available.
1885 */
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001886__syscall int k_queue_is_empty(struct k_queue *queue);
1887
Patrik Flykt4344e272019-03-08 14:19:05 -07001888static inline int z_impl_k_queue_is_empty(struct k_queue *queue)
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001889{
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001890 return (int)sys_sflist_is_empty(&queue->data_q);
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001891}
1892
1893/**
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03001894 * @brief Peek element at the head of queue.
1895 *
1896 * Return element from the head of queue without removing it.
1897 *
1898 * @param queue Address of the queue.
1899 *
1900 * @return Head element, or NULL if queue is empty.
1901 */
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001902__syscall void *k_queue_peek_head(struct k_queue *queue);
1903
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03001904/**
1905 * @brief Peek element at the tail of queue.
1906 *
1907 * Return element from the tail of queue without removing it.
1908 *
1909 * @param queue Address of the queue.
1910 *
1911 * @return Tail element, or NULL if queue is empty.
1912 */
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001913__syscall void *k_queue_peek_tail(struct k_queue *queue);
1914
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03001915/**
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001916 * @brief Statically define and initialize a queue.
1917 *
1918 * The queue can be accessed outside the module where it is defined using:
1919 *
1920 * @code extern struct k_queue <name>; @endcode
1921 *
1922 * @param name Name of the queue.
1923 */
1924#define K_QUEUE_DEFINE(name) \
Fabio Baltierif88a4202021-08-04 23:05:54 +01001925 STRUCT_SECTION_ITERABLE(k_queue, name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04001926 Z_QUEUE_INITIALIZER(name)
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001927
Anas Nashif166f5192018-02-25 08:02:36 -06001928/** @} */
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001929
Wentong Wu5611e922019-06-20 23:51:27 +08001930#ifdef CONFIG_USERSPACE
1931/**
1932 * @brief futex structure
1933 *
1934 * A k_futex is a lightweight mutual exclusion primitive designed
1935 * to minimize kernel involvement. Uncontended operation relies
1936 * only on atomic access to shared memory. k_futex structures are tracked as
Lauren Murphyd922fed2021-02-01 21:24:47 -06001937 * kernel objects and can live in user memory so that any access
1938 * bypasses the kernel object permission management mechanism.
Wentong Wu5611e922019-06-20 23:51:27 +08001939 */
1940struct k_futex {
1941 atomic_t val;
1942};
1943
1944/**
1945 * @brief futex kernel data structure
1946 *
1947 * z_futex_data is the helper data structure that k_futex uses to complete
1948 * contended futex operations on the kernel side; the z_futex_data
1949 * of every futex object is invisible in user mode.
1950 */
1951struct z_futex_data {
1952 _wait_q_t wait_q;
1953 struct k_spinlock lock;
1954};
1955
1956#define Z_FUTEX_DATA_INITIALIZER(obj) \
1957 { \
1958 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q) \
1959 }
1960
1961/**
1962 * @defgroup futex_apis FUTEX APIs
1963 * @ingroup kernel_apis
1964 * @{
1965 */
1966
1967/**
Wentong Wu5611e922019-06-20 23:51:27 +08001968 * @brief Pend the current thread on a futex
1969 *
1970 * Tests that the supplied futex contains the expected value, and if so,
1971 * goes to sleep until some other thread calls k_futex_wake() on it.
1972 *
1973 * @param futex Address of the futex.
1974 * @param expected Expected value of the futex, if it is different the caller
1975 * will not wait on it.
Andy Ross78327382020-03-05 15:18:14 -08001976 * @param timeout Non-negative waiting period on the futex, or
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01001977 * one of the special values K_NO_WAIT or K_FOREVER.
Wentong Wu5611e922019-06-20 23:51:27 +08001978 * @retval -EACCES Caller does not have read access to futex address.
1979 * @retval -EAGAIN If the futex value did not match the expected parameter.
1980 * @retval -EINVAL Futex parameter address not recognized by the kernel.
1981 * @retval -ETIMEDOUT Thread woke up due to timeout and not a futex wakeup.
1982 * @retval 0 if the caller went to sleep and was woken up. The caller
1983 * should check the futex's value on wakeup to determine if it needs
1984 * to block again.
1985 */
Andy Ross78327382020-03-05 15:18:14 -08001986__syscall int k_futex_wait(struct k_futex *futex, int expected,
1987 k_timeout_t timeout);
Wentong Wu5611e922019-06-20 23:51:27 +08001988
1989/**
1990 * @brief Wake one/all threads pending on a futex
1991 *
1992 * Wake up the highest priority thread pending on the supplied futex, or
1993 * wake up all threads pending on the supplied futex; which behavior
1994 * applies depends on wake_all.
1995 *
1996 * @param futex Futex to wake up pending threads.
1997 * @param wake_all If true, wake up all pending threads; If false,
1998 * wakeup the highest priority thread.
1999 * @retval -EACCES Caller does not have access to the futex address.
2000 * @retval -EINVAL Futex parameter address not recognized by the kernel.
2001 * @return Number of threads that were woken up.
2002 */
2003__syscall int k_futex_wake(struct k_futex *futex, bool wake_all);
2004
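/*
 * Example: a minimal user-mode lock built on k_futex. A sketch of the
 * uncontended fast path only, not a production mutex; my_lock,
 * my_lock_acquire() and my_lock_release() are illustrative names:
 *
 * @code
 * struct k_futex my_lock = { .val = ATOMIC_INIT(0) };
 *
 * void my_lock_acquire(void)
 * {
 *	while (!atomic_cas(&my_lock.val, 0, 1)) {
 *		// contended: sleep until the holder wakes us; if the
 *		// value is no longer 1, -EAGAIN returns immediately
 *		// and the CAS is retried
 *		(void)k_futex_wait(&my_lock, 1, K_FOREVER);
 *	}
 * }
 *
 * void my_lock_release(void)
 * {
 *	atomic_set(&my_lock.val, 0);
 *	(void)k_futex_wake(&my_lock, false); // wake one waiter
 * }
 * @endcode
 */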
2005/** @} */
2006#endif
2007
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002008struct k_fifo {
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002009 struct k_queue _queue;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002010};
2011
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04002012/**
2013 * @cond INTERNAL_HIDDEN
2014 */
Patrik Flykt97b3bd12019-03-12 15:15:42 -06002015#define Z_FIFO_INITIALIZER(obj) \
Allan Stephensc98da842016-11-11 15:45:03 -05002016 { \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002017 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
Allan Stephensc98da842016-11-11 15:45:03 -05002018 }
2019
2020/**
2021 * INTERNAL_HIDDEN @endcond
2022 */
2023
2024/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002025 * @defgroup fifo_apis FIFO APIs
Allan Stephensc98da842016-11-11 15:45:03 -05002026 * @ingroup kernel_apis
2027 * @{
2028 */
2029
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002030/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002031 * @brief Initialize a FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002032 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002033 * This routine initializes a FIFO queue, prior to its first use.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002034 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002035 * @param fifo Address of the FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002036 *
2037 * @return N/A
2038 */
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002039#define k_fifo_init(fifo) \
Torbjörn Leksell83ae27b2021-03-26 11:42:16 +01002040 ({ \
2041 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, init, fifo); \
2042 k_queue_init(&(fifo)->_queue); \
2043 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, init, fifo); \
2044 })
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002045
2046/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002047 * @brief Cancel waiting on a FIFO queue.
Paul Sokolovsky3f507072017-04-25 17:54:31 +03002048 *
2049 * This routine causes the first thread pending on @a fifo, if any, to
2050 * return from k_fifo_get() call with NULL value (as if timeout
2051 * expired).
2052 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002053 * @funcprops \isr_ok
Paul Sokolovsky3f507072017-04-25 17:54:31 +03002054 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002055 * @param fifo Address of the FIFO queue.
Paul Sokolovsky3f507072017-04-25 17:54:31 +03002056 *
2057 * @return N/A
2058 */
2059#define k_fifo_cancel_wait(fifo) \
Torbjörn Leksell83ae27b2021-03-26 11:42:16 +01002060 ({ \
2061 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, cancel_wait, fifo); \
2062 k_queue_cancel_wait(&(fifo)->_queue); \
2063 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, cancel_wait, fifo); \
2064 })
Paul Sokolovsky3f507072017-04-25 17:54:31 +03002065
2066/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002067 * @brief Add an element to a FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002068 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002069 * This routine adds a data item to @a fifo. A FIFO data item must be
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04002070 * aligned on a word boundary, and the first word of the item is reserved
2071 * for the kernel's use.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002072 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002073 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002074 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002075 * @param fifo Address of the FIFO.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002076 * @param data Address of the data item.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002077 *
2078 * @return N/A
2079 */
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002080#define k_fifo_put(fifo, data) \
Torbjörn Leksell83ae27b2021-03-26 11:42:16 +01002081 ({ \
2082 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put, fifo, data); \
2083 k_queue_append(&(fifo)->_queue, data); \
2084 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put, fifo, data); \
2085 })
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002086
2087/**
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002088 * @brief Add an element to a FIFO queue.
2089 *
Andrew Boieac3dcc12019-04-01 12:28:03 -07002090 * This routine adds a data item to @a fifo. There is an implicit memory
2091 * allocation to create an additional temporary bookkeeping data structure from
2092 * the calling thread's resource pool, which is automatically freed when the
2093 * item is removed. The data itself is not copied.
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002094 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002095 * @funcprops \isr_ok
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002096 *
2097 * @param fifo Address of the FIFO.
2098 * @param data Address of the data item.
2099 *
2100 * @retval 0 on success
2101 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2102 */
2103#define k_fifo_alloc_put(fifo, data) \
Torbjörn Leksell83ae27b2021-03-26 11:42:16 +01002104 ({ \
2105 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, alloc_put, fifo, data); \
2106 int ret = k_queue_alloc_append(&(fifo)->_queue, data); \
2107 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, alloc_put, fifo, data, ret); \
2108 ret; \
2109 })
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002110
2111/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002112 * @brief Atomically add a list of elements to a FIFO.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002113 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002114 * This routine adds a list of data items to @a fifo in one operation.
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04002115 * The data items must be in a singly-linked list, with the first word of
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002116 * each data item pointing to the next data item; the list must be
2117 * NULL-terminated.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002118 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002119 * @funcprops \isr_ok
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002120 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002121 * @param fifo Address of the FIFO queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002122 * @param head Pointer to first node in singly-linked list.
2123 * @param tail Pointer to last node in singly-linked list.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002124 *
2125 * @return N/A
2126 */
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002127#define k_fifo_put_list(fifo, head, tail) \
Torbjörn Leksell83ae27b2021-03-26 11:42:16 +01002128 ({ \
2129 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_list, fifo, head, tail); \
2130 k_queue_append_list(&(fifo)->_queue, head, tail); \
2131 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_list, fifo, head, tail); \
2132 })
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002133
2134/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002135 * @brief Atomically add a list of elements to a FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002136 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002137 * This routine adds a list of data items to @a fifo in one operation.
2138 * The data items must be in a singly-linked list implemented using a
2139 * sys_slist_t object. Upon completion, the sys_slist_t object is invalid
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002140 * and must be re-initialized via sys_slist_init().
2141 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002142 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002143 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002144 * @param fifo Address of the FIFO queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002145 * @param list Pointer to sys_slist_t object.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002146 *
2147 * @return N/A
2148 */
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002149#define k_fifo_put_slist(fifo, list) \
Torbjörn Leksell83ae27b2021-03-26 11:42:16 +01002150 ({ \
2151 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_slist, fifo, list); \
2152 k_queue_merge_slist(&(fifo)->_queue, list); \
2153 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_slist, fifo, list); \
2154 })
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002155
2156/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002157 * @brief Get an element from a FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002158 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002159 * This routine removes a data item from @a fifo in a "first in, first out"
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04002160 * manner. The first word of the data item is reserved for the kernel's use.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002161 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002162 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2163 *
2164 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002165 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002166 * @param fifo Address of the FIFO queue.
Andy Ross78327382020-03-05 15:18:14 -08002167 * @param timeout Waiting period to obtain a data item,
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002168 * or one of the special values K_NO_WAIT and K_FOREVER.
2169 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05002170 * @return Address of the data item if successful; NULL if returned
2171 * without waiting, or waiting period timed out.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002172 */
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002173#define k_fifo_get(fifo, timeout) \
Torbjörn Leksell83ae27b2021-03-26 11:42:16 +01002174 ({ \
2175 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, get, fifo, timeout); \
2176 void *ret = k_queue_get(&(fifo)->_queue, timeout); \
2177 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, get, fifo, timeout, ret); \
2178 ret; \
2179 })
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002180
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002181/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002182 * @brief Query a FIFO queue to see if it has data available.
Benjamin Walsh39b80d82017-01-28 10:06:07 -05002183 *
2184 * Note that the data might be already gone by the time this function returns
Anas Nashif585fd1f2018-02-25 08:04:59 -06002185 * if other threads are also trying to read from the FIFO.
Benjamin Walsh39b80d82017-01-28 10:06:07 -05002186 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002187 * @funcprops \isr_ok
Benjamin Walsh39b80d82017-01-28 10:06:07 -05002188 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002189 * @param fifo Address of the FIFO queue.
Benjamin Walsh39b80d82017-01-28 10:06:07 -05002190 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002191 * @return Non-zero if the FIFO queue is empty.
Benjamin Walsh39b80d82017-01-28 10:06:07 -05002192 * @return 0 if data is available.
2193 */
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002194#define k_fifo_is_empty(fifo) \
Nicolas Pitrea04a2ca2019-05-20 23:02:39 -04002195 k_queue_is_empty(&(fifo)->_queue)
Benjamin Walsh39b80d82017-01-28 10:06:07 -05002196
2197/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002198 * @brief Peek element at the head of a FIFO queue.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002199 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002200 * Return element from the head of FIFO queue without removing it. A use case
Ramakrishna Pallala92489ea2018-03-29 22:44:23 +05302201 * for this is if elements of the FIFO object are themselves containers. Then
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002202 * on each iteration of processing, a head container will be peeked,
2203 * and some data processed out of it, and only when the container is empty
Anas Nashif585fd1f2018-02-25 08:04:59 -06002204 * will it be completely removed from the FIFO queue.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002205 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002206 * @param fifo Address of the FIFO queue.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002207 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002208 * @return Head element, or NULL if the FIFO queue is empty.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002209 */
2210#define k_fifo_peek_head(fifo) \
Torbjörn Leksell83ae27b2021-03-26 11:42:16 +01002211 ({ \
2212 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_head, fifo); \
2213 void *ret = k_queue_peek_head(&(fifo)->_queue); \
2214 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_head, fifo, ret); \
2215 ret; \
2216 })
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002217
2218/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002219 * @brief Peek element at the tail of FIFO queue.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002220 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002221 * Return element from the tail of FIFO queue (without removing it). A use case
2222 * for this is if elements of the FIFO queue are themselves containers. Then
2223 * it may be useful to add more data to the last container in a FIFO queue.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002224 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002225 * @param fifo Address of the FIFO queue.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002226 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002227 * @return Tail element, or NULL if the FIFO queue is empty.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002228 */
2229#define k_fifo_peek_tail(fifo) \
Torbjörn Leksell83ae27b2021-03-26 11:42:16 +01002230 ({ \
2231 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_tail, fifo); \
2232 void *ret = k_queue_peek_tail(&(fifo)->_queue); \
2233 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_tail, fifo, ret); \
2234 ret; \
2235 })
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002236
2237/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002238 * @brief Statically define and initialize a FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002239 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002240 * The FIFO queue can be accessed outside the module where it is defined using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002241 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05002242 * @code extern struct k_fifo <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002243 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002244 * @param name Name of the FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002245 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002246#define K_FIFO_DEFINE(name) \
Fabio Baltierif88a4202021-08-04 23:05:54 +01002247 STRUCT_SECTION_ITERABLE_ALTERNATE(k_queue, k_fifo, name) = \
Patrik Flykt97b3bd12019-03-12 15:15:42 -06002248 Z_FIFO_INITIALIZER(name)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002249
Anas Nashif166f5192018-02-25 08:02:36 -06002250/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05002251
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002252struct k_lifo {
Luiz Augusto von Dentz0dc4dd42017-02-21 15:49:52 +02002253 struct k_queue _queue;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002254};
2255
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04002256/**
2257 * @cond INTERNAL_HIDDEN
2258 */
2259
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002260#define Z_LIFO_INITIALIZER(obj) \
Allan Stephensc98da842016-11-11 15:45:03 -05002261 { \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002262 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
Allan Stephensc98da842016-11-11 15:45:03 -05002263 }
2264
2265/**
2266 * INTERNAL_HIDDEN @endcond
2267 */
2268
2269/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002270 * @defgroup lifo_apis LIFO APIs
Allan Stephensc98da842016-11-11 15:45:03 -05002271 * @ingroup kernel_apis
2272 * @{
2273 */
2274
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002275/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002276 * @brief Initialize a LIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002277 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002278 * This routine initializes a LIFO queue object, prior to its first use.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002279 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002280 * @param lifo Address of the LIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002281 *
2282 * @return N/A
2283 */
Luiz Augusto von Dentz0dc4dd42017-02-21 15:49:52 +02002284#define k_lifo_init(lifo) \
Torbjörn Lekselld7654452021-03-26 12:21:24 +01002285 ({ \
2286 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, init, lifo); \
2287 k_queue_init(&(lifo)->_queue); \
2288 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, init, lifo); \
2289 })
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002290
2291/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002292 * @brief Add an element to a LIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002293 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002294 * This routine adds a data item to @a lifo. A LIFO queue data item must be
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04002295 * aligned on a word boundary, and the first word of the item is
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002296 * reserved for the kernel's use.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002297 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002298 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002299 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002300 * @param lifo Address of the LIFO queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002301 * @param data Address of the data item.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002302 *
2303 * @return N/A
2304 */
Luiz Augusto von Dentz0dc4dd42017-02-21 15:49:52 +02002305#define k_lifo_put(lifo, data) \
Torbjörn Lekselld7654452021-03-26 12:21:24 +01002306 ({ \
2307 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, put, lifo, data); \
2308 k_queue_prepend(&(lifo)->_queue, data); \
2309 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, put, lifo, data); \
2310 })
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002311
2312/**
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002313 * @brief Add an element to a LIFO queue.
2314 *
Andrew Boieac3dcc12019-04-01 12:28:03 -07002315 * This routine adds a data item to @a lifo. There is an implicit memory
2316 * allocation to create an additional temporary bookkeeping data structure from
2317 * the calling thread's resource pool, which is automatically freed when the
2318 * item is removed. The data itself is not copied.
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002319 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002320 * @funcprops \isr_ok
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002321 *
2322 * @param lifo Address of the LIFO queue.
2323 * @param data Address of the data item.
2324 *
2325 * @retval 0 on success
2326 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2327 */
2328#define k_lifo_alloc_put(lifo, data) \
Torbjörn Lekselld7654452021-03-26 12:21:24 +01002329 ({ \
2330 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, alloc_put, lifo, data); \
2331 int ret = k_queue_alloc_prepend(&(lifo)->_queue, data); \
2332 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, alloc_put, lifo, data, ret); \
2333 ret; \
2334 })
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002335
2336/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002337 * @brief Get an element from a LIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002338 *
Anas Nashif56821172020-07-08 14:14:25 -04002339 * This routine removes a data item from @a lifo in a "last in, first out"
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04002340 * manner. The first word of the data item is reserved for the kernel's use.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002341 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002342 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2343 *
2344 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002345 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002346 * @param lifo Address of the LIFO queue.
Andy Ross78327382020-03-05 15:18:14 -08002347 * @param timeout Waiting period to obtain a data item,
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002348 * or one of the special values K_NO_WAIT and K_FOREVER.
2349 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05002350 * @return Address of the data item if successful; NULL if returned
2351 * without waiting, or waiting period timed out.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002352 */
Luiz Augusto von Dentz0dc4dd42017-02-21 15:49:52 +02002353#define k_lifo_get(lifo, timeout) \
Torbjörn Lekselld7654452021-03-26 12:21:24 +01002354 ({ \
2355 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, get, lifo, timeout); \
2356 void *ret = k_queue_get(&(lifo)->_queue, timeout); \
2357 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, get, lifo, timeout, ret); \
2358 ret; \
2359 })
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002360
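/*
 * Illustrative sketch (not part of the API): passing items through a
 * LIFO queue. The names my_lifo, struct lifo_item, producer() and
 * consumer() are hypothetical; only the k_lifo_* calls are real. The
 * first word of each item is reserved for the kernel's use.
 *
 *    K_LIFO_DEFINE(my_lifo);
 *
 *    struct lifo_item {
 *        void *lifo_reserved;   // first word reserved for the kernel
 *        uint32_t payload;
 *    };
 *
 *    void producer(struct lifo_item *item)
 *    {
 *        k_lifo_put(&my_lifo, item);
 *    }
 *
 *    void consumer(void)
 *    {
 *        struct lifo_item *item = k_lifo_get(&my_lifo, K_FOREVER);
 *
 *        // item is the most recently added element (LIFO order)
 *    }
 */
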
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002361/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002362 * @brief Statically define and initialize a LIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002363 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002364 * The LIFO queue can be accessed outside the module where it is defined using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002365 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05002366 * @code extern struct k_lifo <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002367 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002368 * @param name Name of the LIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002369 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002370#define K_LIFO_DEFINE(name) \
Fabio Baltierif88a4202021-08-04 23:05:54 +01002371 STRUCT_SECTION_ITERABLE_ALTERNATE(k_queue, k_lifo, name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002372 Z_LIFO_INITIALIZER(name)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002373
Anas Nashif166f5192018-02-25 08:02:36 -06002374/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05002375
2376/**
2377 * @cond INTERNAL_HIDDEN
2378 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05002379#define K_STACK_FLAG_ALLOC ((uint8_t)1) /* Buffer was allocated */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002380
Nicolas Pitre3d51f7c2019-05-17 22:48:26 -04002381typedef uintptr_t stack_data_t;
2382
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002383struct k_stack {
2384 _wait_q_t wait_q;
Andy Rossf0933d02018-07-26 10:23:02 -07002385 struct k_spinlock lock;
Nicolas Pitre3d51f7c2019-05-17 22:48:26 -04002386 stack_data_t *base, *next, *top;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002387
Kumar Galaa1b77fd2020-05-27 11:26:57 -05002388 uint8_t flags;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002389};
2390
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002391#define Z_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
Allan Stephensc98da842016-11-11 15:45:03 -05002392 { \
Patrik Flykt4344e272019-03-08 14:19:05 -07002393 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
Allan Stephensc98da842016-11-11 15:45:03 -05002394 .base = stack_buffer, \
2395 .next = stack_buffer, \
2396 .top = stack_buffer + stack_num_entries, \
Allan Stephensc98da842016-11-11 15:45:03 -05002397 }
2398
2399/**
2400 * INTERNAL_HIDDEN @endcond
2401 */
2402
2403/**
2404 * @defgroup stack_apis Stack APIs
2405 * @ingroup kernel_apis
2406 * @{
2407 */
2408
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002409/**
2410 * @brief Initialize a stack.
2411 *
2412 * This routine initializes a stack object, prior to its first use.
2413 *
2414 * @param stack Address of the stack.
2415 * @param buffer Address of array used to hold stacked values.
2416 * @param num_entries Maximum number of values that can be stacked.
2417 *
2418 * @return N/A
2419 */
Andrew Boief3bee952018-05-02 17:44:39 -07002420void k_stack_init(struct k_stack *stack,
Kumar Galaa1b77fd2020-05-27 11:26:57 -05002421 stack_data_t *buffer, uint32_t num_entries);
Andrew Boief3bee952018-05-02 17:44:39 -07002422
2423
2424/**
2425 * @brief Initialize a stack.
2426 *
2427 * This routine initializes a stack object, prior to its first use. Internal
2428 * buffers will be allocated from the calling thread's resource pool.
2429 * This memory will be released if k_stack_cleanup() is called, or
2430 * userspace is enabled and the stack object loses all references to it.
2431 *
2432 * @param stack Address of the stack.
2433 * @param num_entries Maximum number of values that can be stacked.
2434 *
2435 * @retval 0 on success
 * @retval -ENOMEM if memory couldn't be allocated
2436 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05002438__syscall int32_t k_stack_alloc_init(struct k_stack *stack,
2439 uint32_t num_entries);
Andrew Boief3bee952018-05-02 17:44:39 -07002440
2441/**
2442 * @brief Release a stack's allocated buffer
2443 *
2444 * If a stack object was given a dynamically allocated buffer via
2445 * k_stack_alloc_init(), this will free it. This function does nothing
2446 * if the buffer wasn't dynamically allocated.
2447 *
2448 * @param stack Address of the stack.
Anas Nashif1ed67d12019-06-16 08:58:10 -04002449 * @retval 0 on success
2450 * @retval -EAGAIN when object is still in use
Andrew Boief3bee952018-05-02 17:44:39 -07002451 */
Anas Nashif1ed67d12019-06-16 08:58:10 -04002452int k_stack_cleanup(struct k_stack *stack);
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002453
2454/**
2455 * @brief Push an element onto a stack.
2456 *
Nicolas Pitre3d51f7c2019-05-17 22:48:26 -04002457 * This routine adds a stack_data_t value @a data to @a stack.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002458 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002459 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002460 *
2461 * @param stack Address of the stack.
2462 * @param data Value to push onto the stack.
2463 *
Anas Nashif1ed67d12019-06-16 08:58:10 -04002464 * @retval 0 on success
2465 * @retval -ENOMEM if stack is full
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002466 */
Anas Nashif1ed67d12019-06-16 08:58:10 -04002467__syscall int k_stack_push(struct k_stack *stack, stack_data_t data);
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002468
2469/**
2470 * @brief Pop an element from a stack.
2471 *
Nicolas Pitre3d51f7c2019-05-17 22:48:26 -04002472 * This routine removes a stack_data_t value from @a stack in a "last in,
2473 * first out" manner and stores the value in @a data.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002474 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002475 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2476 *
2477 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002478 *
2479 * @param stack Address of the stack.
2480 * @param data Address of area to hold the value popped from the stack.
Andy Ross78327382020-03-05 15:18:14 -08002481 * @param timeout Waiting period to obtain a value,
2482 * or one of the special values K_NO_WAIT and
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01002483 * K_FOREVER.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002484 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05002485 * @retval 0 Element popped from stack.
2486 * @retval -EBUSY Returned without waiting.
2487 * @retval -EAGAIN Waiting period timed out.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002488 */
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01002489__syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
Andy Ross78327382020-03-05 15:18:14 -08002490 k_timeout_t timeout);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002491
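/*
 * Illustrative sketch (not part of the API): a pool of buffer addresses
 * kept on a stack. The names addr_stack, addr_buf, NUM_ADDRS and the
 * pool_* helpers are hypothetical; only the k_stack_* calls are real.
 *
 *    #define NUM_ADDRS 4
 *
 *    struct k_stack addr_stack;
 *    stack_data_t addr_buf[NUM_ADDRS];
 *
 *    void pool_init(void)
 *    {
 *        k_stack_init(&addr_stack, addr_buf, NUM_ADDRS);
 *    }
 *
 *    int pool_alloc(void **addr)
 *    {
 *        stack_data_t data;
 *        int rc = k_stack_pop(&addr_stack, &data, K_FOREVER);
 *
 *        if (rc == 0) {
 *            *addr = (void *)data;
 *        }
 *        return rc;
 *    }
 *
 *    int pool_free(void *addr)
 *    {
 *        return k_stack_push(&addr_stack, (stack_data_t)addr);
 *    }
 */
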
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002492/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002493 * @brief Statically define and initialize a stack
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002494 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002495 * The stack can be accessed outside the module where it is defined using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002496 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05002497 * @code extern struct k_stack <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002498 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002499 * @param name Name of the stack.
2500 * @param stack_num_entries Maximum number of values that can be stacked.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002501 */
Peter Mitsis602e6a82016-10-17 11:48:43 -04002502#define K_STACK_DEFINE(name, stack_num_entries) \
Nicolas Pitre3d51f7c2019-05-17 22:48:26 -04002503 stack_data_t __noinit \
Peter Mitsis602e6a82016-10-17 11:48:43 -04002504 _k_stack_buf_##name[stack_num_entries]; \
Fabio Baltierif88a4202021-08-04 23:05:54 +01002505 STRUCT_SECTION_ITERABLE(k_stack, name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002506 Z_STACK_INITIALIZER(name, _k_stack_buf_##name, \
Peter Mitsis602e6a82016-10-17 11:48:43 -04002507 stack_num_entries)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002508
Anas Nashif166f5192018-02-25 08:02:36 -06002509/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05002510
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002511/**
2512 * @cond INTERNAL_HIDDEN
2513 */
Peter Bigot44539ed2020-11-21 06:58:58 -06002514
Allan Stephens6bba9b02016-11-16 14:56:54 -05002515struct k_work;
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002516struct k_work_q;
2517struct k_work_queue_config;
2518struct k_delayed_work;
2519extern struct k_work_q k_sys_work_q;
2520
2521/**
2522 * INTERNAL_HIDDEN @endcond
2523 */
2524
Allan Stephensc98da842016-11-11 15:45:03 -05002525/**
Anas Nashifce78d162018-05-24 12:43:11 -05002526 * @defgroup mutex_apis Mutex APIs
2527 * @ingroup kernel_apis
2528 * @{
Allan Stephensc98da842016-11-11 15:45:03 -05002529 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002530
Anas Nashifce78d162018-05-24 12:43:11 -05002531/**
2532 * Mutex Structure
2533 * @ingroup mutex_apis
2534 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002535struct k_mutex {
Anas Nashife71293e2019-12-04 20:00:14 -05002536 /** Mutex wait queue */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002537 _wait_q_t wait_q;
Anas Nashifce78d162018-05-24 12:43:11 -05002538 /** Mutex owner */
Benjamin Walshb7ef0cb2016-10-05 17:32:01 -04002539 struct k_thread *owner;
Anas Nashife71293e2019-12-04 20:00:14 -05002540
2541 /** Current lock count */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05002542 uint32_t lock_count;
Anas Nashife71293e2019-12-04 20:00:14 -05002543
2544 /** Original thread priority */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002545 int owner_orig_prio;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002546};
2547
Anas Nashifce78d162018-05-24 12:43:11 -05002548/**
2549 * @cond INTERNAL_HIDDEN
2550 */
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002551#define Z_MUTEX_INITIALIZER(obj) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002552 { \
Patrik Flykt4344e272019-03-08 14:19:05 -07002553 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002554 .owner = NULL, \
2555 .lock_count = 0, \
Andy Ross851d14a2021-05-13 15:46:43 -07002556 .owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002557 }
2558
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002559/**
Allan Stephensc98da842016-11-11 15:45:03 -05002560 * INTERNAL_HIDDEN @endcond
2561 */
2562
2563/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002564 * @brief Statically define and initialize a mutex.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002565 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002566 * The mutex can be accessed outside the module where it is defined using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002567 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05002568 * @code extern struct k_mutex <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002569 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002570 * @param name Name of the mutex.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002571 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002572#define K_MUTEX_DEFINE(name) \
Fabio Baltierif88a4202021-08-04 23:05:54 +01002573 STRUCT_SECTION_ITERABLE(k_mutex, name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002574 Z_MUTEX_INITIALIZER(name)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002575
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002576/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002577 * @brief Initialize a mutex.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002578 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002579 * This routine initializes a mutex object, prior to its first use.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002580 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002581 * Upon completion, the mutex is available and does not have an owner.
2582 *
2583 * @param mutex Address of the mutex.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002584 *
Anas Nashif86bb2d02019-05-04 10:18:13 -04002585 * @retval 0 Mutex object created
2586 *
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002587 */
Anas Nashif86bb2d02019-05-04 10:18:13 -04002588__syscall int k_mutex_init(struct k_mutex *mutex);
2589
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002590
2591/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002592 * @brief Lock a mutex.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002593 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002594 * This routine locks @a mutex. If the mutex is locked by another thread,
2595 * the calling thread waits until the mutex becomes available or until
2596 * a timeout occurs.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002597 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002598 * A thread is permitted to lock a mutex it has already locked. The operation
2599 * completes immediately and the lock count is increased by 1.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002600 *
Andrew Boie6af97932020-05-27 11:48:30 -07002601 * Mutexes may not be locked in ISRs.
2602 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002603 * @param mutex Address of the mutex.
Andy Ross78327382020-03-05 15:18:14 -08002604 * @param timeout Waiting period to lock the mutex,
2605 * or one of the special values K_NO_WAIT and
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01002606 * K_FOREVER.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002607 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05002608 * @retval 0 Mutex locked.
2609 * @retval -EBUSY Returned without waiting.
2610 * @retval -EAGAIN Waiting period timed out.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002611 */
Andy Ross78327382020-03-05 15:18:14 -08002612__syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002613
2614/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002615 * @brief Unlock a mutex.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002616 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002617 * This routine unlocks @a mutex. The mutex must already be locked by the
2618 * calling thread.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002619 *
2620 * The mutex cannot be claimed by another thread until it has been unlocked by
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002621 * the calling thread as many times as it was previously locked by that
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002622 * thread.
2623 *
Andrew Boie6af97932020-05-27 11:48:30 -07002624 * Mutexes may not be unlocked in ISRs, as mutexes must only be manipulated
2625 * in thread context due to ownership and priority inheritance semantics.
2626 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002627 * @param mutex Address of the mutex.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002628 *
Anas Nashif86bb2d02019-05-04 10:18:13 -04002629 * @retval 0 Mutex unlocked.
2630 * @retval -EPERM The current thread does not own the mutex
2631 * @retval -EINVAL The mutex is not locked
2632 *
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002633 */
Anas Nashif86bb2d02019-05-04 10:18:13 -04002634__syscall int k_mutex_unlock(struct k_mutex *mutex);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002635
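/*
 * Illustrative sketch (not part of the API): guarding shared state with
 * a mutex and a bounded wait. The names my_mutex, shared_counter,
 * bump_counter() and the 100 ms bound are hypothetical; only the
 * k_mutex_* calls are real. Must be called from thread context, not
 * from an ISR.
 *
 *    K_MUTEX_DEFINE(my_mutex);
 *
 *    static int shared_counter;
 *
 *    int bump_counter(void)
 *    {
 *        int rc = k_mutex_lock(&my_mutex, K_MSEC(100));
 *
 *        if (rc != 0) {
 *            return rc;  // -EAGAIN if the wait timed out
 *        }
 *        shared_counter++;
 *        k_mutex_unlock(&my_mutex);
 *        return 0;
 *    }
 */
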
Allan Stephensc98da842016-11-11 15:45:03 -05002636/**
Anas Nashif166f5192018-02-25 08:02:36 -06002637 * @}
Allan Stephensc98da842016-11-11 15:45:03 -05002638 */
2639
Anas Nashif06eb4892020-08-23 12:39:09 -04002640
2641struct k_condvar {
2642 _wait_q_t wait_q;
2643};
2644
2645#define Z_CONDVAR_INITIALIZER(obj) \
2646 { \
2647 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2648 }
2649
2650/**
2651 * @defgroup condvar_apis Condition Variables APIs
2652 * @ingroup kernel_apis
2653 * @{
2654 */
2655
2656/**
2657 * @brief Initialize a condition variable
2658 *
2659 * @param condvar pointer to a @p k_condvar structure
2660 * @retval 0 Condition variable created successfully
2661 */
2662__syscall int k_condvar_init(struct k_condvar *condvar);
2663
2664/**
2665 * @brief Signals one thread that is pending on the condition variable
2666 *
2667 * @param condvar pointer to a @p k_condvar structure
2668 * @retval 0 On success
2669 */
2670__syscall int k_condvar_signal(struct k_condvar *condvar);
2671
2672/**
2673 * @brief Unblock all threads that are pending on the condition
2674 * variable
2675 *
2676 * @param condvar pointer to a @p k_condvar structure
2677 * @return The number of threads woken on success
2678 */
2679__syscall int k_condvar_broadcast(struct k_condvar *condvar);
2680
2681/**
2682 * @brief Waits on the condition variable releasing the mutex lock
2683 *
2684 * Atomically releases the currently owned mutex, blocks the current thread
2685 * waiting on the condition variable specified by @a condvar,
2686 * and finally acquires the mutex again.
2687 *
2688 * The waiting thread unblocks only after another thread calls
2689 * k_condvar_signal() or k_condvar_broadcast() with the same condition variable.
2690 *
2691 * @param condvar pointer to a @p k_condvar structure
2692 * @param mutex Address of the mutex.
2693 * @param timeout Waiting period for the condition variable
2694 * or one of the special values K_NO_WAIT and K_FOREVER.
2695 * @retval 0 On success
2696 * @retval -EAGAIN Waiting period timed out.
2697 */
2698__syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
2699 k_timeout_t timeout);
2700
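/*
 * Illustrative sketch (not part of the API): the canonical mutex plus
 * condition variable wait loop. The names my_mutex, my_condvar,
 * data_ready and consume() are hypothetical; only the k_condvar_* and
 * k_mutex_* calls are real. The predicate is rechecked in a loop
 * because a wakeup does not by itself guarantee the condition holds.
 *
 *    K_MUTEX_DEFINE(my_mutex);
 *    K_CONDVAR_DEFINE(my_condvar);
 *
 *    static bool data_ready;
 *
 *    void consume(void);  // hypothetical
 *
 *    void consumer(void)
 *    {
 *        k_mutex_lock(&my_mutex, K_FOREVER);
 *        while (!data_ready) {
 *            k_condvar_wait(&my_condvar, &my_mutex, K_FOREVER);
 *        }
 *        consume();  // condition holds and the mutex is held
 *        data_ready = false;
 *        k_mutex_unlock(&my_mutex);
 *    }
 *
 *    void producer(void)
 *    {
 *        k_mutex_lock(&my_mutex, K_FOREVER);
 *        data_ready = true;
 *        k_condvar_signal(&my_condvar);
 *        k_mutex_unlock(&my_mutex);
 *    }
 */
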
2701/**
2702 * @brief Statically define and initialize a condition variable.
2703 *
2704 * The condition variable can be accessed outside the module where it is
2705 * defined using:
2706 *
2707 * @code extern struct k_condvar <name>; @endcode
2708 *
2709 * @param name Name of the condition variable.
2710 */
2711#define K_CONDVAR_DEFINE(name) \
Fabio Baltierif88a4202021-08-04 23:05:54 +01002712 STRUCT_SECTION_ITERABLE(k_condvar, name) = \
Anas Nashif06eb4892020-08-23 12:39:09 -04002713 Z_CONDVAR_INITIALIZER(name)
2714/**
2715 * @}
2716 */
2717
Allan Stephensc98da842016-11-11 15:45:03 -05002718/**
2719 * @cond INTERNAL_HIDDEN
2720 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002721
2722struct k_sem {
2723 _wait_q_t wait_q;
James Harrisb1042812021-03-03 12:02:05 -08002724 unsigned int count;
2725 unsigned int limit;
Peter Bigot7aefa3d2021-03-02 06:18:29 -06002726
Benjamin Walshacc68c12017-01-29 18:57:45 -05002727 _POLL_EVENT;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002728
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002729};
2730
Patrik Flykt97b3bd12019-03-12 15:15:42 -06002731#define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \
Allan Stephensc98da842016-11-11 15:45:03 -05002732 { \
Patrik Flykt4344e272019-03-08 14:19:05 -07002733 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
Allan Stephensc98da842016-11-11 15:45:03 -05002734 .count = initial_count, \
2735 .limit = count_limit, \
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +03002736 _POLL_EVENT_OBJ_INIT(obj) \
Allan Stephensc98da842016-11-11 15:45:03 -05002737 }
2738
2739/**
2740 * INTERNAL_HIDDEN @endcond
2741 */
2742
2743/**
2744 * @defgroup semaphore_apis Semaphore APIs
2745 * @ingroup kernel_apis
2746 * @{
2747 */
2748
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002749/**
James Harrisb1042812021-03-03 12:02:05 -08002750 * @brief Maximum limit value allowed for a semaphore.
2751 *
2752 * This is intended for use when a semaphore does not have
2753 * an explicit maximum limit, and instead is just used for
2754 * counting purposes.
2755 *
2756 */
2757#define K_SEM_MAX_LIMIT UINT_MAX
2758
2759/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002760 * @brief Initialize a semaphore.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002761 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002762 * This routine initializes a semaphore object, prior to its first use.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002763 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002764 * @param sem Address of the semaphore.
2765 * @param initial_count Initial semaphore count.
2766 * @param limit Maximum permitted semaphore count.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002767 *
James Harrisb1042812021-03-03 12:02:05 -08002768 * @see K_SEM_MAX_LIMIT
2769 *
Anas Nashif928af3c2019-05-04 10:36:14 -04002770 * @retval 0 Semaphore created successfully
2771 * @retval -EINVAL Invalid values
2772 *
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002773 */
Anas Nashif928af3c2019-05-04 10:36:14 -04002774__syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
Andrew Boie99280232017-09-29 14:17:47 -07002775 unsigned int limit);
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002776
2777/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002778 * @brief Take a semaphore.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002779 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002780 * This routine takes @a sem.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002781 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002782 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2783 *
2784 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002785 *
2786 * @param sem Address of the semaphore.
Andy Ross78327382020-03-05 15:18:14 -08002787 * @param timeout Waiting period to take the semaphore,
2788 * or one of the special values K_NO_WAIT and K_FOREVER.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002789 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05002790 * @retval 0 Semaphore taken.
2791 * @retval -EBUSY Returned without waiting.
James Harris53b81792021-03-04 15:47:27 -08002792 * @retval -EAGAIN Waiting period timed out,
2793 * or the semaphore was reset during the waiting period.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002794 */
Andy Ross78327382020-03-05 15:18:14 -08002795__syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002796
2797/**
2798 * @brief Give a semaphore.
2799 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002800 * This routine gives @a sem, unless the semaphore is already at its maximum
2801 * permitted count.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002802 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002803 * @funcprops \isr_ok
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002804 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002805 * @param sem Address of the semaphore.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002806 *
2807 * @return N/A
2808 */
Andrew Boie99280232017-09-29 14:17:47 -07002809__syscall void k_sem_give(struct k_sem *sem);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002810
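/*
 * Illustrative sketch (not part of the API): signaling a thread from an
 * ISR with a binary semaphore. The names rx_sem, uart_isr() and
 * rx_thread() are hypothetical; only the k_sem_* calls are real.
 * k_sem_give() is ISR-safe; the taking side bounds its wait to 50 ms.
 *
 *    K_SEM_DEFINE(rx_sem, 0, 1);
 *
 *    void uart_isr(const void *arg)
 *    {
 *        // ... acknowledge the hardware ...
 *        k_sem_give(&rx_sem);
 *    }
 *
 *    void rx_thread(void)
 *    {
 *        while (1) {
 *            if (k_sem_take(&rx_sem, K_MSEC(50)) == 0) {
 *                // ... handle the received data ...
 *            }
 *            // on -EAGAIN the wait timed out (or the semaphore
 *            // was reset); loop and wait again
 *        }
 *    }
 */
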
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002811/**
James Harris53b81792021-03-04 15:47:27 -08002812 * @brief Resets a semaphore's count to zero.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002813 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002814 * This routine sets the count of @a sem to zero.
James Harris53b81792021-03-04 15:47:27 -08002815 * Any outstanding semaphore takes will be aborted
2816 * with -EAGAIN.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002817 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002818 * @param sem Address of the semaphore.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002819 *
2820 * @return N/A
2821 */
Andrew Boie990bf162017-10-03 12:36:49 -07002822__syscall void k_sem_reset(struct k_sem *sem);
Andrew Boiefc273c02017-09-23 12:51:23 -07002823
Anas Nashif954d5502018-02-25 08:37:28 -06002824/**
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002825 * @brief Get a semaphore's count.
2826 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002827 * This routine returns the current count of @a sem.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002828 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002829 * @param sem Address of the semaphore.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002830 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002831 * @return Current semaphore count.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002832 */
Andrew Boie990bf162017-10-03 12:36:49 -07002833__syscall unsigned int k_sem_count_get(struct k_sem *sem);
Andrew Boiefc273c02017-09-23 12:51:23 -07002834
Anas Nashif954d5502018-02-25 08:37:28 -06002835/**
2836 * @internal
2837 */
Patrik Flykt4344e272019-03-08 14:19:05 -07002838static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002839{
2840 return sem->count;
2841}
2842
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002843/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002844 * @brief Statically define and initialize a semaphore.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002845 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002846 * The semaphore can be accessed outside the module where it is defined using:
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002847 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05002848 * @code extern struct k_sem <name>; @endcode
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002849 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002850 * @param name Name of the semaphore.
2851 * @param initial_count Initial semaphore count.
2852 * @param count_limit Maximum permitted semaphore count.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002853 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002854#define K_SEM_DEFINE(name, initial_count, count_limit) \
Fabio Baltierif88a4202021-08-04 23:05:54 +01002855 STRUCT_SECTION_ITERABLE(k_sem, name) = \
Patrik Flykt97b3bd12019-03-12 15:15:42 -06002856 Z_SEM_INITIALIZER(name, initial_count, count_limit); \
Rajavardhan Gundi68040c82018-04-27 10:15:15 +05302857 BUILD_ASSERT(((count_limit) != 0) && \
James Harrisb1042812021-03-03 12:02:05 -08002858 ((initial_count) <= (count_limit)) && \
2859 ((count_limit) <= K_SEM_MAX_LIMIT));
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002860
Anas Nashif166f5192018-02-25 08:02:36 -06002861/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05002862
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002863/**
2864 * @cond INTERNAL_HIDDEN
2865 */
2866
2867struct k_work_delayable;
2868struct k_work_sync;
2869
2870/**
2871 * INTERNAL_HIDDEN @endcond
2872 */
2873
2874/**
Anas Nashifc355b7e2021-04-14 08:49:05 -04002875 * @defgroup workqueue_apis Work Queue APIs
2876 * @ingroup kernel_apis
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002877 * @{
2878 */
2879
2880/** @brief The signature for a work item handler function.
2881 *
2882 * The function will be invoked by the thread animating a work queue.
2883 *
2884 * @param work the work item that provided the handler.
2885 */
2886typedef void (*k_work_handler_t)(struct k_work *work);
2887
2888/** @brief Initialize a (non-delayable) work structure.
2889 *
2890 * This must be invoked before submitting a work structure for the first time.
2891 * It need not be invoked again on the same work structure. It can be
2892 * re-invoked to change the associated handler, but this must be done when the
2893 * work item is idle.
2894 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002895 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002896 *
2897 * @param work the work structure to be initialized.
2898 *
2899 * @param handler the handler to be invoked by the work item.
2900 */
2901void k_work_init(struct k_work *work,
2902 k_work_handler_t handler);
2903
2904/** @brief Busy state flags from the work item.
2905 *
2906 * A zero return value indicates the work item appears to be idle.
2907 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002908 * @note This is a live snapshot of state, which may change before the result
2909 * is checked. Use locks where appropriate.
2910 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002911 * @funcprops \isr_ok
2912 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002913 * @param work pointer to the work item.
2914 *
2915 * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED,
2916 * K_WORK_RUNNING, and K_WORK_CANCELING.
2917 */
2918int k_work_busy_get(const struct k_work *work);
2919
2920/** @brief Test whether a work item is currently pending.
2921 *
2922 * Wrapper to determine whether a work item is in a non-idle state.
2923 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002924 * @note This is a live snapshot of state, which may change before the result
2925 * is checked. Use locks where appropriate.
2926 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002927 * @funcprops \isr_ok
2928 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002929 * @param work pointer to the work item.
2930 *
2931 * @return true if and only if k_work_busy_get() returns a non-zero value.
2932 */
2933static inline bool k_work_is_pending(const struct k_work *work);
2934
2935/** @brief Submit a work item to a queue.
2936 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002937 * @funcprops \isr_ok
2938 *
2939 * @param queue pointer to the work queue on which the item should run. If
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002940 * NULL the queue from the most recent submission will be used.
2941 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002942 * @param work pointer to the work item.
2943 *
2944 * @retval 0 if work was already submitted to a queue
2945 * @retval 1 if work was not submitted and has been queued to @p queue
2946 * @retval 2 if work was running and has been queued to the queue that was
2947 * running it
2948 * @retval -EBUSY
2949 * * if work submission was rejected because the work item is cancelling; or
2950 * * @p queue is draining; or
2951 * * @p queue is plugged.
2952 * @retval -EINVAL if @p queue is null and the work item has never been run.
Peter Bigot47435902021-05-17 06:36:04 -05002953 * @retval -ENODEV if @p queue has not been started.
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002954 */
2955int k_work_submit_to_queue(struct k_work_q *queue,
2956 struct k_work *work);
2957
2958/** @brief Submit a work item to the system queue.
2959 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002960 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002961 *
2962 * @param work pointer to the work item.
2963 *
2964 * @return as with k_work_submit_to_queue().
2965 */
Torbjörn Leksell7a646b32021-03-26 14:41:18 +01002966extern int k_work_submit(struct k_work *work);
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002967
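/*
 * Illustrative sketch (not part of the API): deferring ISR work to the
 * system work queue. The names sensor_work, sensor_work_handler() and
 * sensor_isr() are hypothetical; only the k_work_* calls are real.
 * k_work_submit() is ISR-safe, so the ISR stays short and the heavy
 * lifting runs in the system work queue thread.
 *
 *    static void sensor_work_handler(struct k_work *work)
 *    {
 *        // runs in the system work queue thread context
 *    }
 *
 *    K_WORK_DEFINE(sensor_work, sensor_work_handler);
 *
 *    void sensor_isr(const void *arg)
 *    {
 *        // ... minimal hardware handling ...
 *        (void)k_work_submit(&sensor_work);
 *    }
 */
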
2968/** @brief Wait for last-submitted instance to complete.
2969 *
2970 * Resubmissions may occur while waiting, including chained submissions (from
2971 * within the handler).
2972 *
2973 * @note Be careful of caller and work queue thread relative priority. If
2974 * this function sleeps it will not return until the work queue thread
2975 * completes the tasks that allow this thread to resume.
2976 *
2977 * @note Behavior is undefined if this function is invoked on @p work from a
2978 * work queue running @p work.
2979 *
2980 * @param work pointer to the work item.
2981 *
2982 * @param sync pointer to an opaque item containing state related to the
2983 * pending cancellation. The object must persist until the call returns, and
2984 * be accessible from both the caller thread and the work queue thread. The
2985 * object must not be used for any other flush or cancel operation until this
2986 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
2987 * must be allocated in coherent memory.
2988 *
2989 * @retval true if call had to wait for completion
2990 * @retval false if work was already idle
2991 */
2992bool k_work_flush(struct k_work *work,
2993 struct k_work_sync *sync);
2994
2995/** @brief Cancel a work item.
2996 *
2997 * This attempts to prevent a pending (non-delayable) work item from being
2998 * processed by removing it from the work queue. If the item is being
2999 * processed, the work item will continue to be processed, but resubmissions
3000 * are rejected until cancellation completes.
3001 *
3002 * If this returns zero cancellation is complete, otherwise something
3003 * (probably a work queue thread) is still referencing the item.
3004 *
3005 * See also k_work_cancel_sync().
3006 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003007 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003008 *
3009 * @param work pointer to the work item.
3010 *
3011 * @return the k_work_busy_get() status indicating the state of the item after all
3012 * cancellation steps performed by this call are completed.
3013 */
3014int k_work_cancel(struct k_work *work);
3015
3016/** @brief Cancel a work item and wait for it to complete.
3017 *
3018 * Same as k_work_cancel() but does not return until cancellation is complete.
3019 * This can be invoked by a thread after k_work_cancel() to synchronize with a
3020 * previous cancellation.
3021 *
3022 * On return the work structure will be idle unless something submits it after
3023 * the cancellation was complete.
3024 *
3025 * @note Be careful of caller and work queue thread relative priority. If
3026 * this function sleeps it will not return until the work queue thread
3027 * completes the tasks that allow this thread to resume.
3028 *
3029 * @note Behavior is undefined if this function is invoked on @p work from a
3030 * work queue running @p work.
3031 *
3032 * @param work pointer to the work item.
3033 *
3034 * @param sync pointer to an opaque item containing state related to the
3035 * pending cancellation. The object must persist until the call returns, and
3036 * be accessible from both the caller thread and the work queue thread. The
3037 * object must not be used for any other flush or cancel operation until this
3038 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3039 * must be allocated in coherent memory.
3040 *
Peter Bigot707dc222021-04-16 11:48:50 -05003041 * @retval true if work was pending (call had to wait for cancellation of a
3042 * running handler to complete, or scheduled or submitted operations were
3043 * cancelled);
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003044 * @retval false otherwise
3045 */
3046bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync);
3047
Flavio Ceolind9aa4142021-08-23 14:33:40 -07003048/** @brief Initialize a work queue structure.
3049 *
3050 * This must be invoked before starting a work queue structure for the first time.
3051 * It need not be invoked again on the same work queue structure.
3052 *
3053 * @funcprops \isr_ok
3054 *
3055 * @param queue the queue structure to be initialized.
3056 */
3057void k_work_queue_init(struct k_work_q *queue);
3058
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003059/** @brief Initialize a work queue.
3060 *
3061 * This configures the work queue thread and starts it running. The function
3062 * should not be re-invoked on a queue.
3063 *
Flavio Ceolinc42cde52021-08-23 15:04:58 -07003064 * @param queue pointer to the queue structure. It must be initialized
3065 * in zeroed/bss memory or with @ref k_work_queue_init before
3066 * use.
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003067 *
3068 * @param stack pointer to the work thread stack area.
3069 *
3070 * @param stack_size size of the work thread stack area, in bytes.
3071 *
3072 * @param prio initial thread priority
3073 *
3074 * @param cfg optional additional configuration parameters. Pass @c
3075 * NULL if not required, to use the defaults documented in
3076 * k_work_queue_config.
3077 */
3078void k_work_queue_start(struct k_work_q *queue,
3079 k_thread_stack_t *stack, size_t stack_size,
3080 int prio, const struct k_work_queue_config *cfg);
3081
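/*
 * Illustrative sketch (not part of the API): bringing up a dedicated
 * work queue. The names my_workq, my_stack_area, MY_STACK_SIZE and
 * MY_PRIORITY are hypothetical; the k_work_queue_* calls and the
 * K_THREAD_STACK_* macros are real.
 *
 *    #define MY_STACK_SIZE 1024
 *    #define MY_PRIORITY   5
 *
 *    K_THREAD_STACK_DEFINE(my_stack_area, MY_STACK_SIZE);
 *
 *    static struct k_work_q my_workq;
 *
 *    void start_my_workq(void)
 *    {
 *        k_work_queue_init(&my_workq);
 *        k_work_queue_start(&my_workq, my_stack_area,
 *                           K_THREAD_STACK_SIZEOF(my_stack_area),
 *                           MY_PRIORITY, NULL);
 *    }
 */
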
3082/** @brief Access the thread that animates a work queue.
3083 *
3084 * This is necessary to grant a work queue thread access to resources that the
3085 * work items it will process are expected to use.
3086 *
3087 * @param queue pointer to the queue structure.
3088 *
3089 * @return the thread associated with the work queue.
3090 */
3091static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue);
3092
3093/** @brief Wait until the work queue has drained, optionally plugging it.
3094 *
3095 * This blocks submission to the work queue except when coming from queue
3096 * thread, and blocks the caller until no more work items are available in the
3097 * queue.
3098 *
3099 * If @p plug is true then submission will continue to be blocked after the
3100 * drain operation completes until k_work_queue_unplug() is invoked.
3101 *
3102 * Note that work items that are delayed are not yet associated with their
3103 * work queue. They must be cancelled externally if a goal is to ensure the
3104 * work queue remains empty. The @p plug feature can be used to prevent
3105 * delayed items from being submitted after the drain completes.
3106 *
3107 * @param queue pointer to the queue structure.
3108 *
3109 * @param plug if true the work queue will continue to block new submissions
3110 * after all items have drained.
3111 *
3112 * @retval 1 if call had to wait for the drain to complete
3113 * @retval 0 if call did not have to wait
3114 * @retval negative if wait was interrupted or failed
3115 */
3116int k_work_queue_drain(struct k_work_q *queue, bool plug);
3117
3118/** @brief Release a work queue to accept new submissions.
3119 *
3120 * This releases the block on new submissions placed when k_work_queue_drain()
3121 * is invoked with the @p plug option enabled. If this is invoked before the
3122 * drain completes new items may be submitted as soon as the drain completes.
3123 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003124 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003125 *
3126 * @param queue pointer to the queue structure.
3127 *
3128 * @retval 0 if successfully unplugged
3129 * @retval -EALREADY if the work queue was not plugged.
3130 */
3131int k_work_queue_unplug(struct k_work_q *queue);
3132
3133/** @brief Initialize a delayable work structure.
3134 *
3135 * This must be invoked before scheduling a delayable work structure for the
3136 * first time. It need not be invoked again on the same work structure. It
3137 * can be re-invoked to change the associated handler, but this must be done
3138 * when the work item is idle.
3139 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003140 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003141 *
3142 * @param dwork the delayable work structure to be initialized.
3143 *
3144 * @param handler the handler to be invoked by the work item.
3145 */
3146void k_work_init_delayable(struct k_work_delayable *dwork,
3147 k_work_handler_t handler);
3148
3149/**
3150 * @brief Get the parent delayable work structure from a work pointer.
3151 *
3152 * This function is necessary when a @c k_work_handler_t function is passed to
3153 * k_work_schedule_for_queue() and the handler needs to access data from the
3154 * structure that contains the `k_work_delayable`.
3155 *
3156 * @param work Address passed to the work handler
3157 *
3158 * @return Address of the containing @c k_work_delayable structure.
3159 */
3160static inline struct k_work_delayable *
3161k_work_delayable_from_work(struct k_work *work);
3162
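/*
 * Illustrative sketch (not part of the API): recovering user context in
 * a delayable work handler. The names struct sample_ctx and
 * sample_handler() are hypothetical; k_work_delayable_from_work() and
 * CONTAINER_OF() are real.
 *
 *    struct sample_ctx {
 *        struct k_work_delayable dwork;
 *        int count;
 *    };
 *
 *    static void sample_handler(struct k_work *work)
 *    {
 *        struct k_work_delayable *dwork =
 *            k_work_delayable_from_work(work);
 *        struct sample_ctx *ctx =
 *            CONTAINER_OF(dwork, struct sample_ctx, dwork);
 *
 *        ctx->count++;
 *    }
 */
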
3163/** @brief Busy state flags from the delayable work item.
3164 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003165 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003166 *
3167 * @note This is a live snapshot of state, which may change before the result
3168 * can be inspected. Use locks where appropriate.
3169 *
3170 * @param dwork pointer to the delayable work item.
3171 *
3172 * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED, K_WORK_RUNNING, and
3173 * K_WORK_CANCELING. A zero return value indicates the work item appears to
3174 * be idle.
3175 */
3176int k_work_delayable_busy_get(const struct k_work_delayable *dwork);
3177
3178/** @brief Test whether a delayed work item is currently pending.
3179 *
3180 * Wrapper to determine whether a delayed work item is in a non-idle state.
3181 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003182 * @note This is a live snapshot of state, which may change before the result
3183 * can be inspected. Use locks where appropriate.
3184 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003185 * @funcprops \isr_ok
3186 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003187 * @param dwork pointer to the delayable work item.
3188 *
3189 * @return true if and only if k_work_delayable_busy_get() returns a non-zero
3190 * value.
3191 */
3192static inline bool k_work_delayable_is_pending(
3193 const struct k_work_delayable *dwork);
3194
3195/** @brief Get the absolute tick count at which a scheduled delayable work
3196 * will be submitted.
3197 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003198 * @note This is a live snapshot of state, which may change before the result
3199 * can be inspected. Use locks where appropriate.
3200 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003201 * @funcprops \isr_ok
3202 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003203 * @param dwork pointer to the delayable work item.
3204 *
3205 * @return the tick count when the timer that will schedule the work item will
3206 * expire, or the current tick count if the work is not scheduled.
3207 */
3208static inline k_ticks_t k_work_delayable_expires_get(
3209 const struct k_work_delayable *dwork);
3210
3211/** @brief Get the number of ticks until a scheduled delayable work will be
3212 * submitted.
3213 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003214 * @note This is a live snapshot of state, which may change before the result
3215 * can be inspected. Use locks where appropriate.
3216 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003217 * @funcprops \isr_ok
3218 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003219 * @param dwork pointer to the delayable work item.
3220 *
3221 * @return the number of ticks until the timer that will schedule the work
3222 * item will expire, or zero if the item is not scheduled.
3223 */
3224static inline k_ticks_t k_work_delayable_remaining_get(
3225 const struct k_work_delayable *dwork);
3226
3227/** @brief Submit an idle work item to a queue after a delay.
3228 *
3229 * Unlike k_work_reschedule_for_queue() this is a no-op if the work item is
3230 * already scheduled or submitted, even if @p delay is @c K_NO_WAIT.
3231 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003232 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003233 *
3234 * @param queue the queue on which the work item should be submitted after the
3235 * delay.
3236 *
3237 * @param dwork pointer to the delayable work item.
3238 *
3239 * @param delay the time to wait before submitting the work item. If @c
3240 * K_NO_WAIT and the work is not pending this is equivalent to
3241 * k_work_submit_to_queue().
3242 *
3243 * @retval 0 if work was already scheduled or submitted.
3244 * @retval 1 if work has been scheduled.
Peter Bigot47435902021-05-17 06:36:04 -05003245 * @retval -EBUSY if @p delay is @c K_NO_WAIT and
3246 * k_work_submit_to_queue() fails with this code.
3247 * @retval -EINVAL if @p delay is @c K_NO_WAIT and
3248 * k_work_submit_to_queue() fails with this code.
3249 * @retval -ENODEV if @p delay is @c K_NO_WAIT and
3250 * k_work_submit_to_queue() fails with this code.
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003251 */
3252int k_work_schedule_for_queue(struct k_work_q *queue,
3253 struct k_work_delayable *dwork,
3254 k_timeout_t delay);
3255
3256/** @brief Submit an idle work item to the system work queue after a
3257 * delay.
3258 *
3259 * This is a thin wrapper around k_work_schedule_for_queue(), with all the API
3260 * characteristics of that function.
3261 *
3262 * @param dwork pointer to the delayable work item.
3263 *
3264 * @param delay the time to wait before submitting the work item. If @c
3265 * K_NO_WAIT this is equivalent to k_work_submit_to_queue().
3266 *
3267 * @return as with k_work_schedule_for_queue().
3268 */
Torbjörn Leksell7a646b32021-03-26 14:41:18 +01003269extern int k_work_schedule(struct k_work_delayable *dwork,
3270 k_timeout_t delay);
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003271
3272/** @brief Reschedule a work item to a queue after a delay.
3273 *
3274 * Unlike k_work_schedule_for_queue() this function can change the deadline of
3275 * a scheduled work item, and will schedule a work item that isn't idle
3276 * (e.g. is submitted or running). This function does not affect ("unsubmit")
3277 * a work item that has been submitted to a queue.
3278 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003279 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003280 *
3281 * @param queue the queue on which the work item should be submitted after the
3282 * delay.
3283 *
3284 * @param dwork pointer to the delayable work item.
3285 *
3286 * @param delay the time to wait before submitting the work item. If @c
3287 * K_NO_WAIT this is equivalent to k_work_submit_to_queue() after canceling
3288 * any previous scheduled submission.
3289 *
3290 * @note If delay is @c K_NO_WAIT ("no delay") the return values are as with
3291 * k_work_submit_to_queue().
3292 *
3293 * @retval 0 if delay is @c K_NO_WAIT and work was already on a queue
3294 * @retval 1 if
3295 * * delay is @c K_NO_WAIT and work was not submitted but has now been queued
3296 * to @p queue; or
3297 * * delay not @c K_NO_WAIT and work has been scheduled
3298 * @retval 2 if delay is @c K_NO_WAIT and work was running and has been queued
3299 * to the queue that was running it
Peter Bigot47435902021-05-17 06:36:04 -05003300 * @retval -EBUSY if @p delay is @c K_NO_WAIT and
3301 * k_work_submit_to_queue() fails with this code.
3302 * @retval -EINVAL if @p delay is @c K_NO_WAIT and
3303 * k_work_submit_to_queue() fails with this code.
3304 * @retval -ENODEV if @p delay is @c K_NO_WAIT and
3305 * k_work_submit_to_queue() fails with this code.
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003306 */
3307int k_work_reschedule_for_queue(struct k_work_q *queue,
3308 struct k_work_delayable *dwork,
3309 k_timeout_t delay);
3310
3311/** @brief Reschedule a work item to the system work queue after a
3312 * delay.
3313 *
3314 * This is a thin wrapper around k_work_reschedule_for_queue(), with all the
3315 * API characteristics of that function.
3316 *
3317 * @param dwork pointer to the delayable work item.
3318 *
3319 * @param delay the time to wait before submitting the work item.
3320 *
3321 * @return as with k_work_reschedule_for_queue().
3322 */
Torbjörn Leksell7a646b32021-03-26 14:41:18 +01003323extern int k_work_reschedule(struct k_work_delayable *dwork,
3324 k_timeout_t delay);
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003325
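/*
 * Illustrative sketch (not part of the API): debouncing an input by
 * rescheduling delayable work. The names debounce_work,
 * debounce_handler() and button_event() are hypothetical; only the
 * k_work_* calls are real. Each event pushes the deadline out again,
 * so the handler runs once, 20 ms after the last event.
 *
 *    static void debounce_handler(struct k_work *work)
 *    {
 *        // the input has been stable for 20 ms
 *    }
 *
 *    static K_WORK_DELAYABLE_DEFINE(debounce_work, debounce_handler);
 *
 *    void button_event(void)
 *    {
 *        (void)k_work_reschedule(&debounce_work, K_MSEC(20));
 *    }
 */
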
3326/** @brief Flush delayable work.
3327 *
3328 * If the work is scheduled, it is immediately submitted. Then the caller
3329 * blocks until the work completes, as with k_work_flush().
3330 *
3331 * @note Be careful of caller and work queue thread relative priority. If
3332 * this function sleeps it will not return until the work queue thread
3333 * completes the tasks that allow this thread to resume.
3334 *
3335 * @note Behavior is undefined if this function is invoked on @p dwork from a
3336 * work queue running @p dwork.
3337 *
3338 * @param dwork pointer to the delayable work item.
3339 *
3340 * @param sync pointer to an opaque item containing state related to the
3341 * pending cancellation. The object must persist until the call returns, and
3342 * be accessible from both the caller thread and the work queue thread. The
3343 * object must not be used for any other flush or cancel operation until this
3344 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3345 * must be allocated in coherent memory.
3346 *
3347 * @retval true if call had to wait for completion
3348 * @retval false if work was already idle
3349 */
3350bool k_work_flush_delayable(struct k_work_delayable *dwork,
3351 struct k_work_sync *sync);
3352
3353/** @brief Cancel delayable work.
3354 *
3355 * Similar to k_work_cancel() but for delayable work. If the work is
3356 * scheduled or submitted it is canceled. This function does not wait for the
3357 * cancellation to complete.
3358 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003359 * @note The work may still be running when this returns. Use
3360 * k_work_flush_delayable() or k_work_cancel_delayable_sync() to ensure it is
3361 * not running.
3362 *
3363 * @note Canceling delayable work does not prevent rescheduling it. It does
3364 * prevent submitting it until the cancellation completes.
3365 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003366 * @funcprops \isr_ok
3367 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003368 * @param dwork pointer to the delayable work item.
3369 *
3370 * @return the k_work_delayable_busy_get() status indicating the state of the
3371 * item after all cancellation steps performed by this call are completed.
3372 */
3373int k_work_cancel_delayable(struct k_work_delayable *dwork);
3374
3375/** @brief Cancel delayable work and wait.
3376 *
3377 * Like k_work_cancel_delayable() but waits until the work becomes idle.
3378 *
3379 * @note Canceling delayable work does not prevent rescheduling it. It does
3380 * prevent submitting it until the cancellation completes.
3381 *
3382 * @note Be careful of caller and work queue thread relative priority. If
3383 * this function sleeps it will not return until the work queue thread
3384 * completes the tasks that allow this thread to resume.
3385 *
3386 * @note Behavior is undefined if this function is invoked on @p dwork from a
3387 * work queue running @p dwork.
3388 *
3389 * @param dwork pointer to the delayable work item.
3390 *
3391 * @param sync pointer to an opaque item containing state related to the
3392 * pending cancellation. The object must persist until the call returns, and
3393 * be accessible from both the caller thread and the work queue thread. The
3394 * object must not be used for any other flush or cancel operation until this
3395 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3396 * must be allocated in coherent memory.
3397 *
Peter Bigot707dc222021-04-16 11:48:50 -05003398 * @retval true if work was not idle (call had to wait for cancellation of a
3399 * running handler to complete, or scheduled or submitted operations were
3400 * cancelled);
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003401 * @retval false otherwise
3402 */
3403bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
3404 struct k_work_sync *sync);
3405
3406enum {
3407/**
3408 * @cond INTERNAL_HIDDEN
3409 */
3410
3411 /* The atomic API is used for all work and queue flags fields to
3412 * enforce sequential consistency in SMP environments.
3413 */
3414
3415 /* Bits that represent the work item states. At least nine of the
3416 * combinations are distinct valid stable states.
3417 */
3418 K_WORK_RUNNING_BIT = 0,
3419 K_WORK_CANCELING_BIT = 1,
3420 K_WORK_QUEUED_BIT = 2,
3421 K_WORK_DELAYED_BIT = 3,
3422
3423 K_WORK_MASK = BIT(K_WORK_DELAYED_BIT) | BIT(K_WORK_QUEUED_BIT)
3424 | BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT),
3425
3426 /* Static work flags */
3427 K_WORK_DELAYABLE_BIT = 8,
3428 K_WORK_DELAYABLE = BIT(K_WORK_DELAYABLE_BIT),
3429
3430 /* Dynamic work queue flags */
3431 K_WORK_QUEUE_STARTED_BIT = 0,
3432 K_WORK_QUEUE_STARTED = BIT(K_WORK_QUEUE_STARTED_BIT),
3433 K_WORK_QUEUE_BUSY_BIT = 1,
3434 K_WORK_QUEUE_BUSY = BIT(K_WORK_QUEUE_BUSY_BIT),
3435 K_WORK_QUEUE_DRAIN_BIT = 2,
3436 K_WORK_QUEUE_DRAIN = BIT(K_WORK_QUEUE_DRAIN_BIT),
3437 K_WORK_QUEUE_PLUGGED_BIT = 3,
3438 K_WORK_QUEUE_PLUGGED = BIT(K_WORK_QUEUE_PLUGGED_BIT),
3439
3440 /* Static work queue flags */
3441 K_WORK_QUEUE_NO_YIELD_BIT = 8,
3442 K_WORK_QUEUE_NO_YIELD = BIT(K_WORK_QUEUE_NO_YIELD_BIT),
3443
3444/**
3445 * INTERNAL_HIDDEN @endcond
3446 */
3447 /* Transient work flags */
3448
3449 /** @brief Flag indicating a work item that is running under a work
3450 * queue thread.
3451 *
3452 * Accessed via k_work_busy_get(). May co-occur with other flags.
3453 */
3454 K_WORK_RUNNING = BIT(K_WORK_RUNNING_BIT),
3455
3456 /** @brief Flag indicating a work item that is being canceled.
3457 *
3458 * Accessed via k_work_busy_get(). May co-occur with other flags.
3459 */
3460 K_WORK_CANCELING = BIT(K_WORK_CANCELING_BIT),
3461
3462 /** @brief Flag indicating a work item that has been submitted to a
3463 * queue but has not started running.
3464 *
3465 * Accessed via k_work_busy_get(). May co-occur with other flags.
3466 */
3467 K_WORK_QUEUED = BIT(K_WORK_QUEUED_BIT),
3468
3469 /** @brief Flag indicating a delayed work item that is scheduled for
3470 * submission to a queue.
3471 *
3472 * Accessed via k_work_busy_get(). May co-occur with other flags.
3473 */
3474 K_WORK_DELAYED = BIT(K_WORK_DELAYED_BIT),
3475};
3476
3477/** @brief A structure used to submit work. */
3478struct k_work {
3479 /* All fields are protected by the work module spinlock. No fields
3480 * are to be accessed except through kernel API.
3481 */
3482
3483 /* Node to link into k_work_q pending list. */
3484 sys_snode_t node;
3485
3486 /* The function to be invoked by the work queue thread. */
3487 k_work_handler_t handler;
3488
3489 /* The queue on which the work item was last submitted. */
3490 struct k_work_q *queue;
3491
3492 /* State of the work item.
3493 *
3494 * The item can be DELAYED, QUEUED, and RUNNING simultaneously.
3495 *
3496 * It can be RUNNING and CANCELING simultaneously.
3497 */
3498 uint32_t flags;
3499};
3500
3501#define Z_WORK_INITIALIZER(work_handler) { \
3502 .handler = work_handler, \
3503}
3504
3505/** @brief A structure used to submit work after a delay. */
3506struct k_work_delayable {
3507 /* The work item. */
3508 struct k_work work;
3509
3510 /* Timeout used to submit work after a delay. */
3511 struct _timeout timeout;
3512
3513 /* The queue to which the work should be submitted. */
3514 struct k_work_q *queue;
3515};
3516
3517#define Z_WORK_DELAYABLE_INITIALIZER(work_handler) { \
3518 .work = { \
3519 .handler = work_handler, \
3520 .flags = K_WORK_DELAYABLE, \
3521 }, \
3522}
3523
3524/**
3525 * @brief Initialize a statically-defined delayable work item.
3526 *
3527 * This macro can be used to initialize a statically-defined delayable
3528 * work item, prior to its first use. For example,
3529 *
3530 * @code static K_WORK_DELAYABLE_DEFINE(<dwork>, <work_handler>); @endcode
3531 *
 * Note that if the runtime dependencies allow it, initializing the item
 * with k_work_init_delayable() instead will eliminate the initialized
 * object in ROM that this macro produces and that is copied in at
 * system startup.
3536 *
3537 * @param work Symbol name for delayable work item object
3538 * @param work_handler Function to invoke each time work item is processed.
3539 */
3540#define K_WORK_DELAYABLE_DEFINE(work, work_handler) \
3541 struct k_work_delayable work \
3542 = Z_WORK_DELAYABLE_INITIALIZER(work_handler)
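/*
 * Example: defining a delayable work item statically and scheduling it
 * (a minimal sketch; blink_handler and the 100 ms delay are
 * hypothetical):
 *
 * @code
 * static void blink_handler(struct k_work *work)
 * {
 *	// runs on the system work queue thread
 * }
 *
 * static K_WORK_DELAYABLE_DEFINE(blink_work, blink_handler);
 *
 * void start_blinking(void)
 * {
 *	k_work_schedule(&blink_work, K_MSEC(100));
 * }
 * @endcode
 */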
3543
3544/**
3545 * @cond INTERNAL_HIDDEN
3546 */
3547
3548/* Record used to wait for work to flush.
3549 *
3550 * The work item is inserted into the queue that will process (or is
3551 * processing) the item, and will be processed as soon as the item
3552 * completes. When the flusher is processed the semaphore will be
3553 * signaled, releasing the thread waiting for the flush.
3554 */
3555struct z_work_flusher {
3556 struct k_work work;
3557 struct k_sem sem;
3558};
3559
3560/* Record used to wait for work to complete a cancellation.
3561 *
3562 * The work item is inserted into a global queue of pending cancels.
3563 * When a cancelling work item goes idle any matching waiters are
3564 * removed from pending_cancels and are woken.
3565 */
3566struct z_work_canceller {
3567 sys_snode_t node;
3568 struct k_work *work;
3569 struct k_sem sem;
3570};
3571
3572/**
3573 * INTERNAL_HIDDEN @endcond
3574 */
3575
3576/** @brief A structure holding internal state for a pending synchronous
3577 * operation on a work item or queue.
3578 *
3579 * Instances of this type are provided by the caller for invocation of
3580 * k_work_flush(), k_work_cancel_sync() and sibling flush and cancel APIs. A
3581 * referenced object must persist until the call returns, and be accessible
3582 * from both the caller thread and the work queue thread.
3583 *
 * @note If CONFIG_KERNEL_COHERENCE is enabled the object must be allocated
 * in coherent memory; see arch_mem_coherent(). The stack on these
 * architectures is generally not coherent, so the object must not be
 * stack-allocated. Violations are detected by runtime assertion.
3588 */
3589struct k_work_sync {
3590 union {
3591 struct z_work_flusher flusher;
3592 struct z_work_canceller canceller;
3593 };
3594};
3595
3596/** @brief A structure holding optional configuration items for a work
3597 * queue.
3598 *
3599 * This structure, and values it references, are not retained by
3600 * k_work_queue_start().
3601 */
3602struct k_work_queue_config {
3603 /** The name to be given to the work queue thread.
3604 *
3605 * If left null the thread will not have a name.
3606 */
3607 const char *name;
3608
3609 /** Control whether the work queue thread should yield between
3610 * items.
3611 *
3612 * Yielding between items helps guarantee the work queue
3613 * thread does not starve other threads, including cooperative
3614 * ones released by a work item. This is the default behavior.
3615 *
3616 * Set this to @c true to prevent the work queue thread from
3617 * yielding between items. This may be appropriate when a
3618 * sequence of items should complete without yielding
3619 * control.
3620 */
3621 bool no_yield;
3622};
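/*
 * Example: starting a dedicated work queue with a named, non-yielding
 * thread (a minimal sketch; the stack size, priority, and names are
 * hypothetical):
 *
 * @code
 * K_THREAD_STACK_DEFINE(my_wq_stack, 1024);
 * static struct k_work_q my_wq;
 *
 * void start_my_wq(void)
 * {
 *	static const struct k_work_queue_config cfg = {
 *		.name = "my_wq",
 *		.no_yield = true,
 *	};
 *
 *	k_work_queue_start(&my_wq, my_wq_stack,
 *			   K_THREAD_STACK_SIZEOF(my_wq_stack),
 *			   K_PRIO_PREEMPT(8), &cfg);
 * }
 * @endcode
 */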
3623
3624/** @brief A structure used to hold work until it can be processed. */
3625struct k_work_q {
3626 /* The thread that animates the work. */
3627 struct k_thread thread;
3628
3629 /* All the following fields must be accessed only while the
3630 * work module spinlock is held.
3631 */
3632
3633 /* List of k_work items to be worked. */
3634 sys_slist_t pending;
3635
3636 /* Wait queue for idle work thread. */
3637 _wait_q_t notifyq;
3638
3639 /* Wait queue for threads waiting for the queue to drain. */
3640 _wait_q_t drainq;
3641
3642 /* Flags describing queue state. */
3643 uint32_t flags;
3644};
3645
3646/* Provide the implementation for inline functions declared above */
3647
3648static inline bool k_work_is_pending(const struct k_work *work)
3649{
3650 return k_work_busy_get(work) != 0;
3651}
3652
3653static inline struct k_work_delayable *
3654k_work_delayable_from_work(struct k_work *work)
3655{
3656 return CONTAINER_OF(work, struct k_work_delayable, work);
3657}
3658
3659static inline bool k_work_delayable_is_pending(
3660 const struct k_work_delayable *dwork)
3661{
3662 return k_work_delayable_busy_get(dwork) != 0;
3663}
3664
3665static inline k_ticks_t k_work_delayable_expires_get(
3666 const struct k_work_delayable *dwork)
3667{
3668 return z_timeout_expires(&dwork->timeout);
3669}
3670
3671static inline k_ticks_t k_work_delayable_remaining_get(
3672 const struct k_work_delayable *dwork)
3673{
3674 return z_timeout_remaining(&dwork->timeout);
3675}
3676
3677static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
3678{
3679 return &queue->thread;
3680}
3681
3682/* Legacy wrappers */
3683
Peter Bigot09a31ce2021-03-04 11:21:46 -06003684__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003685static inline bool k_work_pending(const struct k_work *work)
3686{
3687 return k_work_is_pending(work);
3688}
3689
Peter Bigot09a31ce2021-03-04 11:21:46 -06003690__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003691static inline void k_work_q_start(struct k_work_q *work_q,
3692 k_thread_stack_t *stack,
3693 size_t stack_size, int prio)
3694{
3695 k_work_queue_start(work_q, stack, stack_size, prio, NULL);
3696}
3697
Peter Bigot09a31ce2021-03-04 11:21:46 -06003698/* deprecated, remove when corresponding deprecated API is removed. */
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003699struct k_delayed_work {
3700 struct k_work_delayable work;
3701};
3702
Peter Bigot09a31ce2021-03-04 11:21:46 -06003703#define Z_DELAYED_WORK_INITIALIZER(work_handler) __DEPRECATED_MACRO { \
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003704 .work = Z_WORK_DELAYABLE_INITIALIZER(work_handler), \
3705}
3706
Peter Bigot09a31ce2021-03-04 11:21:46 -06003707__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003708static inline void k_delayed_work_init(struct k_delayed_work *work,
3709 k_work_handler_t handler)
3710{
3711 k_work_init_delayable(&work->work, handler);
3712}
3713
Peter Bigot09a31ce2021-03-04 11:21:46 -06003714__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003715static inline int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
3716 struct k_delayed_work *work,
3717 k_timeout_t delay)
3718{
3719 int rc = k_work_reschedule_for_queue(work_q, &work->work, delay);
3720
3721 /* Legacy API doesn't distinguish success cases. */
3722 return (rc >= 0) ? 0 : rc;
3723}
3724
Peter Bigot09a31ce2021-03-04 11:21:46 -06003725__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003726static inline int k_delayed_work_submit(struct k_delayed_work *work,
3727 k_timeout_t delay)
3728{
3729 int rc = k_work_reschedule(&work->work, delay);
3730
3731 /* Legacy API doesn't distinguish success cases. */
3732 return (rc >= 0) ? 0 : rc;
3733}
3734
Peter Bigot09a31ce2021-03-04 11:21:46 -06003735__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003736static inline int k_delayed_work_cancel(struct k_delayed_work *work)
3737{
3738 bool pending = k_work_delayable_is_pending(&work->work);
3739 int rc = k_work_cancel_delayable(&work->work);
3740
3741 /* Old return value rules:
3742 *
3743 * 0 if:
3744 * * Work item countdown cancelled before the item was submitted to
3745 * its queue; or
3746 * * Work item was removed from its queue before it was processed.
3747 *
3748 * -EINVAL if:
3749 * * Work item has never been submitted; or
3750 * * Work item has been successfully cancelled; or
3751 * * Timeout handler is in the process of submitting the work item to
3752 * its queue; or
3753 * * Work queue thread has removed the work item from the queue but
3754 * has not called its handler.
3755 *
3756 * -EALREADY if:
3757 * * Work queue thread has removed the work item from the queue and
3758 * cleared its pending flag; or
3759 * * Work queue thread is invoking the item handler; or
3760 * * Work item handler has completed.
3761 *
	 *
	 * We can't reconstruct those states, so call it successful only when
	 * a pending item is no longer pending, -EINVAL if it was pending and
	 * still is, and -EALREADY if it wasn't pending (so presumably
	 * cancellation should have had no effect, assuming we didn't hit a
	 * race condition).
3768 */
3769 if (pending) {
3770 return (rc == 0) ? 0 : -EINVAL;
3771 }
3772
3773 return -EALREADY;
3774}
3775
Peter Bigot09a31ce2021-03-04 11:21:46 -06003776__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003777static inline bool k_delayed_work_pending(struct k_delayed_work *work)
3778{
3779 return k_work_delayable_is_pending(&work->work);
3780}
3781
Peter Bigot09a31ce2021-03-04 11:21:46 -06003782__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003783static inline int32_t k_delayed_work_remaining_get(struct k_delayed_work *work)
3784{
3785 k_ticks_t rem = k_work_delayable_remaining_get(&work->work);
3786
3787 /* Probably should be ceil32, but was floor32 */
3788 return k_ticks_to_ms_floor32(rem);
3789}
3790
Peter Bigot09a31ce2021-03-04 11:21:46 -06003791__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003792static inline k_ticks_t k_delayed_work_expires_ticks(
3793 struct k_delayed_work *work)
3794{
3795 return k_work_delayable_expires_get(&work->work);
3796}
3797
Peter Bigot09a31ce2021-03-04 11:21:46 -06003798__deprecated
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003799static inline k_ticks_t k_delayed_work_remaining_ticks(
3800 struct k_delayed_work *work)
3801{
3802 return k_work_delayable_remaining_get(&work->work);
3803}
3804
3805/** @} */
3806
Peter Bigot4e3b9262021-01-15 10:52:38 -06003807struct k_work_user;
3808
3809/**
Anas Nashifc355b7e2021-04-14 08:49:05 -04003810 * @addtogroup workqueue_apis
Peter Bigot4e3b9262021-01-15 10:52:38 -06003811 * @{
3812 */
3813
3814/**
3815 * @typedef k_work_user_handler_t
3816 * @brief Work item handler function type for user work queues.
3817 *
3818 * A work item's handler function is executed by a user workqueue's thread
3819 * when the work item is processed by the workqueue.
3820 *
3821 * @param work Address of the work item.
3822 *
3823 * @return N/A
3824 */
3825typedef void (*k_work_user_handler_t)(struct k_work_user *work);
3826
3827/**
3828 * @cond INTERNAL_HIDDEN
3829 */
3830
3831struct k_work_user_q {
3832 struct k_queue queue;
3833 struct k_thread thread;
3834};
3835
3836enum {
3837 K_WORK_USER_STATE_PENDING, /* Work item pending state */
3838};
3839
3840struct k_work_user {
3841 void *_reserved; /* Used by k_queue implementation. */
3842 k_work_user_handler_t handler;
3843 atomic_t flags;
3844};
3845
3846/**
3847 * INTERNAL_HIDDEN @endcond
3848 */
3849
3850#define Z_WORK_USER_INITIALIZER(work_handler) \
3851 { \
Fredrik Gihl67295be2021-06-11 12:31:58 +02003852 ._reserved = NULL, \
Peter Bigot4e3b9262021-01-15 10:52:38 -06003853 .handler = work_handler, \
Fredrik Gihl67295be2021-06-11 12:31:58 +02003854 .flags = 0 \
Peter Bigot4e3b9262021-01-15 10:52:38 -06003855 }
3856
3857/**
3858 * @brief Initialize a statically-defined user work item.
3859 *
3860 * This macro can be used to initialize a statically-defined user work
3861 * item, prior to its first use. For example,
3862 *
3863 * @code static K_WORK_USER_DEFINE(<work>, <work_handler>); @endcode
3864 *
3865 * @param work Symbol name for work item object
3866 * @param work_handler Function to invoke each time work item is processed.
3867 */
3868#define K_WORK_USER_DEFINE(work, work_handler) \
3869 struct k_work_user work = Z_WORK_USER_INITIALIZER(work_handler)
3870
3871/**
3872 * @brief Initialize a userspace work item.
3873 *
3874 * This routine initializes a user workqueue work item, prior to its
3875 * first use.
3876 *
3877 * @param work Address of work item.
3878 * @param handler Function to invoke each time work item is processed.
3879 *
3880 * @return N/A
3881 */
3882static inline void k_work_user_init(struct k_work_user *work,
3883 k_work_user_handler_t handler)
3884{
3885 *work = (struct k_work_user)Z_WORK_USER_INITIALIZER(handler);
3886}
3887
3888/**
3889 * @brief Check if a userspace work item is pending.
3890 *
3891 * This routine indicates if user work item @a work is pending in a workqueue's
3892 * queue.
3893 *
3894 * @note Checking if the work is pending gives no guarantee that the
3895 * work will still be pending when this information is used. It is up to
3896 * the caller to make sure that this information is used in a safe manner.
3897 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003898 * @funcprops \isr_ok
Peter Bigot4e3b9262021-01-15 10:52:38 -06003899 *
3900 * @param work Address of work item.
3901 *
3902 * @return true if work item is pending, or false if it is not pending.
3903 */
3904static inline bool k_work_user_is_pending(struct k_work_user *work)
3905{
3906 return atomic_test_bit(&work->flags, K_WORK_USER_STATE_PENDING);
3907}
3908
3909/**
3910 * @brief Submit a work item to a user mode workqueue
3911 *
3912 * Submits a work item to a workqueue that runs in user mode. A temporary
3913 * memory allocation is made from the caller's resource pool which is freed
3914 * once the worker thread consumes the k_work item. The workqueue
3915 * thread must have memory access to the k_work item being submitted. The caller
3916 * must have permission granted on the work_q parameter's queue object.
3917 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003918 * @funcprops \isr_ok
Peter Bigot4e3b9262021-01-15 10:52:38 -06003919 *
3920 * @param work_q Address of workqueue.
3921 * @param work Address of work item.
3922 *
3923 * @retval -EBUSY if the work item was already in some workqueue
3924 * @retval -ENOMEM if no memory for thread resource pool allocation
3925 * @retval 0 Success
3926 */
3927static inline int k_work_user_submit_to_queue(struct k_work_user_q *work_q,
3928 struct k_work_user *work)
3929{
3930 int ret = -EBUSY;
3931
3932 if (!atomic_test_and_set_bit(&work->flags,
3933 K_WORK_USER_STATE_PENDING)) {
3934 ret = k_queue_alloc_append(&work_q->queue, work);
3935
3936 /* Couldn't insert into the queue. Clear the pending bit
3937 * so the work item can be submitted again
3938 */
3939 if (ret != 0) {
3940 atomic_clear_bit(&work->flags,
3941 K_WORK_USER_STATE_PENDING);
3942 }
3943 }
3944
3945 return ret;
3946}
3947
3948/**
3949 * @brief Start a workqueue in user mode
3950 *
3951 * This works identically to k_work_queue_start() except it is callable from
3952 * user mode, and the worker thread created will run in user mode. The caller
3953 * must have permissions granted on both the work_q parameter's thread and
3954 * queue objects, and the same restrictions on priority apply as
3955 * k_thread_create().
3956 *
3957 * @param work_q Address of workqueue.
3958 * @param stack Pointer to work queue thread's stack space, as defined by
3959 * K_THREAD_STACK_DEFINE()
3960 * @param stack_size Size of the work queue thread's stack (in bytes), which
3961 * should either be the same constant passed to
3962 * K_THREAD_STACK_DEFINE() or the value of K_THREAD_STACK_SIZEOF().
3963 * @param prio Priority of the work queue's thread.
3964 * @param name optional thread name. If not null a copy is made into the
3965 * thread's name buffer.
3966 *
3967 * @return N/A
3968 */
3969extern void k_work_user_queue_start(struct k_work_user_q *work_q,
3970 k_thread_stack_t *stack,
3971 size_t stack_size, int prio,
3972 const char *name);
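/*
 * Example: starting a user-mode work queue and submitting a user work
 * item to it (a minimal sketch; sizes, priority, and names are
 * hypothetical, and the caller is assumed to hold the required object
 * permissions):
 *
 * @code
 * K_THREAD_STACK_DEFINE(user_wq_stack, 1024);
 * static struct k_work_user_q user_wq;
 *
 * static void user_handler(struct k_work_user *work)
 * {
 *	// runs in user mode on the work queue thread
 * }
 *
 * static K_WORK_USER_DEFINE(user_work, user_handler);
 *
 * void setup(void)
 * {
 *	k_work_user_queue_start(&user_wq, user_wq_stack,
 *				K_THREAD_STACK_SIZEOF(user_wq_stack),
 *				K_PRIO_PREEMPT(10), "user_wq");
 *
 *	(void)k_work_user_submit_to_queue(&user_wq, &user_work);
 * }
 * @endcode
 */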
3973
3974/** @} */
3975
Allan Stephensc98da842016-11-11 15:45:03 -05003976/**
Peter Bigot3d583982020-11-18 08:55:32 -06003977 * @cond INTERNAL_HIDDEN
3978 */
3979
3980struct k_work_poll {
3981 struct k_work work;
3982 struct k_work_q *workq;
3983 struct z_poller poller;
3984 struct k_poll_event *events;
3985 int num_events;
3986 k_work_handler_t real_handler;
3987 struct _timeout timeout;
3988 int poll_result;
3989};
3990
3991/**
3992 * INTERNAL_HIDDEN @endcond
3993 */
3994
3995/**
Anas Nashifc355b7e2021-04-14 08:49:05 -04003996 * @addtogroup workqueue_apis
Peter Bigot3d583982020-11-18 08:55:32 -06003997 * @{
3998 */
3999
4000/**
Peter Bigotdc34e7c2020-10-28 11:24:05 -05004001 * @brief Initialize a statically-defined work item.
4002 *
4003 * This macro can be used to initialize a statically-defined workqueue work
4004 * item, prior to its first use. For example,
4005 *
4006 * @code static K_WORK_DEFINE(<work>, <work_handler>); @endcode
4007 *
4008 * @param work Symbol name for work item object
4009 * @param work_handler Function to invoke each time work item is processed.
4010 */
4011#define K_WORK_DEFINE(work, work_handler) \
4012 struct k_work work = Z_WORK_INITIALIZER(work_handler)
4013
4014/**
4015 * @brief Initialize a statically-defined delayed work item.
4016 *
4017 * This macro can be used to initialize a statically-defined workqueue
4018 * delayed work item, prior to its first use. For example,
4019 *
4020 * @code static K_DELAYED_WORK_DEFINE(<work>, <work_handler>); @endcode
4021 *
4022 * @param work Symbol name for delayed work item object
4023 * @param work_handler Function to invoke each time work item is processed.
4024 */
Peter Bigot09a31ce2021-03-04 11:21:46 -06004025#define K_DELAYED_WORK_DEFINE(work, work_handler) __DEPRECATED_MACRO \
Peter Bigotdc34e7c2020-10-28 11:24:05 -05004026 struct k_delayed_work work = Z_DELAYED_WORK_INITIALIZER(work_handler)
4027
4028/**
Peter Bigot3d583982020-11-18 08:55:32 -06004029 * @brief Initialize a triggered work item.
4030 *
4031 * This routine initializes a workqueue triggered work item, prior to
4032 * its first use.
4033 *
4034 * @param work Address of triggered work item.
4035 * @param handler Function to invoke each time work item is processed.
4036 *
4037 * @return N/A
4038 */
4039extern void k_work_poll_init(struct k_work_poll *work,
4040 k_work_handler_t handler);
4041
4042/**
4043 * @brief Submit a triggered work item.
4044 *
4045 * This routine schedules work item @a work to be processed by workqueue
4046 * @a work_q when one of the given @a events is signaled. The routine
4047 * initiates internal poller for the work item and then returns to the caller.
 * initiates an internal poller for the work item and then returns to the
 * caller. Only when one of the watched events happens is the work item
 * actually submitted to the workqueue, where it becomes pending.
 *
 * Submitting a previously submitted triggered work item that is still
 * waiting for the event cancels the existing submission and reschedules it
 * using the new event list. Note that this behavior is inherently subject
4054 * to race conditions with the pre-existing triggered work item and work queue,
4055 * so care must be taken to synchronize such resubmissions externally.
4056 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004057 * @funcprops \isr_ok
Peter Bigot3d583982020-11-18 08:55:32 -06004058 *
4059 * @warning
4060 * Provided array of events as well as a triggered work item must be placed
4061 * in persistent memory (valid until work handler execution or work
4062 * cancellation) and cannot be modified after submission.
4063 *
4064 * @param work_q Address of workqueue.
 * @param work Address of triggered work item.
4066 * @param events An array of events which trigger the work.
4067 * @param num_events The number of events in the array.
4068 * @param timeout Timeout after which the work will be scheduled
4069 * for execution even if not triggered.
4070 *
4071 *
4072 * @retval 0 Work item started watching for events.
4073 * @retval -EINVAL Work item is being processed or has completed its work.
4074 * @retval -EADDRINUSE Work item is pending on a different workqueue.
4075 */
4076extern int k_work_poll_submit_to_queue(struct k_work_q *work_q,
4077 struct k_work_poll *work,
4078 struct k_poll_event *events,
4079 int num_events,
4080 k_timeout_t timeout);
4081
4082/**
4083 * @brief Submit a triggered work item to the system workqueue.
4084 *
4085 * This routine schedules work item @a work to be processed by system
4086 * workqueue when one of the given @a events is signaled. The routine
 * initiates an internal poller for the work item and then returns to the
 * caller. Only when one of the watched events happens is the work item
 * actually submitted to the workqueue, where it becomes pending.
 *
 * Submitting a previously submitted triggered work item that is still
 * waiting for the event cancels the existing submission and reschedules it
 * using the new event list. Note that this behavior is inherently subject
4094 * to race conditions with the pre-existing triggered work item and work queue,
4095 * so care must be taken to synchronize such resubmissions externally.
4096 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004097 * @funcprops \isr_ok
Peter Bigot3d583982020-11-18 08:55:32 -06004098 *
4099 * @warning
4100 * Provided array of events as well as a triggered work item must not be
4101 * modified until the item has been processed by the workqueue.
4102 *
 * @param work Address of triggered work item.
4104 * @param events An array of events which trigger the work.
4105 * @param num_events The number of events in the array.
4106 * @param timeout Timeout after which the work will be scheduled
4107 * for execution even if not triggered.
4108 *
4109 * @retval 0 Work item started watching for events.
4110 * @retval -EINVAL Work item is being processed or has completed its work.
4111 * @retval -EADDRINUSE Work item is pending on a different workqueue.
4112 */
Torbjörn Leksellcae9a902021-03-26 14:20:05 +01004113extern int k_work_poll_submit(struct k_work_poll *work,
Peter Bigot3d583982020-11-18 08:55:32 -06004114 struct k_poll_event *events,
4115 int num_events,
Torbjörn Leksellcae9a902021-03-26 14:20:05 +01004116 k_timeout_t timeout);
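/*
 * Example: running work when a semaphore becomes available, using the
 * system work queue (a minimal sketch; the semaphore, handler, and
 * event array are hypothetical and must stay valid until the work is
 * processed or cancelled):
 *
 * @code
 * static struct k_sem my_sem;
 * static struct k_work_poll my_pwork;
 * static struct k_poll_event my_events[1];
 *
 * static void pwork_handler(struct k_work *work)
 * {
 *	// runs once my_sem was available (or the timeout expired)
 * }
 *
 * void setup(void)
 * {
 *	k_sem_init(&my_sem, 0, 1);
 *	k_work_poll_init(&my_pwork, pwork_handler);
 *	k_poll_event_init(&my_events[0], K_POLL_TYPE_SEM_AVAILABLE,
 *			  K_POLL_MODE_NOTIFY_ONLY, &my_sem);
 *	(void)k_work_poll_submit(&my_pwork, my_events, 1, K_FOREVER);
 * }
 * @endcode
 */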
Peter Bigot3d583982020-11-18 08:55:32 -06004117
4118/**
4119 * @brief Cancel a triggered work item.
4120 *
4121 * This routine cancels the submission of triggered work item @a work.
 * A triggered work item can only be canceled if no event has yet triggered
 * submission of the work item to a workqueue.
4124 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004125 * @funcprops \isr_ok
Peter Bigot3d583982020-11-18 08:55:32 -06004126 *
 * @param work Address of triggered work item.
4128 *
4129 * @retval 0 Work item canceled.
4130 * @retval -EINVAL Work item is being processed or has completed its work.
4131 */
4132extern int k_work_poll_cancel(struct k_work_poll *work);
4133
4134/** @} */
4135
4136/**
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004137 * @defgroup msgq_apis Message Queue APIs
4138 * @ingroup kernel_apis
4139 * @{
Allan Stephensc98da842016-11-11 15:45:03 -05004140 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004141
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004142/**
4143 * @brief Message Queue Structure
4144 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004145struct k_msgq {
Anas Nashife71293e2019-12-04 20:00:14 -05004146 /** Message queue wait queue */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004147 _wait_q_t wait_q;
Anas Nashife71293e2019-12-04 20:00:14 -05004148 /** Lock */
Andy Rossbe03dbd2018-07-26 10:23:02 -07004149 struct k_spinlock lock;
Anas Nashife71293e2019-12-04 20:00:14 -05004150 /** Message size */
Peter Mitsis026b4ed2016-10-13 11:41:45 -04004151 size_t msg_size;
Anas Nashife71293e2019-12-04 20:00:14 -05004152 /** Maximal number of messages */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004153 uint32_t max_msgs;
Anas Nashife71293e2019-12-04 20:00:14 -05004154 /** Start of message buffer */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004155 char *buffer_start;
Anas Nashife71293e2019-12-04 20:00:14 -05004156 /** End of message buffer */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004157 char *buffer_end;
Anas Nashife71293e2019-12-04 20:00:14 -05004158 /** Read pointer */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004159 char *read_ptr;
Anas Nashife71293e2019-12-04 20:00:14 -05004160 /** Write pointer */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004161 char *write_ptr;
Anas Nashife71293e2019-12-04 20:00:14 -05004162 /** Number of used messages */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004163 uint32_t used_msgs;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004164
Nick Gravesb445f132021-04-12 12:35:18 -07004165 _POLL_EVENT;
4166
Anas Nashife71293e2019-12-04 20:00:14 -05004167 /** Message queue */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004168 uint8_t flags;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004169};
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004170/**
4171 * @cond INTERNAL_HIDDEN
4172 */
4173
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004174
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004175#define Z_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004176 { \
Patrik Flykt4344e272019-03-08 14:19:05 -07004177 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
Peter Mitsis1da807e2016-10-06 11:36:59 -04004178 .msg_size = q_msg_size, \
Charles E. Youse6d01f672019-03-18 10:27:34 -07004179 .max_msgs = q_max_msgs, \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004180 .buffer_start = q_buffer, \
Peter Mitsis1da807e2016-10-06 11:36:59 -04004181 .buffer_end = q_buffer + (q_max_msgs * q_msg_size), \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004182 .read_ptr = q_buffer, \
4183 .write_ptr = q_buffer, \
4184 .used_msgs = 0, \
Nick Gravesb445f132021-04-12 12:35:18 -07004185 _POLL_EVENT_OBJ_INIT(obj) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004186 }
Kumar Galac8b94f42020-09-29 09:52:23 -05004187
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004188/**
4189 * INTERNAL_HIDDEN @endcond
4190 */
4191
Andrew Boie65a9d2a2017-06-27 10:51:23 -07004192
Andrew Boie0fe789f2018-04-12 18:35:56 -07004193#define K_MSGQ_FLAG_ALLOC BIT(0)
4194
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004195/**
4196 * @brief Message Queue Attributes
4197 */
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304198struct k_msgq_attrs {
Anas Nashife71293e2019-12-04 20:00:14 -05004199 /** Message Size */
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304200 size_t msg_size;
Anas Nashife71293e2019-12-04 20:00:14 -05004201 /** Maximal number of messages */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004202 uint32_t max_msgs;
Anas Nashife71293e2019-12-04 20:00:14 -05004203 /** Used messages */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004204 uint32_t used_msgs;
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304205};
4206
Allan Stephensc98da842016-11-11 15:45:03 -05004207
4208/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004209 * @brief Statically define and initialize a message queue.
Peter Mitsis1da807e2016-10-06 11:36:59 -04004210 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004211 * The message queue's ring buffer contains space for @a q_max_msgs messages,
4212 * each of which is @a q_msg_size bytes long. The buffer is aligned to a
Allan Stephensda827222016-11-09 14:23:58 -06004213 * @a q_align -byte boundary, which must be a power of 2. To ensure that each
4214 * message is similarly aligned to this boundary, @a q_msg_size must also be
4215 * a multiple of @a q_align.
Peter Mitsis1da807e2016-10-06 11:36:59 -04004216 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004217 * The message queue can be accessed outside the module where it is defined
4218 * using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004219 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05004220 * @code extern struct k_msgq <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004221 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004222 * @param q_name Name of the message queue.
4223 * @param q_msg_size Message size (in bytes).
4224 * @param q_max_msgs Maximum number of messages that can be queued.
Allan Stephensda827222016-11-09 14:23:58 -06004225 * @param q_align Alignment of the message queue's ring buffer.
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004226 *
Peter Mitsis1da807e2016-10-06 11:36:59 -04004227 */
Nicolas Pitreb1d37422019-06-03 10:51:32 -04004228#define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align) \
4229 static char __noinit __aligned(q_align) \
4230 _k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)]; \
Fabio Baltierif88a4202021-08-04 23:05:54 +01004231 STRUCT_SECTION_ITERABLE(k_msgq, q_name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004232 Z_MSGQ_INITIALIZER(q_name, _k_fifo_buf_##q_name, \
Peter Mitsis1da807e2016-10-06 11:36:59 -04004233 q_msg_size, q_max_msgs)
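/*
 * Example: defining a queue of ten 4-byte messages and exchanging one
 * (a minimal sketch; the queue name and message layout are
 * hypothetical):
 *
 * @code
 * K_MSGQ_DEFINE(my_msgq, sizeof(uint32_t), 10, 4);
 *
 * void producer(void)
 * {
 *	uint32_t msg = 42U;
 *
 *	(void)k_msgq_put(&my_msgq, &msg, K_NO_WAIT);
 * }
 *
 * void consumer(void)
 * {
 *	uint32_t msg;
 *
 *	if (k_msgq_get(&my_msgq, &msg, K_FOREVER) == 0) {
 *		// msg now holds the received value
 *	}
 * }
 * @endcode
 */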
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004234
Peter Mitsisd7a37502016-10-13 11:37:40 -04004235/**
4236 * @brief Initialize a message queue.
4237 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004238 * This routine initializes a message queue object, prior to its first use.
4239 *
Allan Stephensda827222016-11-09 14:23:58 -06004240 * The message queue's ring buffer must contain space for @a max_msgs messages,
4241 * each of which is @a msg_size bytes long. The buffer must be aligned to an
4242 * N-byte boundary, where N is a power of 2 (i.e. 1, 2, 4, ...). To ensure
 * that each message is similarly aligned to this boundary, @a msg_size
4244 * must also be a multiple of N.
4245 *
Anas Nashif25c87db2021-03-29 10:54:23 -04004246 * @param msgq Address of the message queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004247 * @param buffer Pointer to ring buffer that holds queued messages.
4248 * @param msg_size Message size (in bytes).
Peter Mitsisd7a37502016-10-13 11:37:40 -04004249 * @param max_msgs Maximum number of messages that can be queued.
4250 *
4251 * @return N/A
4252 */
Anas Nashif25c87db2021-03-29 10:54:23 -04004253void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004254 uint32_t max_msgs);
Andrew Boie0fe789f2018-04-12 18:35:56 -07004255
4256/**
4257 * @brief Initialize a message queue.
4258 *
4259 * This routine initializes a message queue object, prior to its first use,
4260 * allocating its internal ring buffer from the calling thread's resource
4261 * pool.
4262 *
4263 * Memory allocated for the ring buffer can be released by calling
4264 * k_msgq_cleanup(), or if userspace is enabled and the msgq object loses
4265 * all of its references.
4266 *
Anas Nashif4b386592019-11-25 09:30:47 -05004267 * @param msgq Address of the message queue.
Andrew Boie0fe789f2018-04-12 18:35:56 -07004268 * @param msg_size Message size (in bytes).
4269 * @param max_msgs Maximum number of messages that can be queued.
4270 *
4271 * @return 0 on success, -ENOMEM if there was insufficient memory in the
4272 * thread's resource pool, or -EINVAL if the size parameters cause
4273 * an integer overflow.
4274 */
Anas Nashif4b386592019-11-25 09:30:47 -05004275__syscall int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004276 uint32_t max_msgs);
Andrew Boie0fe789f2018-04-12 18:35:56 -07004277
Anas Nashife71293e2019-12-04 20:00:14 -05004278/**
Anas Nashif4b386592019-11-25 09:30:47 -05004279 * @brief Release allocated buffer for a queue
Anas Nashife71293e2019-12-04 20:00:14 -05004280 *
4281 * Releases memory allocated for the ring buffer.
Anas Nashif4b386592019-11-25 09:30:47 -05004282 *
4283 * @param msgq message queue to cleanup
4284 *
Anas Nashif11b93652019-06-16 08:43:48 -04004285 * @retval 0 on success
4286 * @retval -EBUSY Queue not empty
Anas Nashife71293e2019-12-04 20:00:14 -05004287 */
Anas Nashif11b93652019-06-16 08:43:48 -04004288int k_msgq_cleanup(struct k_msgq *msgq);
Peter Mitsisd7a37502016-10-13 11:37:40 -04004289
4290/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004291 * @brief Send a message to a message queue.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004292 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004293 * This routine sends a message to message queue @a q.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004294 *
Lauren Murphyf29a2d12020-09-16 21:13:40 -05004295 * @note The message content is copied from @a data into @a msgq and the @a data
4296 * pointer is not retained, so the message content will not be modified
4297 * by this function.
Benjamin Walsh8215ce12016-11-09 19:45:19 -05004298 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004299 * @funcprops \isr_ok
4300 *
Anas Nashif4b386592019-11-25 09:30:47 -05004301 * @param msgq Address of the message queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004302 * @param data Pointer to the message.
Andy Ross78327382020-03-05 15:18:14 -08004303 * @param timeout Non-negative waiting period to add the message,
4304 * or one of the special values K_NO_WAIT and
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01004305 * K_FOREVER.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004306 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004307 * @retval 0 Message sent.
4308 * @retval -ENOMSG Returned without waiting or queue purged.
4309 * @retval -EAGAIN Waiting period timed out.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004310 */
Lauren Murphyf29a2d12020-09-16 21:13:40 -05004311__syscall int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
Peter Mitsisd7a37502016-10-13 11:37:40 -04004312
4313/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004314 * @brief Receive a message from a message queue.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004315 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004316 * This routine receives a message from message queue @a q in a "first in,
4317 * first out" manner.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004318 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004319 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
4320 *
4321 * @funcprops \isr_ok
Benjamin Walsh8215ce12016-11-09 19:45:19 -05004322 *
Anas Nashif4b386592019-11-25 09:30:47 -05004323 * @param msgq Address of the message queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004324 * @param data Address of area to hold the received message.
Andy Ross78327382020-03-05 15:18:14 -08004325 * @param timeout Waiting period to receive the message,
4326 * or one of the special values K_NO_WAIT and
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01004327 * K_FOREVER.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004328 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004329 * @retval 0 Message received.
4330 * @retval -ENOMSG Returned without waiting.
4331 * @retval -EAGAIN Waiting period timed out.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004332 */
Andy Ross78327382020-03-05 15:18:14 -08004333__syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout);
Peter Mitsisd7a37502016-10-13 11:37:40 -04004334
4335/**
Sathish Kuttan3efd8e12018-11-09 21:03:10 -08004336 * @brief Peek/read a message from a message queue.
4337 *
 * This routine reads a message from message queue @a msgq in a "first in,
4339 * first out" manner and leaves the message in the queue.
4340 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004341 * @funcprops \isr_ok
Sathish Kuttan3efd8e12018-11-09 21:03:10 -08004342 *
Anas Nashif4b386592019-11-25 09:30:47 -05004343 * @param msgq Address of the message queue.
Sathish Kuttan3efd8e12018-11-09 21:03:10 -08004344 * @param data Address of area to hold the message read from the queue.
4345 *
4346 * @retval 0 Message read.
4347 * @retval -ENOMSG Returned when the queue has no message.
Sathish Kuttan3efd8e12018-11-09 21:03:10 -08004348 */
Anas Nashif4b386592019-11-25 09:30:47 -05004349__syscall int k_msgq_peek(struct k_msgq *msgq, void *data);
Sathish Kuttan3efd8e12018-11-09 21:03:10 -08004350
4351/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004352 * @brief Purge a message queue.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004353 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004354 * This routine discards all unreceived messages in a message queue's ring
4355 * buffer. Any threads that are blocked waiting to send a message to the
4356 * message queue are unblocked and see an -ENOMSG error code.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004357 *
Anas Nashif4b386592019-11-25 09:30:47 -05004358 * @param msgq Address of the message queue.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004359 *
4360 * @return N/A
4361 */
Anas Nashif4b386592019-11-25 09:30:47 -05004362__syscall void k_msgq_purge(struct k_msgq *msgq);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004363
Peter Mitsis67be2492016-10-07 11:44:34 -04004364/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004365 * @brief Get the amount of free space in a message queue.
Peter Mitsis67be2492016-10-07 11:44:34 -04004366 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004367 * This routine returns the number of unused entries in a message queue's
4368 * ring buffer.
Peter Mitsis67be2492016-10-07 11:44:34 -04004369 *
Anas Nashif4b386592019-11-25 09:30:47 -05004370 * @param msgq Address of the message queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004371 *
4372 * @return Number of unused ring buffer entries.
Peter Mitsis67be2492016-10-07 11:44:34 -04004373 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004374__syscall uint32_t k_msgq_num_free_get(struct k_msgq *msgq);
Andrew Boie82edb6e2017-10-02 10:53:06 -07004375
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304376/**
4377 * @brief Get basic attributes of a message queue.
4378 *
4379 * This routine fetches basic attributes of message queue into attr argument.
4380 *
Anas Nashif4b386592019-11-25 09:30:47 -05004381 * @param msgq Address of the message queue.
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304382 * @param attrs pointer to message queue attribute structure.
4383 *
4384 * @return N/A
4385 */
Anas Nashif4b386592019-11-25 09:30:47 -05004386__syscall void k_msgq_get_attrs(struct k_msgq *msgq,
4387 struct k_msgq_attrs *attrs);
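/*
 * Example: checking occupancy through the queue attributes (a minimal
 * sketch; my_msgq is a hypothetical, already-initialized queue):
 *
 * @code
 * struct k_msgq_attrs attrs;
 *
 * k_msgq_get_attrs(&my_msgq, &attrs);
 * if (attrs.used_msgs == attrs.max_msgs) {
 *	// the queue is full
 * }
 * @endcode
 */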
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304388
4389
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004390static inline uint32_t z_impl_k_msgq_num_free_get(struct k_msgq *msgq)
Peter Mitsis67be2492016-10-07 11:44:34 -04004391{
Anas Nashif4b386592019-11-25 09:30:47 -05004392 return msgq->max_msgs - msgq->used_msgs;
Peter Mitsis67be2492016-10-07 11:44:34 -04004393}
4394
Peter Mitsisd7a37502016-10-13 11:37:40 -04004395/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004396 * @brief Get the number of messages in a message queue.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004397 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004398 * This routine returns the number of messages in a message queue's ring buffer.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004399 *
Anas Nashif4b386592019-11-25 09:30:47 -05004400 * @param msgq Address of the message queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004401 *
4402 * @return Number of messages.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004403 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004404__syscall uint32_t k_msgq_num_used_get(struct k_msgq *msgq);
Andrew Boie82edb6e2017-10-02 10:53:06 -07004405
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004406static inline uint32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004407{
Anas Nashif4b386592019-11-25 09:30:47 -05004408 return msgq->used_msgs;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004409}
4410
Anas Nashif166f5192018-02-25 08:02:36 -06004411/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05004412
4413/**
Allan Stephensc98da842016-11-11 15:45:03 -05004414 * @defgroup mailbox_apis Mailbox APIs
4415 * @ingroup kernel_apis
4416 * @{
4417 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004418
Anas Nashife71293e2019-12-04 20:00:14 -05004419/**
4420 * @brief Mailbox Message Structure
4421 *
4422 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004423struct k_mbox_msg {
4424 /** internal use only - needed for legacy API support */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004425 uint32_t _mailbox;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004426 /** size of message (in bytes) */
Peter Mitsisd93078c2016-10-14 12:59:37 -04004427 size_t size;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004428 /** application-defined information value */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004429 uint32_t info;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004430 /** sender's message data buffer */
4431 void *tx_data;
4432 /** internal use only - needed for legacy API support */
4433 void *_rx_data;
4434 /** message data block descriptor */
4435 struct k_mem_block tx_block;
4436 /** source thread id */
4437 k_tid_t rx_source_thread;
4438 /** target thread id */
4439 k_tid_t tx_target_thread;
4440 /** internal use only - thread waiting on send (may be a dummy) */
4441 k_tid_t _syncing_thread;
4442#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
4443 /** internal use only - semaphore used during asynchronous send */
4444 struct k_sem *_async_sem;
4445#endif
4446};
Anas Nashife71293e2019-12-04 20:00:14 -05004447/**
4448 * @brief Mailbox Structure
4449 *
4450 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004451struct k_mbox {
Anas Nashife71293e2019-12-04 20:00:14 -05004452 /** Transmit messages queue */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004453 _wait_q_t tx_msg_queue;
Anas Nashife71293e2019-12-04 20:00:14 -05004454 /** Receive message queue */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004455 _wait_q_t rx_msg_queue;
Andy Ross9eeb6b82018-07-25 15:06:24 -07004456 struct k_spinlock lock;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004457
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004458};
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004459/**
4460 * @cond INTERNAL_HIDDEN
4461 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004462
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004463#define Z_MBOX_INITIALIZER(obj) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004464 { \
Patrik Flykt4344e272019-03-08 14:19:05 -07004465 .tx_msg_queue = Z_WAIT_Q_INIT(&obj.tx_msg_queue), \
4466 .rx_msg_queue = Z_WAIT_Q_INIT(&obj.rx_msg_queue), \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004467 }
4468
Peter Mitsis12092702016-10-14 12:57:23 -04004469/**
Allan Stephensc98da842016-11-11 15:45:03 -05004470 * INTERNAL_HIDDEN @endcond
4471 */
4472
4473/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004474 * @brief Statically define and initialize a mailbox.
Peter Mitsis12092702016-10-14 12:57:23 -04004475 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004476 * The mailbox is to be accessed outside the module where it is defined using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004477 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05004478 * @code extern struct k_mbox <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004479 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004480 * @param name Name of the mailbox.
Peter Mitsis12092702016-10-14 12:57:23 -04004481 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004482#define K_MBOX_DEFINE(name) \
Fabio Baltierif88a4202021-08-04 23:05:54 +01004483 STRUCT_SECTION_ITERABLE(k_mbox, name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004484 Z_MBOX_INITIALIZER(name) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004485
Peter Mitsis12092702016-10-14 12:57:23 -04004486/**
4487 * @brief Initialize a mailbox.
4488 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004489 * This routine initializes a mailbox object, prior to its first use.
4490 *
4491 * @param mbox Address of the mailbox.
Peter Mitsis12092702016-10-14 12:57:23 -04004492 *
4493 * @return N/A
4494 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004495extern void k_mbox_init(struct k_mbox *mbox);
4496
Peter Mitsis12092702016-10-14 12:57:23 -04004497/**
4498 * @brief Send a mailbox message in a synchronous manner.
4499 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004500 * This routine sends a message to @a mbox and waits for a receiver to both
4501 * receive and process it. The message data may be in a buffer, in a memory
4502 * pool block, or non-existent (i.e. an empty message).
Peter Mitsis12092702016-10-14 12:57:23 -04004503 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004504 * @param mbox Address of the mailbox.
4505 * @param tx_msg Address of the transmit message descriptor.
Andy Ross78327382020-03-05 15:18:14 -08004506 * @param timeout Waiting period for the message to be received,
4507 * or one of the special values K_NO_WAIT
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004508 * and K_FOREVER. Once the message has been received,
4509 * this routine waits as long as necessary for the message
4510 * to be completely processed.
Peter Mitsis12092702016-10-14 12:57:23 -04004511 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004512 * @retval 0 Message sent.
4513 * @retval -ENOMSG Returned without waiting.
4514 * @retval -EAGAIN Waiting period timed out.
Peter Mitsis12092702016-10-14 12:57:23 -04004515 */
Peter Mitsis40680f62016-10-14 10:04:55 -04004516extern int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
Andy Ross78327382020-03-05 15:18:14 -08004517 k_timeout_t timeout);
Peter Mitsis12092702016-10-14 12:57:23 -04004518
Peter Mitsis12092702016-10-14 12:57:23 -04004519/**
4520 * @brief Send a mailbox message in an asynchronous manner.
4521 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004522 * This routine sends a message to @a mbox without waiting for a receiver
4523 * to process it. The message data may be in a buffer, in a memory pool block,
4524 * or non-existent (i.e. an empty message). Optionally, the semaphore @a sem
4525 * will be given when the message has been both received and completely
4526 * processed by the receiver.
Peter Mitsis12092702016-10-14 12:57:23 -04004527 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004528 * @param mbox Address of the mailbox.
4529 * @param tx_msg Address of the transmit message descriptor.
4530 * @param sem Address of a semaphore, or NULL if none is needed.
Peter Mitsis12092702016-10-14 12:57:23 -04004531 *
4532 * @return N/A
4533 */
Peter Mitsis40680f62016-10-14 10:04:55 -04004534extern void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004535 struct k_sem *sem);
4536
Peter Mitsis12092702016-10-14 12:57:23 -04004537/**
4538 * @brief Receive a mailbox message.
4539 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004540 * This routine receives a message from @a mbox, then optionally retrieves
4541 * its data and disposes of the message.
Peter Mitsis12092702016-10-14 12:57:23 -04004542 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004543 * @param mbox Address of the mailbox.
4544 * @param rx_msg Address of the receive message descriptor.
4545 * @param buffer Address of the buffer to receive data, or NULL to defer data
4546 * retrieval and message disposal until later.
Andy Ross78327382020-03-05 15:18:14 -08004547 * @param timeout Waiting period for a message to be received,
4548 * or one of the special values K_NO_WAIT and K_FOREVER.
Peter Mitsis12092702016-10-14 12:57:23 -04004549 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004550 * @retval 0 Message received.
4551 * @retval -ENOMSG Returned without waiting.
4552 * @retval -EAGAIN Waiting period timed out.
Peter Mitsis12092702016-10-14 12:57:23 -04004553 */
Peter Mitsis40680f62016-10-14 10:04:55 -04004554extern int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg,
Andy Ross78327382020-03-05 15:18:14 -08004555 void *buffer, k_timeout_t timeout);
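/*
 * Example: a synchronous mailbox exchange (a minimal sketch; the
 * mailbox, buffers, and payload are hypothetical):
 *
 * @code
 * K_MBOX_DEFINE(my_mbox);
 *
 * void sender(void)
 * {
 *	char data[8] = "payload";
 *	struct k_mbox_msg msg = {
 *		.size = sizeof(data),
 *		.tx_data = data,
 *		.tx_target_thread = K_ANY,
 *	};
 *
 *	(void)k_mbox_put(&my_mbox, &msg, K_FOREVER);
 * }
 *
 * void receiver(void)
 * {
 *	char buffer[8];
 *	struct k_mbox_msg msg = {
 *		.size = sizeof(buffer),
 *		.rx_source_thread = K_ANY,
 *	};
 *
 *	(void)k_mbox_get(&my_mbox, &msg, buffer, K_FOREVER);
 * }
 * @endcode
 */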
Peter Mitsis12092702016-10-14 12:57:23 -04004556
4557/**
4558 * @brief Retrieve mailbox message data into a buffer.
4559 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004560 * This routine completes the processing of a received message by retrieving
4561 * its data into a buffer, then disposing of the message.
Peter Mitsis12092702016-10-14 12:57:23 -04004562 *
4563 * Alternatively, this routine can be used to dispose of a received message
4564 * without retrieving its data.
4565 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004566 * @param rx_msg Address of the receive message descriptor.
4567 * @param buffer Address of the buffer to receive data, or NULL to discard
4568 * the data.
Peter Mitsis12092702016-10-14 12:57:23 -04004569 *
4570 * @return N/A
4571 */
Peter Mitsis40680f62016-10-14 10:04:55 -04004572extern void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer);
Peter Mitsis12092702016-10-14 12:57:23 -04004573
Anas Nashif166f5192018-02-25 08:02:36 -06004574/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05004575
4576/**
Anas Nashifce78d162018-05-24 12:43:11 -05004577 * @defgroup pipe_apis Pipe APIs
4578 * @ingroup kernel_apis
4579 * @{
Allan Stephensc98da842016-11-11 15:45:03 -05004580 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004581
Anas Nashifce78d162018-05-24 12:43:11 -05004582/** Pipe Structure */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004583struct k_pipe {
Anas Nashifce78d162018-05-24 12:43:11 -05004584 unsigned char *buffer; /**< Pipe buffer: may be NULL */
4585 size_t size; /**< Buffer size */
4586 size_t bytes_used; /**< # bytes used in buffer */
4587 size_t read_index; /**< Where in buffer to read from */
4588 size_t write_index; /**< Where in buffer to write */
Andy Rossf582b552019-02-05 16:10:18 -08004589 struct k_spinlock lock; /**< Synchronization lock */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004590
4591 struct {
Anas Nashifce78d162018-05-24 12:43:11 -05004592 _wait_q_t readers; /**< Reader wait queue */
4593 _wait_q_t writers; /**< Writer wait queue */
	} wait_q; /**< Wait queue */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004595
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004596 uint8_t flags; /**< Flags */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004597};
4598
Anas Nashifce78d162018-05-24 12:43:11 -05004599/**
4600 * @cond INTERNAL_HIDDEN
4601 */
4602#define K_PIPE_FLAG_ALLOC BIT(0) /** Buffer was allocated */
4603
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004604#define Z_PIPE_INITIALIZER(obj, pipe_buffer, pipe_buffer_size) \
Krzysztof Chruscinskibe063272019-02-13 11:19:54 +01004605 { \
4606 .buffer = pipe_buffer, \
4607 .size = pipe_buffer_size, \
4608 .bytes_used = 0, \
4609 .read_index = 0, \
4610 .write_index = 0, \
4611 .lock = {}, \
4612 .wait_q = { \
Patrik Flykt4344e272019-03-08 14:19:05 -07004613 .readers = Z_WAIT_Q_INIT(&obj.wait_q.readers), \
4614 .writers = Z_WAIT_Q_INIT(&obj.wait_q.writers) \
Krzysztof Chruscinskibe063272019-02-13 11:19:54 +01004615 }, \
Krzysztof Chruscinskibe063272019-02-13 11:19:54 +01004616 .flags = 0 \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004617 }
4618
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004619/**
Allan Stephensc98da842016-11-11 15:45:03 -05004620 * INTERNAL_HIDDEN @endcond
4621 */
4622
4623/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004624 * @brief Statically define and initialize a pipe.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004625 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004626 * The pipe can be accessed outside the module where it is defined using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004627 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05004628 * @code extern struct k_pipe <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004629 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004630 * @param name Name of the pipe.
4631 * @param pipe_buffer_size Size of the pipe's ring buffer (in bytes),
4632 * or zero if no ring buffer is used.
4633 * @param pipe_align Alignment of the pipe's ring buffer (power of 2).
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004634 *
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004635 */
Andrew Boie44fe8122018-04-12 17:38:12 -07004636#define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align) \
Fabio Baltierif88a4202021-08-04 23:05:54 +01004637 static unsigned char __noinit __aligned(pipe_align) \
Andrew Boie44fe8122018-04-12 17:38:12 -07004638 _k_pipe_buf_##name[pipe_buffer_size]; \
Fabio Baltierif88a4202021-08-04 23:05:54 +01004639 STRUCT_SECTION_ITERABLE(k_pipe, name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004640 Z_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004641
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004642/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004643 * @brief Initialize a pipe.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004644 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004645 * This routine initializes a pipe object, prior to its first use.
4646 *
4647 * @param pipe Address of the pipe.
4648 * @param buffer Address of the pipe's ring buffer, or NULL if no ring buffer
4649 * is used.
4650 * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
4651 * buffer is used.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004652 *
4653 * @return N/A
4654 */
Andrew Boie44fe8122018-04-12 17:38:12 -07004655void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size);
4656
4657/**
4658 * @brief Release a pipe's allocated buffer
4659 *
4660 * If a pipe object was given a dynamically allocated buffer via
4661 * k_pipe_alloc_init(), this will free it. This function does nothing
4662 * if the buffer wasn't dynamically allocated.
4663 *
4664 * @param pipe Address of the pipe.
Anas Nashif361a84d2019-06-16 08:22:08 -04004665 * @retval 0 on success
4666 * @retval -EAGAIN nothing to cleanup
Andrew Boie44fe8122018-04-12 17:38:12 -07004667 */
Anas Nashif361a84d2019-06-16 08:22:08 -04004668int k_pipe_cleanup(struct k_pipe *pipe);
Andrew Boie44fe8122018-04-12 17:38:12 -07004669
4670/**
4671 * @brief Initialize a pipe and allocate a buffer for it
4672 *
4673 * Storage for the buffer region will be allocated from the calling thread's
4674 * resource pool. This memory will be released if k_pipe_cleanup() is called,
4675 * or userspace is enabled and the pipe object loses all references to it.
4676 *
4677 * This function should only be called on uninitialized pipe objects.
4678 *
4679 * @param pipe Address of the pipe.
4680 * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
4681 * buffer is used.
4682 * @retval 0 on success
David B. Kinderfcbd8fb2018-05-23 12:06:24 -07004683 * @retval -ENOMEM if memory couldn't be allocated
Andrew Boie44fe8122018-04-12 17:38:12 -07004684 */
4685__syscall int k_pipe_alloc_init(struct k_pipe *pipe, size_t size);
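/*
 * Example (illustrative sketch): pairing k_pipe_alloc_init() with
 * k_pipe_cleanup(). The pipe name and the 256-byte size are assumptions.
 *
 * @code
 * struct k_pipe dyn_pipe;
 *
 * int pipe_setup_teardown(void)
 * {
 *         int ret = k_pipe_alloc_init(&dyn_pipe, 256);
 *
 *         if (ret != 0) {
 *                 return ret;               // -ENOMEM: resource pool exhausted
 *         }
 *
 *         return k_pipe_cleanup(&dyn_pipe); // frees the pool-backed buffer
 * }
 * @endcode
 */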
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004686
4687/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004688 * @brief Write data to a pipe.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004689 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004690 * This routine writes up to @a bytes_to_write bytes of data to @a pipe.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004691 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004692 * @param pipe Address of the pipe.
4693 * @param data Address of data to write.
4694 * @param bytes_to_write Size of data (in bytes).
4695 * @param bytes_written Address of area to hold the number of bytes written.
4696 * @param min_xfer Minimum number of bytes to write.
Andy Ross78327382020-03-05 15:18:14 -08004697 * @param timeout Waiting period to wait for the data to be written,
4698 * or one of the special values K_NO_WAIT and K_FOREVER.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004699 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004700 * @retval 0 At least @a min_xfer bytes of data were written.
4701 * @retval -EIO Returned without waiting; zero data bytes were written.
4702 * @retval -EAGAIN Waiting period timed out; fewer than @a min_xfer data
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004703 * bytes (possibly zero) were written.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004704 */
Andrew Boieb9a05782017-09-29 16:05:32 -07004705__syscall int k_pipe_put(struct k_pipe *pipe, void *data,
4706 size_t bytes_to_write, size_t *bytes_written,
Andy Ross78327382020-03-05 15:18:14 -08004707 size_t min_xfer, k_timeout_t timeout);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004708
4709/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004710 * @brief Read data from a pipe.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004711 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004712 * This routine reads up to @a bytes_to_read bytes of data from @a pipe.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004713 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004714 * @param pipe Address of the pipe.
4715 * @param data Address to place the data read from pipe.
4716 * @param bytes_to_read Maximum number of data bytes to read.
4717 * @param bytes_read Address of area to hold the number of bytes read.
4718 * @param min_xfer Minimum number of data bytes to read.
Andy Ross78327382020-03-05 15:18:14 -08004719 * @param timeout Waiting period to wait for the data to be read,
4720 * or one of the special values K_NO_WAIT and K_FOREVER.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004721 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004722 * @retval 0 At least @a min_xfer bytes of data were read.
Anas Nashif361a84d2019-06-16 08:22:08 -04004723 * @retval -EINVAL invalid parameters supplied
Allan Stephens9ef50f42016-11-16 15:33:31 -05004724 * @retval -EIO Returned without waiting; zero data bytes were read.
4725 * @retval -EAGAIN Waiting period timed out; fewer than @a min_xfer data
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004726 * bytes (possibly zero) were read.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004727 */
Andrew Boieb9a05782017-09-29 16:05:32 -07004728__syscall int k_pipe_get(struct k_pipe *pipe, void *data,
4729 size_t bytes_to_read, size_t *bytes_read,
Andy Ross78327382020-03-05 15:18:14 -08004730 size_t min_xfer, k_timeout_t timeout);
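/*
 * Example (illustrative sketch): a producer/consumer pair over a statically
 * defined pipe. The pipe name, message size and 100 ms timeout are
 * assumptions, not part of the API.
 *
 * @code
 * K_PIPE_DEFINE(data_pipe, 128, 4);
 *
 * void producer(void)
 * {
 *         uint8_t msg[16] = { 0 };
 *         size_t written;
 *
 *         // All-or-nothing write: min_xfer equals the full message size.
 *         if (k_pipe_put(&data_pipe, msg, sizeof(msg), &written,
 *                        sizeof(msg), K_MSEC(100)) != 0) {
 *                 // -EIO or -EAGAIN: fewer than sizeof(msg) bytes went out
 *         }
 * }
 *
 * void consumer(void)
 * {
 *         uint8_t msg[16];
 *         size_t read;
 *
 *         // Accept a partial read: min_xfer of 1, wait forever if empty.
 *         (void)k_pipe_get(&data_pipe, msg, sizeof(msg), &read,
 *                          1, K_FOREVER);
 * }
 * @endcode
 */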
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004731
4732/**
Christopher Friedt3315f8f2020-05-06 18:43:58 -04004733 * @brief Query the number of bytes that may be read from @a pipe.
4734 *
4735 * @param pipe Address of the pipe.
4736 *
4737 * @return a number n such that 0 <= n <= @ref k_pipe.size; the
4738 * result is zero for unbuffered pipes.
4739 */
4740__syscall size_t k_pipe_read_avail(struct k_pipe *pipe);
4741
4742/**
4743 * @brief Query the number of bytes that may be written to @a pipe
4744 *
4745 * @param pipe Address of the pipe.
4746 *
4747 * @return a number n such that 0 <= n <= @ref k_pipe.size; the
4748 * result is zero for unbuffered pipes.
4749 */
4750__syscall size_t k_pipe_write_avail(struct k_pipe *pipe);
4751
Anas Nashif166f5192018-02-25 08:02:36 -06004752/** @} */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004753
Allan Stephensc98da842016-11-11 15:45:03 -05004754/**
4755 * @cond INTERNAL_HIDDEN
4756 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004757
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004758struct k_mem_slab {
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004759 _wait_q_t wait_q;
Nicolas Pitre2bed37e2021-04-13 11:10:22 -04004760 struct k_spinlock lock;
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004761 uint32_t num_blocks;
Peter Mitsisfb02d572016-10-13 16:55:45 -04004762 size_t block_size;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004763 char *buffer;
4764 char *free_list;
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004765 uint32_t num_used;
Kamil Lazowski104f1002020-09-11 14:27:55 +02004766#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
4767 uint32_t max_used;
4768#endif
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004769
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004770};
4771
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004772#define Z_MEM_SLAB_INITIALIZER(obj, slab_buffer, slab_block_size, \
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004773 slab_num_blocks) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004774 { \
Nicolas Pitre2bed37e2021-04-13 11:10:22 -04004775 .lock = {}, \
Patrik Flykt4344e272019-03-08 14:19:05 -07004776 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004777 .num_blocks = slab_num_blocks, \
4778 .block_size = slab_block_size, \
4779 .buffer = slab_buffer, \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004780 .free_list = NULL, \
4781 .num_used = 0, \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004782 }
4783
Andrew Boie65a9d2a2017-06-27 10:51:23 -07004784
Peter Mitsis578f9112016-10-07 13:50:31 -04004785/**
Allan Stephensc98da842016-11-11 15:45:03 -05004786 * INTERNAL_HIDDEN @endcond
4787 */
4788
4789/**
4790 * @defgroup mem_slab_apis Memory Slab APIs
4791 * @ingroup kernel_apis
4792 * @{
4793 */
4794
4795/**
Allan Stephensda827222016-11-09 14:23:58 -06004796 * @brief Statically define and initialize a memory slab.
Peter Mitsis578f9112016-10-07 13:50:31 -04004797 *
Allan Stephensda827222016-11-09 14:23:58 -06004798 * The memory slab's buffer contains @a slab_num_blocks memory blocks
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004799 * that are @a slab_block_size bytes long. The buffer is aligned to a
Allan Stephensda827222016-11-09 14:23:58 -06004800 * @a slab_align -byte boundary. To ensure that each memory block is similarly
4801 * aligned to this boundary, @a slab_block_size must also be a multiple of
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004802 * @a slab_align.
Peter Mitsis578f9112016-10-07 13:50:31 -04004803 *
Allan Stephensda827222016-11-09 14:23:58 -06004804 * The memory slab can be accessed outside the module where it is defined
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004805 * using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004806 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05004807 * @code extern struct k_mem_slab <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004808 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004809 * @param name Name of the memory slab.
4810 * @param slab_block_size Size of each memory block (in bytes).
4811 * @param slab_num_blocks Number of memory blocks.
4812 * @param slab_align Alignment of the memory slab's buffer (power of 2).
Peter Mitsis578f9112016-10-07 13:50:31 -04004813 */
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004814#define K_MEM_SLAB_DEFINE(name, slab_block_size, slab_num_blocks, slab_align) \
Daniel Leunge6f168c2021-07-19 12:10:54 -07004815 char __noinit_named(k_mem_slab_buf_##name) \
4816 __aligned(WB_UP(slab_align)) \
Nicolas Pitre46cd5a02019-05-21 21:40:38 -04004817 _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
Fabio Baltierif88a4202021-08-04 23:05:54 +01004818 STRUCT_SECTION_ITERABLE(k_mem_slab, name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004819 Z_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
Nicolas Pitre46cd5a02019-05-21 21:40:38 -04004820 WB_UP(slab_block_size), slab_num_blocks)
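/*
 * Example (illustrative sketch): a slab of eight 32-byte blocks with 4-byte
 * alignment; the name `tx_slab` is an assumption.
 *
 * @code
 * K_MEM_SLAB_DEFINE(tx_slab, 32, 8, 4);
 * @endcode
 */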
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004821
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004822/**
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004823 * @brief Initialize a memory slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004824 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004825 * Initializes a memory slab, prior to its first use.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004826 *
Allan Stephensda827222016-11-09 14:23:58 -06004827 * The memory slab's buffer contains @a slab_num_blocks memory blocks
4828 * that are @a slab_block_size bytes long. The buffer must be aligned to an
Nicolas Pitre46cd5a02019-05-21 21:40:38 -04004829 * N-byte boundary matching a word boundary, where N is a power of 2
4830 * (e.g. 4, 8, 16, ... on 32-bit systems).
Allan Stephensda827222016-11-09 14:23:58 -06004831 * To ensure that each memory block is similarly aligned to this boundary,
4832 * @a slab_block_size must also be a multiple of N.
4833 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004834 * @param slab Address of the memory slab.
4835 * @param buffer Pointer to buffer used for the memory blocks.
4836 * @param block_size Size of each memory block (in bytes).
4837 * @param num_blocks Number of memory blocks.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004838 *
Anas Nashifdfc2bbc2019-06-16 09:22:21 -04004839 * @retval 0 on success
4840 * @retval -EINVAL invalid data supplied
4841 *
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004842 */
Anas Nashifdfc2bbc2019-06-16 09:22:21 -04004843extern int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004844 size_t block_size, uint32_t num_blocks);
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004845
4846/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004847 * @brief Allocate memory from a memory slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004848 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004849 * This routine allocates a memory block from a memory slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004850 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004851 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
Krzysztof Chruscinskic482a572021-04-19 10:52:34 +02004852 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004853 *
4854 * @funcprops \isr_ok
Spoorthy Priya Yerabolu04d3c3c2020-09-17 02:54:50 -07004855 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004856 * @param slab Address of the memory slab.
4857 * @param mem Pointer to block address area.
Andy Ross78327382020-03-05 15:18:14 -08004858 * @param timeout Non-negative waiting period to wait for operation to complete.
4859 * Use K_NO_WAIT to return without waiting,
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004860 * or K_FOREVER to wait as long as necessary.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004861 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004862 * @retval 0 Memory allocated. The block address area pointed at by @a mem
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004863 * is set to the starting address of the memory block.
Allan Stephens9ef50f42016-11-16 15:33:31 -05004864 * @retval -ENOMEM Returned without waiting.
4865 * @retval -EAGAIN Waiting period timed out.
Anas Nashifdfc2bbc2019-06-16 09:22:21 -04004866 * @retval -EINVAL Invalid data supplied
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004867 */
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004868extern int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
Andy Ross78327382020-03-05 15:18:14 -08004869 k_timeout_t timeout);
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004870
4871/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004872 * @brief Free memory allocated from a memory slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004873 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004874 * This routine releases a previously allocated memory block back to its
4875 * associated memory slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004876 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004877 * @param slab Address of the memory slab.
4878 * @param mem Pointer to block address area (as set by k_mem_slab_alloc()).
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004879 *
4880 * @return N/A
4881 */
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004882extern void k_mem_slab_free(struct k_mem_slab *slab, void **mem);
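/*
 * Example (illustrative sketch): a full allocate/use/free cycle against the
 * `tx_slab` from the sketch above; the 50 ms timeout is an assumption.
 *
 * @code
 * void use_one_block(void)
 * {
 *         void *block;
 *
 *         if (k_mem_slab_alloc(&tx_slab, &block, K_MSEC(50)) == 0) {
 *                 memset(block, 0, 32);              // each block is 32 bytes
 *                 k_mem_slab_free(&tx_slab, &block); // note: address of block
 *         }
 * }
 * @endcode
 */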
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004883
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004884/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004885 * @brief Get the number of used blocks in a memory slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004886 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004887 * This routine gets the number of memory blocks that are currently
4888 * allocated in @a slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004889 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004890 * @param slab Address of the memory slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004891 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004892 * @return Number of allocated memory blocks.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004893 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004894static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004895{
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004896 return slab->num_used;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004897}
4898
Peter Mitsisc001aa82016-10-13 13:53:37 -04004899/**
Kamil Lazowski104f1002020-09-11 14:27:55 +02004900 * @brief Get the maximum number of used blocks so far in a memory slab.
4901 *
4902 * This routine gets the peak number of memory blocks that have been
4903 * allocated simultaneously in @a slab.
4904 *
4905 * @param slab Address of the memory slab.
4906 *
4907 * @return Maximum number of allocated memory blocks.
4908 */
4909static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
4910{
4911#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
4912 return slab->max_used;
4913#else
4914 ARG_UNUSED(slab);
4915 return 0;
4916#endif
4917}
4918
4919/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004920 * @brief Get the number of unused blocks in a memory slab.
Peter Mitsisc001aa82016-10-13 13:53:37 -04004921 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004922 * This routine gets the number of memory blocks that are currently
4923 * unallocated in @a slab.
Peter Mitsisc001aa82016-10-13 13:53:37 -04004924 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004925 * @param slab Address of the memory slab.
Peter Mitsisc001aa82016-10-13 13:53:37 -04004926 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004927 * @return Number of unallocated memory blocks.
Peter Mitsisc001aa82016-10-13 13:53:37 -04004928 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004929static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
Peter Mitsisc001aa82016-10-13 13:53:37 -04004930{
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004931 return slab->num_blocks - slab->num_used;
Peter Mitsisc001aa82016-10-13 13:53:37 -04004932}
4933
Anas Nashif166f5192018-02-25 08:02:36 -06004934/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05004935
4936/**
Anas Nashifdbac76f2020-12-09 12:04:53 -05004937 * @addtogroup heap_apis
Allan Stephensc98da842016-11-11 15:45:03 -05004938 * @{
4939 */
4940
Andrew Boieb95e9602020-09-28 13:26:38 -07004941/* kernel synchronized heap struct */
4942
4943struct k_heap {
4944 struct sys_heap heap;
4945 _wait_q_t wait_q;
4946 struct k_spinlock lock;
4947};
4948
Allan Stephensc98da842016-11-11 15:45:03 -05004949/**
Andy Ross0dd83b82020-04-03 10:01:03 -07004950 * @brief Initialize a k_heap
4951 *
4952 * This constructs a synchronized k_heap object over a memory region
4953 * specified by the user. Note that while any alignment and size can
4954 * be passed as valid parameters, internal alignment restrictions
4955 * inside the inner sys_heap mean that not all bytes may be usable as
4956 * allocated memory.
4957 *
4958 * @param h Heap struct to initialize
4959 * @param mem Pointer to memory.
4960 * @param bytes Size of memory region, in bytes
4961 */
4962void k_heap_init(struct k_heap *h, void *mem, size_t bytes);
4963
Maximilian Bachmann34d7c782020-11-13 15:12:31 +01004964/** @brief Allocate aligned memory from a k_heap
4965 *
4966 * Behaves in all ways like k_heap_alloc(), except that the returned
4967 * memory (if available) will have a starting address in memory which
4968 * is a multiple of the specified power-of-two alignment value in
4969 * bytes. The resulting memory can be returned to the heap using
4970 * k_heap_free().
4971 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004972 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
Krzysztof Chruscinskic482a572021-04-19 10:52:34 +02004973 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004974 *
4975 * @funcprops \isr_ok
Maximilian Bachmann34d7c782020-11-13 15:12:31 +01004976 *
4977 * @param h Heap from which to allocate
4978 * @param align Alignment in bytes, must be a power of two
4979 * @param bytes Number of bytes requested
4980 * @param timeout How long to wait, or K_NO_WAIT
4981 * @return Pointer to memory the caller can now use
4982 */
4983void *k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes,
4984 k_timeout_t timeout);
4985
Andy Ross0dd83b82020-04-03 10:01:03 -07004986/**
4987 * @brief Allocate memory from a k_heap
4988 *
4989 * Allocates and returns a memory buffer from the memory region owned
4990 * by the heap. If no memory is available immediately, the call will
4991 * block for the specified timeout (constructed via the standard
4992 * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
4993 * freed. If the allocation cannot be performed by the expiration of
4994 * the timeout, NULL will be returned.
4995 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004996 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
Krzysztof Chruscinskic482a572021-04-19 10:52:34 +02004997 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004998 *
4999 * @funcprops \isr_ok
Spoorthy Priya Yerabolu04d3c3c2020-09-17 02:54:50 -07005000 *
Andy Ross0dd83b82020-04-03 10:01:03 -07005001 * @param h Heap from which to allocate
5002 * @param bytes Desired size of block to allocate
5003 * @param timeout How long to wait, or K_NO_WAIT
5004 * @return A pointer to valid heap memory, or NULL
5005 */
Torbjörn Leksell80cd9da2021-03-26 13:42:25 +01005006void *k_heap_alloc(struct k_heap *h, size_t bytes,
5007 k_timeout_t timeout);
Andy Ross0dd83b82020-04-03 10:01:03 -07005008
5009/**
5010 * @brief Free memory allocated by k_heap_alloc()
5011 *
5012 * Returns the specified memory block, which must have been returned
5013 * from k_heap_alloc(), to the heap for use by other callers. Passing
5014 * a NULL block is legal, and has no effect.
5015 *
5016 * @param h Heap to which to return the memory
5017 * @param mem A valid memory block, or NULL
5018 */
5019void k_heap_free(struct k_heap *h, void *mem);
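/*
 * Example (illustrative sketch): construct a k_heap over a user-supplied
 * region, then allocate and free from it. The region size and the names are
 * assumptions.
 *
 * @code
 * static char heap_mem[1024] __aligned(8);
 * static struct k_heap my_heap;
 *
 * void heap_demo(void)
 * {
 *         void *p;
 *
 *         k_heap_init(&my_heap, heap_mem, sizeof(heap_mem));
 *
 *         p = k_heap_alloc(&my_heap, 64, K_NO_WAIT);
 *         if (p != NULL) {
 *                 k_heap_free(&my_heap, p);
 *         }
 * }
 * @endcode
 */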
5020
Andy Rossd3737032021-05-19 09:50:17 -07005021/* Hand-calculated minimum heap sizes needed to return a successful
5022 * 1-byte allocation. See details in lib/os/heap.[ch]
5023 */
5024#define Z_HEAP_MIN_SIZE (sizeof(void *) > 4 ? 56 : 44)
5025
Andy Ross0dd83b82020-04-03 10:01:03 -07005026/**
5027 * @brief Define a static k_heap
5028 *
5029 * This macro defines and initializes a static memory region and
5030 * k_heap of the requested size. After kernel start, &name can be
5031 * used as if k_heap_init() had been called.
5032 *
Andy Rossd3737032021-05-19 09:50:17 -07005033 * Note that this macro enforces a minimum size on the memory region
5034 * to accommodate metadata requirements. Very small heaps will be
5035 * padded to fit.
5036 *
Andy Ross0dd83b82020-04-03 10:01:03 -07005037 * @param name Symbol name for the struct k_heap object
5038 * @param bytes Size of memory region, in bytes
5039 */
5040#define K_HEAP_DEFINE(name, bytes) \
Daniel Leungd92d1f12021-07-19 14:09:25 -07005041 char __noinit_named(kheap_buf_##name) \
5042 __aligned(8) /* CHUNK_UNIT */ \
Andy Rossd3737032021-05-19 09:50:17 -07005043 kheap_##name[MAX(bytes, Z_HEAP_MIN_SIZE)]; \
Fabio Baltierif88a4202021-08-04 23:05:54 +01005044 STRUCT_SECTION_ITERABLE(k_heap, name) = { \
Andy Ross0dd83b82020-04-03 10:01:03 -07005045 .heap = { \
5046 .init_mem = kheap_##name, \
Andy Rossd3737032021-05-19 09:50:17 -07005047 .init_bytes = MAX(bytes, Z_HEAP_MIN_SIZE), \
Andy Ross0dd83b82020-04-03 10:01:03 -07005048 }, \
5049 }
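/*
 * Example (illustrative sketch): the static counterpart of k_heap_init();
 * `app_heap` and the 2 KiB size are assumptions. After kernel start the heap
 * is ready without further setup.
 *
 * @code
 * K_HEAP_DEFINE(app_heap, 2048);
 *
 * void grab_and_release(void)
 * {
 *         void *buf = k_heap_alloc(&app_heap, 128, K_FOREVER);
 *
 *         k_heap_free(&app_heap, buf);
 * }
 * @endcode
 */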
5050
Johan Hedberg7d887cb2018-01-11 20:45:27 +02005051/**
Anas Nashif166f5192018-02-25 08:02:36 -06005052 * @}
Allan Stephensc98da842016-11-11 15:45:03 -05005053 */
5054
5055/**
Anas Nashifdbac76f2020-12-09 12:04:53 -05005056 * @defgroup heap_apis Heap APIs
Allan Stephensc98da842016-11-11 15:45:03 -05005057 * @ingroup kernel_apis
5058 * @{
5059 */
5060
5061/**
Christopher Friedt135ffaf2020-11-26 08:19:10 -05005062 * @brief Allocate memory from the heap with a specified alignment.
5063 *
5064 * This routine provides semantics similar to aligned_alloc(); memory is
5065 * allocated from the heap with a specified alignment. However, one minor
5066 * difference is that k_aligned_alloc() accepts any non-zero @p size,
5067 * whereas aligned_alloc() only accepts a @p size that is an integral
5068 * multiple of @p align.
5069 *
5070 * Above, aligned_alloc() refers to:
5071 * C11 standard (ISO/IEC 9899:2011): 7.22.3.1
5072 * The aligned_alloc function (p: 347-348)
5073 *
5074 * @param align Alignment of memory requested (in bytes).
5075 * @param size Amount of memory requested (in bytes).
5076 *
5077 * @return Address of the allocated memory if successful; otherwise NULL.
5078 */
5079extern void *k_aligned_alloc(size_t align, size_t size);
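/*
 * Example (illustrative sketch): a 64-byte-aligned allocation whose size is
 * deliberately not a multiple of the alignment, which C11 aligned_alloc()
 * would reject; the sizes are assumptions.
 *
 * @code
 * void aligned_demo(void)
 * {
 *         void *buf = k_aligned_alloc(64, 200);   // 64-byte aligned start
 *
 *         k_free(buf);                            // k_free() accepts NULL too
 * }
 * @endcode
 */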
5080
5081/**
5082 * @brief Allocate memory from the heap.
Peter Mitsis937042c2016-10-13 13:18:26 -04005083 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005084 * This routine provides traditional malloc() semantics. Memory is
Allan Stephens480a1312016-10-13 15:44:48 -05005085 * allocated from the heap memory pool.
Peter Mitsis937042c2016-10-13 13:18:26 -04005086 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005087 * @param size Amount of memory requested (in bytes).
Peter Mitsis937042c2016-10-13 13:18:26 -04005088 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005089 * @return Address of the allocated memory if successful; otherwise NULL.
Peter Mitsis937042c2016-10-13 13:18:26 -04005090 */
Torbjörn Leksell80cd9da2021-03-26 13:42:25 +01005091extern void *k_malloc(size_t size);
Peter Mitsis937042c2016-10-13 13:18:26 -04005092
5093/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005094 * @brief Free memory allocated from heap.
Allan Stephens480a1312016-10-13 15:44:48 -05005095 *
5096 * This routine provides traditional free() semantics. The memory being
Andrew Boiea2480bd2018-04-12 16:59:02 -07005097 * returned must have been allocated from the heap memory pool or
5098 * via k_mem_pool_malloc().
Peter Mitsis937042c2016-10-13 13:18:26 -04005099 *
Anas Nashif345fdd52016-12-20 08:36:04 -05005100 * If @a ptr is NULL, no operation is performed.
5101 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005102 * @param ptr Pointer to previously allocated memory.
Peter Mitsis937042c2016-10-13 13:18:26 -04005103 *
5104 * @return N/A
5105 */
5106extern void k_free(void *ptr);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04005107
Allan Stephensc98da842016-11-11 15:45:03 -05005108/**
Andrew Boie7f95e832017-11-08 14:40:01 -08005109 * @brief Allocate memory from heap, array style
5110 *
5111 * This routine provides traditional calloc() semantics. Memory is
5112 * allocated from the heap memory pool and zeroed.
5113 *
5114 * @param nmemb Number of elements in the requested array
5115 * @param size Size of each array element (in bytes).
5116 *
5117 * @return Address of the allocated memory if successful; otherwise NULL.
5118 */
5119extern void *k_calloc(size_t nmemb, size_t size);
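/*
 * Example (illustrative sketch): traditional malloc()/calloc()/free()
 * semantics backed by the kernel heap; the sizes are assumptions.
 *
 * @code
 * void alloc_demo(void)
 * {
 *         uint32_t *one = k_malloc(sizeof(uint32_t));
 *         uint32_t *ten = k_calloc(10, sizeof(uint32_t));  // zero-filled
 *
 *         k_free(one);
 *         k_free(ten);
 * }
 * @endcode
 */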
5120
Anas Nashif166f5192018-02-25 08:02:36 -06005121/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05005122
Benjamin Walshacc68c12017-01-29 18:57:45 -05005123/* polling API - PRIVATE */
5124
Benjamin Walshb0179862017-02-02 16:39:57 -05005125#ifdef CONFIG_POLL
Flavio Ceolin6fdc56d2018-09-18 12:32:27 -07005126#define _INIT_OBJ_POLL_EVENT(obj) do { (obj)->poll_event = NULL; } while (false)
Benjamin Walshb0179862017-02-02 16:39:57 -05005127#else
Flavio Ceolin6fdc56d2018-09-18 12:32:27 -07005128#define _INIT_OBJ_POLL_EVENT(obj) do { } while (false)
Benjamin Walshb0179862017-02-02 16:39:57 -05005129#endif
5130
Benjamin Walshacc68c12017-01-29 18:57:45 -05005131/* private - types bit positions */
5132enum _poll_types_bits {
5133 /* can be used to ignore an event */
5134 _POLL_TYPE_IGNORE,
5135
Flavio Ceolinaecd4ec2018-11-02 12:35:30 -07005136 /* to be signaled by k_poll_signal_raise() */
Benjamin Walshacc68c12017-01-29 18:57:45 -05005137 _POLL_TYPE_SIGNAL,
5138
5139 /* semaphore availability */
5140 _POLL_TYPE_SEM_AVAILABLE,
5141
Anas Nashif56821172020-07-08 14:14:25 -04005142 /* queue/FIFO/LIFO data availability */
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02005143 _POLL_TYPE_DATA_AVAILABLE,
Benjamin Walshacc68c12017-01-29 18:57:45 -05005144
Nick Gravesb445f132021-04-12 12:35:18 -07005145 /* msgq data availability */
5146 _POLL_TYPE_MSGQ_DATA_AVAILABLE,
5147
Benjamin Walshacc68c12017-01-29 18:57:45 -05005148 _POLL_NUM_TYPES
5149};
5150
Aastha Grover83b9f692020-08-20 16:47:11 -07005151#define Z_POLL_TYPE_BIT(type) (1U << ((type) - 1U))
Benjamin Walshacc68c12017-01-29 18:57:45 -05005152
5153/* private - states bit positions */
5154enum _poll_states_bits {
5155 /* default state when creating event */
5156 _POLL_STATE_NOT_READY,
5157
Flavio Ceolinaecd4ec2018-11-02 12:35:30 -07005158 /* signaled by k_poll_signal_raise() */
Benjamin Walshacc68c12017-01-29 18:57:45 -05005159 _POLL_STATE_SIGNALED,
5160
5161 /* semaphore is available */
5162 _POLL_STATE_SEM_AVAILABLE,
5163
Anas Nashif56821172020-07-08 14:14:25 -04005164 /* data is available to read on queue/FIFO/LIFO */
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02005165 _POLL_STATE_DATA_AVAILABLE,
Benjamin Walshacc68c12017-01-29 18:57:45 -05005166
Anas Nashif56821172020-07-08 14:14:25 -04005167 /* queue/FIFO/LIFO wait was cancelled */
Paul Sokolovsky45c0b202018-08-21 23:29:11 +03005168 _POLL_STATE_CANCELLED,
5169
Nick Gravesb445f132021-04-12 12:35:18 -07005170 /* data is available to read on a message queue */
5171 _POLL_STATE_MSGQ_DATA_AVAILABLE,
5172
Benjamin Walshacc68c12017-01-29 18:57:45 -05005173 _POLL_NUM_STATES
5174};
5175
Aastha Grover83b9f692020-08-20 16:47:11 -07005176#define Z_POLL_STATE_BIT(state) (1U << ((state) - 1U))
Benjamin Walshacc68c12017-01-29 18:57:45 -05005177
5178#define _POLL_EVENT_NUM_UNUSED_BITS \
Benjamin Walsh969d4a72017-02-02 11:25:11 -05005179 (32 - (0 \
5180 + 8 /* tag */ \
5181 + _POLL_NUM_TYPES \
5182 + _POLL_NUM_STATES \
5183 + 1 /* modes */ \
5184 ))
Benjamin Walshacc68c12017-01-29 18:57:45 -05005185
Benjamin Walshacc68c12017-01-29 18:57:45 -05005186/* end of polling API - PRIVATE */
5187
5188
5189/**
5190 * @defgroup poll_apis Async polling APIs
5191 * @ingroup kernel_apis
5192 * @{
5193 */
5194
5195/* Public polling API */
5196
5197/* public - values for k_poll_event.type bitfield */
5198#define K_POLL_TYPE_IGNORE 0
Patrik Flykt4344e272019-03-08 14:19:05 -07005199#define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
5200#define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
5201#define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02005202#define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE
Nick Gravesb445f132021-04-12 12:35:18 -07005203#define K_POLL_TYPE_MSGQ_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MSGQ_DATA_AVAILABLE)
Benjamin Walshacc68c12017-01-29 18:57:45 -05005204
5205/* public - polling modes */
5206enum k_poll_modes {
5207 /* polling thread does not take ownership of objects when available */
5208 K_POLL_MODE_NOTIFY_ONLY = 0,
5209
5210 K_POLL_NUM_MODES
5211};
5212
5213/* public - values for k_poll_event.state bitfield */
5214#define K_POLL_STATE_NOT_READY 0
Patrik Flykt4344e272019-03-08 14:19:05 -07005215#define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED)
5216#define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
5217#define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02005218#define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE
Nick Gravesb445f132021-04-12 12:35:18 -07005219#define K_POLL_STATE_MSGQ_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MSGQ_DATA_AVAILABLE)
Patrik Flykt4344e272019-03-08 14:19:05 -07005220#define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED)
Benjamin Walshacc68c12017-01-29 18:57:45 -05005221
5222/* public - poll signal object */
5223struct k_poll_signal {
Anas Nashife71293e2019-12-04 20:00:14 -05005224 /** PRIVATE - DO NOT TOUCH */
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +03005225 sys_dlist_t poll_events;
Benjamin Walshacc68c12017-01-29 18:57:45 -05005226
Anas Nashife71293e2019-12-04 20:00:14 -05005227 /**
Benjamin Walshacc68c12017-01-29 18:57:45 -05005228 * 1 if the event has been signaled, 0 otherwise. Stays set to 1 until
5229 * user resets it to 0.
5230 */
5231 unsigned int signaled;
5232
Anas Nashife71293e2019-12-04 20:00:14 -05005233 /** custom result value passed to k_poll_signal_raise() if needed */
Benjamin Walshacc68c12017-01-29 18:57:45 -05005234 int result;
5235};
5236
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +03005237#define K_POLL_SIGNAL_INITIALIZER(obj) \
Benjamin Walshacc68c12017-01-29 18:57:45 -05005238 { \
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +03005239 .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
Benjamin Walshacc68c12017-01-29 18:57:45 -05005240 .signaled = 0, \
5241 .result = 0, \
5242 }
Anas Nashife71293e2019-12-04 20:00:14 -05005243/**
5244 * @brief Poll Event
5245 *
5246 */
Benjamin Walshacc68c12017-01-29 18:57:45 -05005247struct k_poll_event {
Anas Nashife71293e2019-12-04 20:00:14 -05005248 /** PRIVATE - DO NOT TOUCH */
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +03005249 sys_dnode_t _node;
5250
Anas Nashife71293e2019-12-04 20:00:14 -05005251 /** PRIVATE - DO NOT TOUCH */
Andy Ross202adf52020-11-10 09:54:49 -08005252 struct z_poller *poller;
Benjamin Walshacc68c12017-01-29 18:57:45 -05005253
Anas Nashife71293e2019-12-04 20:00:14 -05005254 /** optional user-specified tag, opaque, untouched by the API */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005255 uint32_t tag:8;
Benjamin Walsh969d4a72017-02-02 11:25:11 -05005256
Anas Nashife71293e2019-12-04 20:00:14 -05005257 /** bitfield of event types (bitwise-ORed K_POLL_TYPE_xxx values) */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005258 uint32_t type:_POLL_NUM_TYPES;
Benjamin Walshacc68c12017-01-29 18:57:45 -05005259
Anas Nashife71293e2019-12-04 20:00:14 -05005260 /** bitfield of event states (bitwise-ORed K_POLL_STATE_xxx values) */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005261 uint32_t state:_POLL_NUM_STATES;
Benjamin Walshacc68c12017-01-29 18:57:45 -05005262
Anas Nashife71293e2019-12-04 20:00:14 -05005263 /** mode of operation, from enum k_poll_modes */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005264 uint32_t mode:1;
Benjamin Walshacc68c12017-01-29 18:57:45 -05005265
Anas Nashife71293e2019-12-04 20:00:14 -05005266 /** unused bits in 32-bit word */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005267 uint32_t unused:_POLL_EVENT_NUM_UNUSED_BITS;
Benjamin Walshacc68c12017-01-29 18:57:45 -05005268
Anas Nashife71293e2019-12-04 20:00:14 -05005269 /** per-type data */
Benjamin Walshacc68c12017-01-29 18:57:45 -05005270 union {
5271 void *obj;
5272 struct k_poll_signal *signal;
5273 struct k_sem *sem;
5274 struct k_fifo *fifo;
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02005275 struct k_queue *queue;
Nick Gravesb445f132021-04-12 12:35:18 -07005276 struct k_msgq *msgq;
Benjamin Walshacc68c12017-01-29 18:57:45 -05005277 };
5278};
5279
Spoorthy Priya Yerabolu9247e8b2020-08-25 03:11:16 -07005280#define K_POLL_EVENT_INITIALIZER(_event_type, _event_mode, _event_obj) \
Benjamin Walshacc68c12017-01-29 18:57:45 -05005281 { \
5282 .poller = NULL, \
Spoorthy Priya Yerabolu9247e8b2020-08-25 03:11:16 -07005283 .type = _event_type, \
Benjamin Walshacc68c12017-01-29 18:57:45 -05005284 .state = K_POLL_STATE_NOT_READY, \
Spoorthy Priya Yerabolu9247e8b2020-08-25 03:11:16 -07005285 .mode = _event_mode, \
Benjamin Walshacc68c12017-01-29 18:57:45 -05005286 .unused = 0, \
Daniel Leung087fb942021-03-24 12:45:01 -07005287 { \
5288 .obj = _event_obj, \
5289 }, \
Benjamin Walsh969d4a72017-02-02 11:25:11 -05005290 }
5291
Spoorthy Priya Yerabolu9247e8b2020-08-25 03:11:16 -07005292#define K_POLL_EVENT_STATIC_INITIALIZER(_event_type, _event_mode, _event_obj, \
Benjamin Walsh969d4a72017-02-02 11:25:11 -05005293 event_tag) \
5294 { \
Benjamin Walsh969d4a72017-02-02 11:25:11 -05005295 .tag = event_tag, \
Spoorthy Priya Yerabolu9247e8b2020-08-25 03:11:16 -07005296 .type = _event_type, \
Benjamin Walsh969d4a72017-02-02 11:25:11 -05005297 .state = K_POLL_STATE_NOT_READY, \
Spoorthy Priya Yerabolu9247e8b2020-08-25 03:11:16 -07005298 .mode = _event_mode, \
Benjamin Walsh969d4a72017-02-02 11:25:11 -05005299 .unused = 0, \
Daniel Leung087fb942021-03-24 12:45:01 -07005300 { \
5301 .obj = _event_obj, \
5302 }, \
Benjamin Walshacc68c12017-01-29 18:57:45 -05005303 }
5304
5305/**
5306 * @brief Initialize one struct k_poll_event instance
5307 *
5308 * After this routine is called on a poll event, the event is ready to be
5309 * placed in an event array to be passed to k_poll().
5310 *
5311 * @param event The event to initialize.
5312 * @param type A bitfield of the types of event, from the K_POLL_TYPE_xxx
5313 * values. Only values that apply to the same object being polled
5314 * can be used together. Choosing K_POLL_TYPE_IGNORE disables the
5315 * event.
Paul Sokolovskycfef9792017-07-18 11:53:06 +03005316 * @param mode Future. Use K_POLL_MODE_NOTIFY_ONLY.
Benjamin Walshacc68c12017-01-29 18:57:45 -05005317 * @param obj Kernel object or poll signal.
5318 *
5319 * @return N/A
5320 */
5321
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005322extern void k_poll_event_init(struct k_poll_event *event, uint32_t type,
Benjamin Walshacc68c12017-01-29 18:57:45 -05005323 int mode, void *obj);
5324
5325/**
5326 * @brief Wait for one or many of multiple poll events to occur
5327 *
5328 * This routine allows a thread to wait concurrently for one or many of
5329 * multiple poll events to have occurred. Such events can be a kernel object
5330 * being available, like a semaphore, or a poll signal event.
5331 *
5332 * When an event notifies that a kernel object is available, the kernel object
5333 * is not "given" to the thread calling k_poll(): it merely signals the fact
5334 * that the object was available when the k_poll() call was in effect. Also,
5335 * all threads trying to acquire an object the regular way, i.e. by pending on
5336 * the object, have precedence over the thread polling on the object. This
5337 * means that the polling thread will never get the poll event on an object
5338 * until the object becomes available and its pend queue is empty. For this
5339 * reason, the k_poll() call is more effective when the objects being polled
5340 * only have one thread, the polling thread, trying to acquire them.
5341 *
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +03005342 * When k_poll() returns 0, the caller should loop on all the events that were
5343 * passed to k_poll() and check the state field for the values that were
5344 * expected and take the associated actions.
Benjamin Walshacc68c12017-01-29 18:57:45 -05005345 *
5346 * Before being reused for another call to k_poll(), the user has to reset the
5347 * state field to K_POLL_STATE_NOT_READY.
5348 *
Andrew Boie3772f772018-05-07 16:52:57 -07005349 * When called from user mode, a temporary memory allocation is required from
5350 * the caller's resource pool.
5351 *
Christian Taedcke7a7c4202020-06-30 12:02:14 +02005352 * @param events An array of events to be polled for.
Benjamin Walshacc68c12017-01-29 18:57:45 -05005353 * @param num_events The number of events in the array.
Andy Ross78327382020-03-05 15:18:14 -08005354 * @param timeout Waiting period for an event to be ready,
5355 * or one of the special values K_NO_WAIT and K_FOREVER.
Benjamin Walshacc68c12017-01-29 18:57:45 -05005356 *
5357 * @retval 0 One or more events are ready.
Benjamin Walshacc68c12017-01-29 18:57:45 -05005358 * @retval -EAGAIN Waiting period timed out.
Paul Sokolovsky45c0b202018-08-21 23:29:11 +03005359 * @retval -EINTR Polling has been interrupted, e.g. with
5360 * k_queue_cancel_wait(). All output events are still set and valid,
5361 * cancelled event(s) will be set to K_POLL_STATE_CANCELLED. In other
5362 * words, -EINTR status means that at least one of output events is
5363 * K_POLL_STATE_CANCELLED.
Andrew Boie3772f772018-05-07 16:52:57 -07005364 * @retval -ENOMEM Thread resource pool insufficient memory (user mode only)
5365 * @retval -EINVAL Bad parameters (user mode only)
Benjamin Walshacc68c12017-01-29 18:57:45 -05005366 */
5367
Andrew Boie3772f772018-05-07 16:52:57 -07005368__syscall int k_poll(struct k_poll_event *events, int num_events,
Andy Ross78327382020-03-05 15:18:14 -08005369 k_timeout_t timeout);
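/*
 * Example (illustrative sketch): wait on a semaphore and a FIFO at once.
 * `my_sem` and `my_fifo` are assumed to exist and to be initialized
 * elsewhere.
 *
 * @code
 * extern struct k_sem my_sem;     // assumed, initialized elsewhere
 * extern struct k_fifo my_fifo;   // assumed, initialized elsewhere
 *
 * struct k_poll_event events[2];
 *
 * void wait_for_input(void)
 * {
 *         k_poll_event_init(&events[0], K_POLL_TYPE_SEM_AVAILABLE,
 *                           K_POLL_MODE_NOTIFY_ONLY, &my_sem);
 *         k_poll_event_init(&events[1], K_POLL_TYPE_FIFO_DATA_AVAILABLE,
 *                           K_POLL_MODE_NOTIFY_ONLY, &my_fifo);
 *
 *         if (k_poll(events, 2, K_FOREVER) == 0) {
 *                 if (events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
 *                         k_sem_take(&my_sem, K_NO_WAIT); // still must acquire
 *                 }
 *                 if (events[1].state == K_POLL_STATE_FIFO_DATA_AVAILABLE) {
 *                         void *data = k_fifo_get(&my_fifo, K_NO_WAIT);
 *
 *                         ARG_UNUSED(data);               // process data here
 *                 }
 *         }
 *
 *         // Reset states before the events are passed to k_poll() again.
 *         events[0].state = K_POLL_STATE_NOT_READY;
 *         events[1].state = K_POLL_STATE_NOT_READY;
 * }
 * @endcode
 */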
Benjamin Walshacc68c12017-01-29 18:57:45 -05005370
5371/**
Benjamin Walsha304f162017-02-02 16:46:09 -05005372 * @brief Initialize a poll signal object.
5373 *
Flavio Ceolinaecd4ec2018-11-02 12:35:30 -07005374 * Ready a poll signal object to be signaled via k_poll_signal_raise().
Benjamin Walsha304f162017-02-02 16:46:09 -05005375 *
Anas Nashifb503be22021-03-22 08:09:55 -04005376 * @param sig A poll signal.
Benjamin Walsha304f162017-02-02 16:46:09 -05005377 *
5378 * @return N/A
5379 */
5380
Anas Nashifb503be22021-03-22 08:09:55 -04005381__syscall void k_poll_signal_init(struct k_poll_signal *sig);
Andrew Boie3772f772018-05-07 16:52:57 -07005382
5383/**
5384 * @brief Reset a poll signal object's state to unsignaled.
5385 *
Anas Nashifb503be22021-03-22 08:09:55 -04005386 * @param sig A poll signal object
Andrew Boie3772f772018-05-07 16:52:57 -07005387 */
Anas Nashifb503be22021-03-22 08:09:55 -04005388__syscall void k_poll_signal_reset(struct k_poll_signal *sig);
Andrew Boie3772f772018-05-07 16:52:57 -07005389
Andrew Boie3772f772018-05-07 16:52:57 -07005390/**
David B. Kinderfcbd8fb2018-05-23 12:06:24 -07005391 * @brief Fetch the signaled state and result value of a poll signal
Andrew Boie3772f772018-05-07 16:52:57 -07005392 *
Anas Nashifb503be22021-03-22 08:09:55 -04005393 * @param sig A poll signal object
Andrew Boie3772f772018-05-07 16:52:57 -07005394 * @param signaled An integer buffer which will be written nonzero if the
5395 * object was signaled
5396 * @param result An integer destination buffer which will be written with the
David B. Kinderfcbd8fb2018-05-23 12:06:24 -07005397 * result value if the object was signaled, or an undefined
Andrew Boie3772f772018-05-07 16:52:57 -07005398 * value if it was not.
5399 */
Anas Nashifb503be22021-03-22 08:09:55 -04005400__syscall void k_poll_signal_check(struct k_poll_signal *sig,
Andrew Boie3772f772018-05-07 16:52:57 -07005401 unsigned int *signaled, int *result);
Benjamin Walsha304f162017-02-02 16:46:09 -05005402
5403/**
Benjamin Walshacc68c12017-01-29 18:57:45 -05005404 * @brief Signal a poll signal object.
5405 *
5406 * This routine makes ready a poll signal, which is basically a poll event of
5407 * type K_POLL_TYPE_SIGNAL. If a thread was polling on that event, it will be
5408 * made ready to run. A @a result value can be specified.
5409 *
5410 * The poll signal contains a 'signaled' field that, when set by
Flavio Ceolinaecd4ec2018-11-02 12:35:30 -07005411 * k_poll_signal_raise(), stays set until the user sets it back to 0 with
Andrew Boie3772f772018-05-07 16:52:57 -07005412 * k_poll_signal_reset(). It thus has to be reset by the user before being
5413 * passed again to k_poll() or k_poll() will consider it being signaled, and
5414 * will return immediately.
Benjamin Walshacc68c12017-01-29 18:57:45 -05005415 *
Peter A. Bigot773bd982019-04-30 07:06:39 -05005416 * @note The result is stored and the 'signaled' field is set even if
5417 * this function returns an error indicating that an expiring poll was
5418 * not notified. The next k_poll() will detect the missed raise.
5419 *
Anas Nashifb503be22021-03-22 08:09:55 -04005420 * @param sig A poll signal.
Benjamin Walshacc68c12017-01-29 18:57:45 -05005421 * @param result The value to store in the result field of the signal.
5422 *
5423 * @retval 0 The signal was delivered successfully.
5424 * @retval -EAGAIN The polling thread's timeout is in the process of expiring.
5425 */
5426
Anas Nashifb503be22021-03-22 08:09:55 -04005427__syscall int k_poll_signal_raise(struct k_poll_signal *sig, int result);
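/*
 * Example (illustrative sketch): raise a signal, then fetch and rearm it;
 * the result value 0x13 is an assumption.
 *
 * @code
 * struct k_poll_signal sig;
 *
 * void signal_demo(void)
 * {
 *         unsigned int signaled;
 *         int result;
 *
 *         k_poll_signal_init(&sig);
 *         k_poll_signal_raise(&sig, 0x13);
 *
 *         k_poll_signal_check(&sig, &signaled, &result);
 *         if (signaled != 0) {
 *                 k_poll_signal_reset(&sig); // must be rearmed before reuse
 *         }
 * }
 * @endcode
 */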
Benjamin Walshacc68c12017-01-29 18:57:45 -05005428
Anas Nashif954d5502018-02-25 08:37:28 -06005429/**
5430 * @internal
5431 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005432extern void z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state);
Benjamin Walshacc68c12017-01-29 18:57:45 -05005433
Anas Nashif166f5192018-02-25 08:02:36 -06005434/** @} */
Benjamin Walshacc68c12017-01-29 18:57:45 -05005435
Benjamin Walshc3a2bbb2016-12-14 13:04:36 -05005436/**
Anas Nashif30c3cff2019-01-22 08:18:13 -05005437 * @defgroup cpu_idle_apis CPU Idling APIs
5438 * @ingroup kernel_apis
5439 * @{
5440 */
Anas Nashif30c3cff2019-01-22 08:18:13 -05005441/**
Benjamin Walshc3a2bbb2016-12-14 13:04:36 -05005442 * @brief Make the CPU idle.
5443 *
5444 * This function makes the CPU idle until an event wakes it up.
5445 *
5446 * In a regular system, the idle thread should be the only thread responsible
5447 * for making the CPU idle and triggering any type of power management.
5448 * However, in some more constrained systems, such as a single-threaded system,
5449 * the sole thread is responsible for this, if needed.
5450 *
Ioannis Glaropoulos91f6d982020-03-18 23:56:56 +01005451 * @note In some architectures, before returning, the function unmasks interrupts
5452 * unconditionally.
5453 *
Benjamin Walshc3a2bbb2016-12-14 13:04:36 -05005454 * @return N/A
5455 */
Andrew Boie07525a32019-09-21 16:17:23 -07005456static inline void k_cpu_idle(void)
5457{
Andrew Boie4f77c2a2019-11-07 12:43:29 -08005458 arch_cpu_idle();
Andrew Boie07525a32019-09-21 16:17:23 -07005459}
Benjamin Walshc3a2bbb2016-12-14 13:04:36 -05005460
5461/**
5462 * @brief Make the CPU idle in an atomic fashion.
5463 *
Peter Bigot88e756e2020-09-29 10:43:10 -05005464 * Similar to k_cpu_idle(), but must be called with interrupts locked.
5465 *
5466 * Enabling interrupts and entering a low-power mode will be atomic,
5467 * i.e. there will be no period of time where interrupts are enabled before
5468 * the processor enters a low-power mode.
5469 *
5470 * After waking up from the low-power mode, the interrupt lockout state will
5471 * be restored as if by irq_unlock(key).
Benjamin Walshc3a2bbb2016-12-14 13:04:36 -05005472 *
5473 * @param key Interrupt locking key obtained from irq_lock().
5474 *
5475 * @return N/A
5476 */
Andrew Boie07525a32019-09-21 16:17:23 -07005477static inline void k_cpu_atomic_idle(unsigned int key)
5478{
Andrew Boie4f77c2a2019-11-07 12:43:29 -08005479 arch_cpu_atomic_idle(key);
Andrew Boie07525a32019-09-21 16:17:23 -07005480}
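/*
 * Example (illustrative sketch): idle until an ISR sets a flag, without a
 * wakeup race; `work_ready` is an assumed flag written from an interrupt
 * handler. The pattern follows the documented behavior above: the call
 * returns with the lockout state restored as if by irq_unlock(key).
 *
 * @code
 * extern volatile bool work_ready;   // assumed, set from an ISR
 *
 * void wait_for_work(void)
 * {
 *         unsigned int key = irq_lock();
 *
 *         while (!work_ready) {
 *                 k_cpu_atomic_idle(key);  // sleeps; wakes with irqs restored
 *                 key = irq_lock();
 *         }
 *         irq_unlock(key);
 * }
 * @endcode
 */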
Benjamin Walshc3a2bbb2016-12-14 13:04:36 -05005481
Anas Nashif30c3cff2019-01-22 08:18:13 -05005482/**
5483 * @}
5484 */
Anas Nashif954d5502018-02-25 08:37:28 -06005485
5486/**
5487 * @internal
5488 */
Andrew Boie4f77c2a2019-11-07 12:43:29 -08005489#ifdef ARCH_EXCEPT
Ioannis Glaropoulosdf029232019-10-07 11:24:36 +02005490/* This architecture has direct support for triggering a CPU exception */
Andrew Boie4f77c2a2019-11-07 12:43:29 -08005491#define z_except_reason(reason) ARCH_EXCEPT(reason)
Andrew Boiecdb94d62017-04-18 15:22:05 -07005492#else
5493
Joakim Anderssone04e4c22019-12-20 15:42:38 +01005494#if !defined(CONFIG_ASSERT_NO_FILE_INFO)
5495#define __EXCEPT_LOC() __ASSERT_PRINT("@ %s:%d\n", __FILE__, __LINE__)
5496#else
5497#define __EXCEPT_LOC()
5498#endif
5499
Andrew Boiecdb94d62017-04-18 15:22:05 -07005500/* NOTE: This is the implementation for arches that do not implement
Andrew Boie4f77c2a2019-11-07 12:43:29 -08005501 * ARCH_EXCEPT() to generate a real CPU exception.
Andrew Boiecdb94d62017-04-18 15:22:05 -07005502 *
5503 * We won't have a real exception frame to determine the PC value when
5504 * the oops occurred, so print file and line number before we jump into
5505 * the fatal error handler.
5506 */
Patrik Flykt4344e272019-03-08 14:19:05 -07005507#define z_except_reason(reason) do { \
Joakim Anderssone04e4c22019-12-20 15:42:38 +01005508 __EXCEPT_LOC(); \
Andrew Boie56236372019-07-15 15:22:29 -07005509 z_fatal_error(reason, NULL); \
Flavio Ceolin6fdc56d2018-09-18 12:32:27 -07005510 } while (false)
Andrew Boiecdb94d62017-04-18 15:22:05 -07005511
5512#endif /* _ARCH__EXCEPT */
5513
5514/**
5515 * @brief Fatally terminate a thread
5516 *
5517 * This should be called when a thread has encountered an unrecoverable
5518 * runtime condition and needs to terminate. What this ultimately
5519 * means is determined by the z_fatal_error() implementation, which
Andrew Boie71ce8ce2019-07-11 14:18:28 -07005520 * will be called with reason code K_ERR_KERNEL_OOPS.
Andrew Boiecdb94d62017-04-18 15:22:05 -07005521 *
5522 * If this is called from ISR context, the default system fatal error handler
5523 * will treat it as an unrecoverable system error, just like k_panic().
5524 */
Andrew Boie71ce8ce2019-07-11 14:18:28 -07005525#define k_oops() z_except_reason(K_ERR_KERNEL_OOPS)
Andrew Boiecdb94d62017-04-18 15:22:05 -07005526
5527/**
5528 * @brief Fatally terminate the system
5529 *
5530 * This should be called when the Zephyr kernel has encountered an
5531 * unrecoverable runtime condition and needs to terminate. What this ultimately
5532 * means is determined by the z_fatal_error() implementation, which
Andrew Boie71ce8ce2019-07-11 14:18:28 -07005533 * will be called with reason code K_ERR_KERNEL_PANIC.
Andrew Boiecdb94d62017-04-18 15:22:05 -07005534 */
Andrew Boie71ce8ce2019-07-11 14:18:28 -07005535#define k_panic() z_except_reason(K_ERR_KERNEL_PANIC)
Andrew Boiecdb94d62017-04-18 15:22:05 -07005536
Benjamin Walsh456c6da2016-09-02 18:55:39 -04005537/*
5538 * private APIs that are utilized by one or more public APIs
5539 */
5540
Stephanos Ioannidis2d746042019-10-25 00:08:21 +09005541/**
5542 * @internal
5543 */
5544extern void z_init_thread_base(struct _thread_base *thread_base,
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005545 int priority, uint32_t initial_state,
Stephanos Ioannidis2d746042019-10-25 00:08:21 +09005546 unsigned int options);
5547
Benjamin Walshb12a8e02016-12-14 15:24:12 -05005548#ifdef CONFIG_MULTITHREADING
Anas Nashif954d5502018-02-25 08:37:28 -06005549/**
5550 * @internal
5551 */
Patrik Flykt4344e272019-03-08 14:19:05 -07005552extern void z_init_static_threads(void);
Benjamin Walshb12a8e02016-12-14 15:24:12 -05005553#else
Anas Nashif954d5502018-02-25 08:37:28 -06005554/**
5555 * @internal
5556 */
Patrik Flykt4344e272019-03-08 14:19:05 -07005557#define z_init_static_threads() do { } while (false)
Benjamin Walshb12a8e02016-12-14 15:24:12 -05005558#endif
5559
Anas Nashif954d5502018-02-25 08:37:28 -06005560/**
5561 * @internal
5562 */
Patrik Flykt4344e272019-03-08 14:19:05 -07005563extern bool z_is_thread_essential(void);
Guennadi Liakhovetski8d07b772021-04-01 13:46:57 +02005564
5565#ifdef CONFIG_SMP
5566void z_smp_thread_init(void *arg, struct k_thread *thread);
5567void z_smp_thread_swap(void);
5568#endif
5569
Anas Nashif954d5502018-02-25 08:37:28 -06005570/**
5571 * @internal
5572 */
Patrik Flykt4344e272019-03-08 14:19:05 -07005573extern void z_timer_expiration_handler(struct _timeout *t);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04005574
Andrew Boied76ae462020-01-02 11:57:43 -08005575#ifdef CONFIG_PRINTK
Andrew Boie756f9072017-10-10 16:01:49 -07005576/**
5577 * @brief Emit a character buffer to the console device
5578 *
5579 * @param c String of characters to print
5580 * @param n The length of the string
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04005581 *
Andrew Boie756f9072017-10-10 16:01:49 -07005582 */
5583__syscall void k_str_out(char *c, size_t n);
Andrew Boied76ae462020-01-02 11:57:43 -08005584#endif
Andrew Boie756f9072017-10-10 16:01:49 -07005585
Ioannis Glaropoulosa6cb8b02019-05-09 21:55:10 +02005586/**
5587 * @brief Disable preservation of floating point context information.
5588 *
5589 * This routine informs the kernel that the specified thread
5590 * will no longer be using the floating point registers.
5591 *
5592 * @warning
5593 * Some architectures apply restrictions on how the disabling of floating
Andrew Boie4f77c2a2019-11-07 12:43:29 -08005594 * point preservation may be requested, see arch_float_disable.
Ioannis Glaropoulosa6cb8b02019-05-09 21:55:10 +02005595 *
5596 * @warning
5597 * This routine should only be used to disable floating point support for
5598 * a thread that currently has such support enabled.
5599 *
5600 * @param thread ID of thread.
5601 *
Katsuhiro Suzuki19db4852021-03-24 01:54:15 +09005602 * @retval 0 On success.
5603 * @retval -ENOTSUP If the floating point disabling is not implemented.
5604 * -EINVAL If the floating point disabling could not be performed.
Ioannis Glaropoulosa6cb8b02019-05-09 21:55:10 +02005605 */
5606__syscall int k_float_disable(struct k_thread *thread);
5607
Katsuhiro Suzuki59903e22021-02-01 15:16:53 +09005608/**
5609 * @brief Enable preservation of floating point context information.
5610 *
5611 * This routine informs the kernel that the specified thread
5612 * will use the floating point registers.
5613 *
5614 * Invoking this routine initializes the thread's floating point context info
5615 * to that of an FPU that has been reset. The next time the thread is scheduled
5616 * by z_swap() it will either inherit an FPU that is guaranteed to be in a
5617 * "sane" state (if the most recent user of the FPU was cooperatively swapped
5618 * out) or the thread's own floating point context will be loaded (if the most
5619 * recent user of the FPU was preempted, or if this thread is the first user
5620 * of the FPU). Thereafter, the kernel will protect the thread's FP context
5621 * so that it is not altered during a preemptive context switch.
5622 *
5623 * The @a options parameter indicates which floating point register sets will
5624 * be used by the specified thread.
5625 *
5626 * For x86 options:
5627 *
5628 * - K_FP_REGS indicates x87 FPU and MMX registers only
5629 * - K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
5630 *
5631 * @warning
5632 * Some architectures apply restrictions on how the enabling of floating
5633 * point preservation may be requested, see arch_float_enable.
5634 *
5635 * @warning
5636 * This routine should only be used to enable floating point support for
5637 * a thread that does not currently have such support enabled.
5638 *
5639 * @param thread ID of thread.
5640 * @param options architecture dependent options
5641 *
5642 * @retval 0 On success.
5643 * @retval -ENOTSUP If the floating point enabling is not implemented.
5644 * -EINVAL If the floating point enabling could not be performed.
5645 */
5646__syscall int k_float_enable(struct k_thread *thread, unsigned int options);
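/*
 * Example (illustrative sketch): give a thread a fresh FP context before it
 * executes floating point code. `calc_tid` is an assumed thread ID, and the
 * 0 options value defers to architecture defaults (on x86, K_FP_REGS or
 * K_SSE_REGS could be passed instead).
 *
 * @code
 * void fp_setup(k_tid_t calc_tid)    // calc_tid: assumed caller-provided
 * {
 *         int ret = k_float_enable(calc_tid, 0);
 *
 *         if (ret == -ENOTSUP) {
 *                 // architecture cannot enable FP preservation at runtime
 *         }
 * }
 * @endcode
 */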
5647
Daniel Leungfc577c42020-08-27 13:54:14 -07005648#ifdef CONFIG_THREAD_RUNTIME_STATS
5649
5650/**
5651 * @brief Get the runtime statistics of a thread
5652 *
5653 * @param thread ID of thread.
5654 * @param stats Pointer to struct to copy statistics into.
5655 * @return -EINVAL if null pointers, otherwise 0
5656 */
5657int k_thread_runtime_stats_get(k_tid_t thread,
5658 k_thread_runtime_stats_t *stats);
5659
5660/**
5661 * @brief Get the runtime statistics of all threads
5662 *
5663 * @param stats Pointer to struct to copy statistics into.
5664 * @return -EINVAL if null pointers, otherwise 0
5665 */
5666int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats);
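/*
 * Example (illustrative sketch): read the current thread's accumulated
 * execution cycles. Assumes CONFIG_THREAD_RUNTIME_STATS=y and that
 * k_thread_runtime_stats_t exposes an execution_cycles field, as in this
 * kernel version.
 *
 * @code
 * void show_own_stats(void)
 * {
 *         k_thread_runtime_stats_t stats;
 *
 *         if (k_thread_runtime_stats_get(k_current_get(), &stats) == 0) {
 *                 printk("execution cycles: %llu\n",
 *                        (unsigned long long)stats.execution_cycles);
 *         }
 * }
 * @endcode
 */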
5667
5668#endif
5669
Benjamin Walsh456c6da2016-09-02 18:55:39 -04005670#ifdef __cplusplus
5671}
5672#endif
5673
Anas Nashif73008b42020-02-06 09:14:51 -05005674#include <tracing/tracing.h>
Andrew Boiefa94ee72017-09-28 16:54:35 -07005675#include <syscalls/kernel.h>
5676
Benjamin Walshdfa7ce52017-01-22 17:06:05 -05005677#endif /* !_ASMLANGUAGE */
5678
Flavio Ceolin67ca1762018-09-14 10:43:44 -07005679#endif /* ZEPHYR_INCLUDE_KERNEL_H_ */