/*
 * Copyright (c) 2016, Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Public kernel APIs.
 */

#ifndef ZEPHYR_INCLUDE_KERNEL_H_
#define ZEPHYR_INCLUDE_KERNEL_H_

#if !defined(_ASMLANGUAGE)
#include <kernel_includes.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <toolchain.h>

#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
#include <timing/timing.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief Kernel APIs
 * @defgroup kernel_apis Kernel APIs
 * @{
 * @}
 */

#if defined(CONFIG_COOP_ENABLED) && defined(CONFIG_PREEMPT_ENABLED)
#define _NUM_COOP_PRIO (CONFIG_NUM_COOP_PRIORITIES)
#define _NUM_PREEMPT_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES + 1)
#elif defined(CONFIG_COOP_ENABLED)
#define _NUM_COOP_PRIO (CONFIG_NUM_COOP_PRIORITIES + 1)
#define _NUM_PREEMPT_PRIO (0)
#elif defined(CONFIG_PREEMPT_ENABLED)
#define _NUM_COOP_PRIO (0)
#define _NUM_PREEMPT_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES + 1)
#else
#error "invalid configuration"
#endif

#define K_PRIO_COOP(x) (-(_NUM_COOP_PRIO - (x)))
#define K_PRIO_PREEMPT(x) (x)

#define K_ANY NULL
#define K_END NULL

#if defined(CONFIG_COOP_ENABLED) && defined(CONFIG_PREEMPT_ENABLED)
#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
#elif defined(CONFIG_COOP_ENABLED)
#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES - 1)
#elif defined(CONFIG_PREEMPT_ENABLED)
#define K_HIGHEST_THREAD_PRIO 0
#else
#error "invalid configuration"
#endif

#ifdef CONFIG_PREEMPT_ENABLED
#define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
#else
#define K_LOWEST_THREAD_PRIO -1
#endif

#define K_IDLE_PRIO K_LOWEST_THREAD_PRIO

#define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
#define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)

#ifdef CONFIG_OBJECT_TRACING
#define _OBJECT_TRACING_NEXT_PTR(type) struct type *__next;
#define _OBJECT_TRACING_LINKED_FLAG uint8_t __linked;
#define _OBJECT_TRACING_INIT \
	.__next = NULL, \
	.__linked = 0,
#else
#define _OBJECT_TRACING_INIT
#define _OBJECT_TRACING_NEXT_PTR(type)
#define _OBJECT_TRACING_LINKED_FLAG
#endif

#ifdef CONFIG_POLL
#define _POLL_EVENT_OBJ_INIT(obj) \
	.poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
#define _POLL_EVENT sys_dlist_t poll_events
#else
#define _POLL_EVENT_OBJ_INIT(obj)
#define _POLL_EVENT
#endif

struct k_thread;
struct k_mutex;
struct k_sem;
struct k_msgq;
struct k_mbox;
struct k_pipe;
struct k_queue;
struct k_fifo;
struct k_lifo;
struct k_stack;
struct k_mem_slab;
struct k_mem_pool;
struct k_timer;
struct k_poll_event;
struct k_poll_signal;
struct k_mem_domain;
struct k_mem_partition;
struct k_futex;

enum execution_context_types {
	K_ISR = 0,
	K_COOP_THREAD,
	K_PREEMPT_THREAD,
};

/* private, used by k_poll and k_work_poll */
struct k_work_poll;
typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);

/**
 * @addtogroup thread_apis
 * @{
 */

typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
				   void *user_data);

/**
 * @brief Iterate over all the threads in the system.
 *
 * This routine iterates over all the threads in the system and
 * calls the user_cb function for each thread.
 *
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @option{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock to protect the _kernel.threads
 * list, which means creation of new threads and termination of existing
 * threads are blocked until this API returns.
 *
 * @return N/A
 */
extern void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);

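/*
 * Example (illustrative sketch, not part of the upstream API): count all
 * threads known to the kernel. Assumes @option{CONFIG_THREAD_MONITOR} is
 * enabled; the callback and counter names are hypothetical.
 *
 * @code
 * static void count_cb(const struct k_thread *thread, void *user_data)
 * {
 *	unsigned int *count = user_data;
 *
 *	ARG_UNUSED(thread);
 *	(*count)++;
 * }
 *
 * unsigned int count = 0U;
 *
 * k_thread_foreach(count_cb, &count);
 * @endcode
 */
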
/**
 * @brief Iterate over all the threads in the system without locking.
 *
 * This routine works exactly like @ref k_thread_foreach, except that
 * the lock is released while user_cb executes.
 *
 * @param user_cb Pointer to the user callback function.
 * @param user_data Pointer to user data.
 *
 * @note @option{CONFIG_THREAD_MONITOR} must be set for this function
 * to be effective.
 * @note This API uses @ref k_spin_lock only while accessing the
 * _kernel.threads queue elements, and releases it while the user callback
 * runs. If a new thread is created while this routine is in progress,
 * the new thread is not included in the enumeration. If a thread is
 * aborted during the enumeration, there is a race and the aborted thread
 * may or may not be included.
 * @note If a thread is aborted and the memory occupied by its @c k_thread
 * structure is reused while k_thread_foreach_unlocked() is in progress,
 * the system may behave unpredictably: this function may never return,
 * because it follows @c next pointers, treating them as pointers to
 * k_thread structures when the memory now holds something else entirely.
 * Do not reuse the memory occupied by the k_thread structure of an aborted
 * thread if the thread was aborted after this function was called, in any
 * context.
 */
extern void k_thread_foreach_unlocked(
	k_thread_user_cb_t user_cb, void *user_data);

/** @} */

/**
 * @defgroup thread_apis Thread APIs
 * @ingroup kernel_apis
 * @{
 */

#endif /* !_ASMLANGUAGE */


/*
 * Thread user options. May be needed by assembly code. Common part uses low
 * bits, arch-specific use high bits.
 */

/**
 * @brief system thread that must not abort
 */
#define K_ESSENTIAL (BIT(0))

#if defined(CONFIG_FPU_SHARING)
/**
 * @brief FPU registers are managed by context switch
 *
 * @details
 * This option indicates that the thread uses the CPU's floating point
 * registers. This instructs the kernel to take additional steps to save
 * and restore the contents of these registers when scheduling the thread.
 * No effect if @option{CONFIG_FPU_SHARING} is not enabled.
 */
#define K_FP_REGS (BIT(1))
#endif

/**
 * @brief user mode thread
 *
 * This thread has dropped from supervisor mode to user mode and consequently
 * has additional restrictions
 */
#define K_USER (BIT(2))

/**
 * @brief Inherit Permissions
 *
 * @details
 * Indicates that the thread being created should inherit all kernel object
 * permissions from the thread that created it. No effect if
 * @option{CONFIG_USERSPACE} is not enabled.
 */
#define K_INHERIT_PERMS (BIT(3))

/**
 * @brief Callback item state
 *
 * @details
 * This is a single bit of state reserved for "callback manager"
 * utilities (p4wq initially) that need to track operations invoked
 * from within a user-provided callback.
 * Effectively it serves as a tiny bit of zero-overhead TLS data.
 */
#define K_CALLBACK_STATE (BIT(4))

#ifdef CONFIG_X86
/* x86 Bitmask definitions for threads user options */

#if defined(CONFIG_FPU_SHARING) && defined(CONFIG_X86_SSE)
/* thread uses SSEx (and also FP) registers */
#define K_SSE_REGS (BIT(7))
#endif
#endif

/* end - thread options */

#if !defined(_ASMLANGUAGE)
/**
 * @brief Create a thread.
 *
 * This routine initializes a thread, then schedules it for execution.
 *
 * The new thread may be scheduled for immediate execution or a delayed start.
 * If the newly spawned thread does not have a delayed start the kernel
 * scheduler may preempt the current thread to allow the new thread to
 * execute.
 *
 * Thread options are architecture-specific, and can include K_ESSENTIAL,
 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
 * them using "|" (the logical OR operator).
 *
 * Stack objects passed to this function must be originally defined with
 * either of these macros in order to be portable:
 *
 * - K_THREAD_STACK_DEFINE() - For stacks that may support either user or
 *   supervisor threads.
 * - K_KERNEL_STACK_DEFINE() - For stacks that may support supervisor
 *   threads only. These stacks use less memory if CONFIG_USERSPACE is
 *   enabled.
 *
 * The stack_size parameter has constraints. It must either be:
 *
 * - The original size value passed to K_THREAD_STACK_DEFINE() or
 *   K_KERNEL_STACK_DEFINE()
 * - The return value of K_THREAD_STACK_SIZEOF(stack) if the stack was
 *   defined with K_THREAD_STACK_DEFINE()
 * - The return value of K_KERNEL_STACK_SIZEOF(stack) if the stack was
 *   defined with K_KERNEL_STACK_DEFINE().
 *
 * Using other values, or sizeof(stack), may produce undefined behavior.
 *
 * @param new_thread Pointer to uninitialized struct k_thread
 * @param stack Pointer to the stack space.
 * @param stack_size Stack size in bytes.
 * @param entry Thread entry function.
 * @param p1 1st entry point parameter.
 * @param p2 2nd entry point parameter.
 * @param p3 3rd entry point parameter.
 * @param prio Thread priority.
 * @param options Thread options.
 * @param delay Scheduling delay, or K_NO_WAIT (for no delay).
 *
 * @return ID of new thread.
 *
 */
__syscall k_tid_t k_thread_create(struct k_thread *new_thread,
				  k_thread_stack_t *stack,
				  size_t stack_size,
				  k_thread_entry_t entry,
				  void *p1, void *p2, void *p3,
				  int prio, uint32_t options, k_timeout_t delay);

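/*
 * Example (illustrative sketch): define a stack and spawn a preemptible
 * thread with no start delay. The names my_stack, my_thread, and my_entry
 * are hypothetical, as are the stack size and priority.
 *
 * @code
 * K_THREAD_STACK_DEFINE(my_stack, 1024);
 * struct k_thread my_thread;
 *
 * void my_entry(void *p1, void *p2, void *p3)
 * {
 *	ARG_UNUSED(p1);
 *	ARG_UNUSED(p2);
 *	ARG_UNUSED(p3);
 * }
 *
 * k_tid_t tid = k_thread_create(&my_thread, my_stack,
 *				 K_THREAD_STACK_SIZEOF(my_stack),
 *				 my_entry, NULL, NULL, NULL,
 *				 K_PRIO_PREEMPT(7), 0, K_NO_WAIT);
 * @endcode
 */
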
/**
 * @brief Drop a thread's privileges permanently to user mode
 *
 * This allows a supervisor thread to be re-used as a user thread.
 * This function does not return, but control will transfer to the provided
 * entry point as if this was a new user thread.
 *
 * The implementation ensures that the stack buffer contents are erased.
 * Any thread-local storage will be reverted to a pristine state.
 *
 * Memory domain membership, resource pool assignment, kernel object
 * permissions, priority, and thread options are preserved.
 *
 * A common use of this function is to re-use the main thread as a user thread
 * once all supervisor mode-only tasks have been completed.
 *
 * @param entry Function to start executing from
 * @param p1 1st entry point parameter
 * @param p2 2nd entry point parameter
 * @param p3 3rd entry point parameter
 */
extern FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
						   void *p1, void *p2,
						   void *p3);

/**
 * @brief Grant a thread access to a set of kernel objects
 *
 * This is a convenience function. For the provided thread, grant access to
 * the remaining arguments, which must be pointers to kernel objects.
 *
 * The thread object must be initialized (i.e. running). The objects don't
 * need to be.
 * Note that NULL shouldn't be passed as an argument.
 *
 * @param thread Thread to grant access to objects
 * @param ... list of kernel object pointers
 */
#define k_thread_access_grant(thread, ...) \
	FOR_EACH_FIXED_ARG(k_object_access_grant, (;), thread, __VA_ARGS__)

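/*
 * Example (illustrative sketch): grant a user thread access to a semaphore
 * and a message queue before starting it. The object names my_sem and
 * my_msgq are hypothetical; tid is assumed to come from k_thread_create().
 *
 * @code
 * k_thread_access_grant(tid, &my_sem, &my_msgq);
 * k_thread_start(tid);
 * @endcode
 */
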
/**
 * @brief Assign a resource memory pool to a thread
 *
 * By default, threads have no resource pool assigned unless their parent
 * thread has a resource pool, in which case it is inherited. Multiple
 * threads may be assigned to the same memory pool.
 *
 * Changing a thread's resource pool will not migrate allocations from the
 * previous pool.
 *
 * @param thread Target thread to assign a memory pool for resource requests.
 * @param heap Heap object to use for resources,
 *             or NULL if the thread should no longer have a memory pool.
 */
static inline void k_thread_heap_assign(struct k_thread *thread,
					struct k_heap *heap)
{
	thread->resource_pool = heap;
}

#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
/**
 * @brief Obtain stack usage information for the specified thread
 *
 * User threads will need to have permission on the target thread object.
 *
 * Some hardware may prevent inspection of a stack buffer currently in use.
 * If this API is called from supervisor mode, on the currently running thread,
 * on a platform which selects @option{CONFIG_NO_UNUSED_STACK_INSPECTION}, an
 * error will be generated.
 *
 * @param thread Thread to inspect stack information
 * @param unused_ptr Output parameter, filled in with the unused stack space
 *	of the target thread in bytes.
 * @return 0 on success
 * @return -EBADF Bad thread object (user mode only)
 * @return -EPERM No permissions on thread object (user mode only)
 * @return -ENOTSUP Forbidden by hardware policy
 * @return -EINVAL Thread is uninitialized or exited (user mode only)
 * @return -EFAULT Bad memory address for unused_ptr (user mode only)
 */
__syscall int k_thread_stack_space_get(const struct k_thread *thread,
				       size_t *unused_ptr);
#endif

#if (CONFIG_HEAP_MEM_POOL_SIZE > 0)
/**
 * @brief Assign the system heap as a thread's resource pool
 *
 * Similar to k_thread_heap_assign(), but the thread will use
 * the kernel heap to draw memory.
 *
 * Use with caution, as a malicious thread could perform DoS attacks on the
 * kernel heap.
 *
 * @param thread Target thread to assign the system heap for resource requests
 *
 */
void k_thread_system_pool_assign(struct k_thread *thread);
#endif /* (CONFIG_HEAP_MEM_POOL_SIZE > 0) */

/**
 * @brief Sleep until a thread exits
 *
 * The caller will be put to sleep until the target thread exits, either due
 * to being aborted, self-exiting, or taking a fatal error. This API returns
 * immediately if the thread isn't running.
 *
 * This API may only be called from ISRs with a K_NO_WAIT timeout,
 * where it can be useful as a predicate to detect when a thread has
 * aborted.
 *
 * @param thread Thread to wait to exit
 * @param timeout upper bound time to wait for the thread to exit.
 * @retval 0 success, target thread has exited or wasn't running
 * @retval -EBUSY returned without waiting
 * @retval -EAGAIN waiting period timed out
 * @retval -EDEADLK target thread is joining on the caller, or target thread
 *	is the caller
 */
__syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);

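/*
 * Example (illustrative sketch): wait up to one second for a worker thread
 * to finish; tid is assumed to have been returned by k_thread_create().
 *
 * @code
 * int ret = k_thread_join(tid, K_SECONDS(1));
 *
 * if (ret == -EAGAIN) {
 *	printk("worker still running\n");
 * }
 * @endcode
 */
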
/**
 * @brief Put the current thread to sleep.
 *
 * This routine puts the current thread to sleep for @a duration,
 * specified as a k_timeout_t object.
 *
 * @note if @a timeout is set to K_FOREVER then the thread is suspended.
 *
 * @param timeout Desired duration of sleep.
 *
 * @return Zero if the requested time has elapsed or the number of milliseconds
 * left to sleep, if thread was woken up by \ref k_wakeup call.
 */
__syscall int32_t k_sleep(k_timeout_t timeout);

/**
 * @brief Put the current thread to sleep.
 *
 * This routine puts the current thread to sleep for @a duration milliseconds.
 *
 * @param ms Number of milliseconds to sleep.
 *
 * @return Zero if the requested time has elapsed or the number of milliseconds
 * left to sleep, if thread was woken up by \ref k_wakeup call.
 */
static inline int32_t k_msleep(int32_t ms)
{
	return k_sleep(Z_TIMEOUT_MS(ms));
}

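/*
 * Example (illustrative sketch): sleep for 100 ms, then check whether the
 * sleep was cut short by a k_wakeup() call from another thread.
 *
 * @code
 * int32_t left = k_msleep(100);
 *
 * if (left > 0) {
 *	printk("woken %d ms early\n", left);
 * }
 * @endcode
 */
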
/**
 * @brief Put the current thread to sleep with microsecond resolution.
 *
 * This function is unlikely to work as expected without kernel tuning.
 * In particular, because the lower bound on the duration of a sleep is
 * the duration of a tick, @option{CONFIG_SYS_CLOCK_TICKS_PER_SEC} must be
 * adjusted to achieve the resolution desired. The implications of doing
 * this must be understood before attempting to use k_usleep(). Use with
 * caution.
 *
 * @param us Number of microseconds to sleep.
 *
 * @return Zero if the requested time has elapsed or the number of microseconds
 * left to sleep, if thread was woken up by \ref k_wakeup call.
 */
__syscall int32_t k_usleep(int32_t us);

/**
 * @brief Cause the current thread to busy wait.
 *
 * This routine causes the current thread to execute a "do nothing" loop for
 * @a usec_to_wait microseconds.
 *
 * @note The clock used for the microsecond-resolution delay here may
 * be skewed relative to the clock used for system timeouts like
 * k_sleep(). For example k_busy_wait(1000) may take slightly more or
 * less time than k_sleep(K_MSEC(1)), with the offset dependent on
 * clock tolerances.
 *
 * @return N/A
 */
__syscall void k_busy_wait(uint32_t usec_to_wait);

/**
 * @brief Yield the current thread.
 *
 * This routine causes the current thread to yield execution to another
 * thread of the same or higher priority. If there are no other ready threads
 * of the same or higher priority, the routine returns immediately.
 *
 * @return N/A
 */
__syscall void k_yield(void);

/**
 * @brief Wake up a sleeping thread.
 *
 * This routine prematurely wakes up @a thread from sleeping.
 *
 * If @a thread is not currently sleeping, the routine has no effect.
 *
 * @param thread ID of thread to wake.
 *
 * @return N/A
 */
__syscall void k_wakeup(k_tid_t thread);

/**
 * @brief Get thread ID of the current thread.
 *
 * @return ID of current thread.
 *
 */
__syscall k_tid_t k_current_get(void) __attribute_const__;

/**
 * @brief Abort a thread.
 *
 * This routine permanently stops execution of @a thread. The thread is taken
 * off all kernel queues it is part of (i.e. the ready queue, the timeout
 * queue, or a kernel object wait queue). However, any kernel resources the
 * thread might currently own (such as mutexes or memory blocks) are not
 * released. It is the responsibility of the caller of this routine to ensure
 * all necessary cleanup is performed.
 *
 * After k_thread_abort() returns, the thread is guaranteed not to be
 * running or to become runnable anywhere on the system. Normally
 * this is done via blocking the caller (in the same manner as
 * k_thread_join()), but in interrupt context on SMP systems the
 * implementation is required to spin for threads that are running on
 * other CPUs. Note that as specified, this means that on SMP
 * platforms it is possible for application code to create a deadlock
 * condition by simultaneously aborting a cycle of threads using at
 * least one termination from interrupt context. Zephyr cannot detect
 * all such conditions.
 *
 * @param thread ID of thread to abort.
 *
 * @return N/A
 */
__syscall void k_thread_abort(k_tid_t thread);


/**
 * @brief Start an inactive thread
 *
 * If a thread was created with K_FOREVER in the delay parameter, it will
 * not be added to the scheduling queue until this function is called
 * on it.
 *
 * @param thread thread to start
 */
__syscall void k_thread_start(k_tid_t thread);

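/*
 * Example (illustrative sketch): create a thread with a K_FOREVER delay so
 * it stays inactive, then start it later once setup is complete. The names
 * my_thread, my_stack, and my_entry are hypothetical.
 *
 * @code
 * k_tid_t tid = k_thread_create(&my_thread, my_stack,
 *				 K_THREAD_STACK_SIZEOF(my_stack),
 *				 my_entry, NULL, NULL, NULL,
 *				 K_PRIO_PREEMPT(7), 0, K_FOREVER);
 *
 * k_thread_start(tid);
 * @endcode
 */
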
extern k_ticks_t z_timeout_expires(const struct _timeout *timeout);
extern k_ticks_t z_timeout_remaining(const struct _timeout *timeout);

#ifdef CONFIG_SYS_CLOCK_EXISTS

/**
 * @brief Get time when a thread wakes up, in system ticks
 *
 * This routine computes the system uptime when a waiting thread next
 * executes, in units of system ticks. If the thread is not waiting,
 * it returns current system time.
 */
__syscall k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *t);

static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
						const struct k_thread *t)
{
	return z_timeout_expires(&t->base.timeout);
}

/**
 * @brief Get time remaining before a thread wakes up, in system ticks
 *
 * This routine computes the time remaining before a waiting thread
 * next executes, in units of system ticks. If the thread is not
 * waiting, it returns zero.
 */
__syscall k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *t);

static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
						const struct k_thread *t)
{
	return z_timeout_remaining(&t->base.timeout);
}

#endif /* CONFIG_SYS_CLOCK_EXISTS */

/**
 * @cond INTERNAL_HIDDEN
 */

/* timeout has timed out and is not on _timeout_q anymore */
#define _EXPIRED (-2)

struct _static_thread_data {
	struct k_thread *init_thread;
	k_thread_stack_t *init_stack;
	unsigned int init_stack_size;
	k_thread_entry_t init_entry;
	void *init_p1;
	void *init_p2;
	void *init_p3;
	int init_prio;
	uint32_t init_options;
	int32_t init_delay;
	void (*init_abort)(void);
	const char *init_name;
};

#define Z_THREAD_INITIALIZER(thread, stack, stack_size, \
			     entry, p1, p2, p3,         \
			     prio, options, delay, abort, tname) \
	{                                               \
	.init_thread = (thread),                        \
	.init_stack = (stack),                          \
	.init_stack_size = (stack_size),                \
	.init_entry = (k_thread_entry_t)entry,          \
	.init_p1 = (void *)p1,                          \
	.init_p2 = (void *)p2,                          \
	.init_p3 = (void *)p3,                          \
	.init_prio = (prio),                            \
	.init_options = (options),                      \
	.init_delay = (delay),                          \
	.init_abort = (abort),                          \
	.init_name = STRINGIFY(tname),                  \
	}

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @brief Statically define and initialize a thread.
 *
 * The thread may be scheduled for immediate execution or a delayed start.
 *
 * Thread options are architecture-specific, and can include K_ESSENTIAL,
 * K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
 * them using "|" (the logical OR operator).
 *
 * The ID of the thread can be accessed using:
 *
 * @code extern const k_tid_t <name>; @endcode
 *
 * @param name Name of the thread.
 * @param stack_size Stack size in bytes.
 * @param entry Thread entry function.
 * @param p1 1st entry point parameter.
 * @param p2 2nd entry point parameter.
 * @param p3 3rd entry point parameter.
 * @param prio Thread priority.
 * @param options Thread options.
 * @param delay Scheduling delay (in milliseconds), zero for no delay.
 *
 * @internal It has been observed that the x86 compiler by default aligns
 * these _static_thread_data structures to 32-byte boundaries, thereby
 * wasting space. To work around this, force a 4-byte alignment.
 *
 */
#define K_THREAD_DEFINE(name, stack_size,                                \
			entry, p1, p2, p3,                               \
			prio, options, delay)                            \
	K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size);       \
	struct k_thread _k_thread_obj_##name;                            \
	Z_STRUCT_SECTION_ITERABLE(_static_thread_data, _k_thread_data_##name) = \
		Z_THREAD_INITIALIZER(&_k_thread_obj_##name,              \
				     _k_thread_stack_##name, stack_size, \
				     entry, p1, p2, p3, prio, options, delay, \
				     NULL, name);                        \
	const k_tid_t name = (k_tid_t)&_k_thread_obj_##name

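/*
 * Example (illustrative sketch): statically define a thread that starts
 * 500 ms after boot. The names blink and blink_fn, the stack size, and the
 * priority are hypothetical.
 *
 * @code
 * void blink_fn(void *p1, void *p2, void *p3)
 * {
 *	for (;;) {
 *		k_msleep(1000);
 *	}
 * }
 *
 * K_THREAD_DEFINE(blink, 1024, blink_fn, NULL, NULL, NULL,
 *		   K_PRIO_PREEMPT(7), 0, 500);
 * @endcode
 */
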
/**
 * @brief Get a thread's priority.
 *
 * This routine gets the priority of @a thread.
 *
 * @param thread ID of thread whose priority is needed.
 *
 * @return Priority of @a thread.
 */
__syscall int k_thread_priority_get(k_tid_t thread);

/**
 * @brief Set a thread's priority.
 *
 * This routine immediately changes the priority of @a thread.
 *
 * Rescheduling can occur immediately depending on the priority @a thread is
 * set to:
 *
 * - If its priority is raised above the priority of the caller of this
 *   function, and the caller is preemptible, @a thread will be scheduled in.
 *
 * - If the caller operates on itself, it lowers its priority below that of
 *   other threads in the system, and the caller is preemptible, the thread of
 *   highest priority will be scheduled in.
 *
 * Priority can be assigned in the range of -CONFIG_NUM_COOP_PRIORITIES to
 * CONFIG_NUM_PREEMPT_PRIORITIES-1, where -CONFIG_NUM_COOP_PRIORITIES is the
 * highest priority.
 *
 * @param thread ID of thread whose priority is to be set.
 * @param prio New priority.
 *
 * @warning Changing the priority of a thread currently involved in mutex
 * priority inheritance may result in undefined behavior.
 *
 * @return N/A
 */
__syscall void k_thread_priority_set(k_tid_t thread, int prio);


#ifdef CONFIG_SCHED_DEADLINE
/**
 * @brief Set deadline expiration time for scheduler
 *
 * This sets the "deadline" expiration as a time delta from the
 * current time, in the same units used by k_cycle_get_32(). The
 * scheduler (when deadline scheduling is enabled) will choose the
 * next expiring thread when selecting between threads at the same
 * static priority. Threads at different priorities will be scheduled
 * according to their static priority.
 *
 * @note Deadlines are stored internally using 32 bit unsigned
 * integers. The number of cycles between the "first" deadline in the
 * scheduler queue and the "last" deadline must be less than 2^31 (i.e
 * a signed non-negative quantity). Failure to adhere to this rule
 * may result in scheduled threads running in an incorrect deadline
 * order.
 *
 * @note Despite the API naming, the scheduler makes no guarantee that
 * the thread WILL be scheduled within that deadline, nor does it take
 * extra metadata (like e.g. the "runtime" and "period" parameters in
 * Linux sched_setattr()) that allows the kernel to validate the
 * scheduling for achievability. Such features could be implemented
 * above this call, which is simply input to the priority selection
 * logic.
 *
 * @note You should enable @option{CONFIG_SCHED_DEADLINE} in your project
 * configuration.
 *
 * @param thread A thread on which to set the deadline
 * @param deadline A time delta, in cycle units
 *
 */
__syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
#endif

#ifdef CONFIG_SCHED_CPU_MASK
/**
 * @brief Sets all CPU enable masks to zero
 *
 * After this returns, the thread will no longer be schedulable on any
 * CPUs. The thread must not be currently runnable.
 *
 * @note You should enable @option{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_clear(k_tid_t thread);

/**
 * @brief Sets all CPU enable masks to one
 *
 * After this returns, the thread will be schedulable on any CPU. The
 * thread must not be currently runnable.
 *
 * @note You should enable @option{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_enable_all(k_tid_t thread);

/**
 * @brief Enable thread to run on specified CPU
 *
 * The thread must not be currently runnable.
 *
 * @note You should enable @option{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @param cpu CPU index
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_enable(k_tid_t thread, int cpu);

/**
 * @brief Prevent a thread from running on the specified CPU
 *
 * The thread must not be currently runnable.
 *
 * @note You should enable @option{CONFIG_SCHED_CPU_MASK} in your project
 * configuration.
 *
 * @param thread Thread to operate upon
 * @param cpu CPU index
 * @return Zero on success, otherwise error code
 */
int k_thread_cpu_mask_disable(k_tid_t thread, int cpu);
#endif

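/*
 * Example (illustrative sketch): pin a not-yet-started thread to CPU 0
 * before starting it. Assumes CONFIG_SCHED_CPU_MASK is enabled and that
 * tid refers to a thread created with a K_FOREVER delay.
 *
 * @code
 * k_thread_cpu_mask_clear(tid);
 * k_thread_cpu_mask_enable(tid, 0);
 * k_thread_start(tid);
 * @endcode
 */
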
/**
 * @brief Suspend a thread.
 *
 * This routine prevents the kernel scheduler from making @a thread
 * the current thread. All other internal operations on @a thread are
 * still performed; for example, kernel objects it is waiting on are
 * still handed to it. Note that any existing timeouts
 * (e.g. k_sleep(), or a timeout argument to k_sem_take() et. al.)
 * will be canceled. On resume, the thread will begin running
 * immediately and return from the blocked call.
 *
 * If @a thread is already suspended, the routine has no effect.
 *
 * @param thread ID of thread to suspend.
 *
 * @return N/A
 */
__syscall void k_thread_suspend(k_tid_t thread);

/**
 * @brief Resume a suspended thread.
 *
 * This routine allows the kernel scheduler to make @a thread the current
 * thread, when it is next eligible for that role.
 *
 * If @a thread is not currently suspended, the routine has no effect.
 *
 * @param thread ID of thread to resume.
 *
 * @return N/A
 */
__syscall void k_thread_resume(k_tid_t thread);

Peter Mitsis348eb4c2016-10-26 11:22:14 -0400862/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -0500863 * @brief Set time-slicing period and scope.
Peter Mitsis348eb4c2016-10-26 11:22:14 -0400864 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -0500865 * This routine specifies how the scheduler will perform time slicing of
866 * preemptible threads.
Peter Mitsis348eb4c2016-10-26 11:22:14 -0400867 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -0500868 * To enable time slicing, @a slice must be non-zero. The scheduler
869 * ensures that no thread runs for more than the specified time limit
870 * before other threads of that priority are given a chance to execute.
871 * Any thread whose priority is higher than @a prio is exempted, and may
David B. Kinder8b986d72017-04-18 15:56:26 -0700872 * execute as long as desired without being preempted due to time slicing.
Peter Mitsis348eb4c2016-10-26 11:22:14 -0400873 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -0500874 * Time slicing only limits the maximum amount of time a thread may continuously
Peter Mitsis348eb4c2016-10-26 11:22:14 -0400875 * execute. Once the scheduler selects a thread for execution, there is no
876 * minimum guaranteed time the thread will execute before threads of greater or
877 * equal priority are scheduled.
878 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -0500879 * When the current thread is the only one of that priority eligible
Peter Mitsis348eb4c2016-10-26 11:22:14 -0400880 * for execution, this routine has no effect; the thread is immediately
881 * rescheduled after the slice period expires.
882 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -0500883 * To disable timeslicing, set both @a slice and @a prio to zero.
884 *
885 * @param slice Maximum time slice length (in milliseconds).
886 * @param prio Highest thread priority level eligible for time slicing.
Peter Mitsis348eb4c2016-10-26 11:22:14 -0400887 *
888 * @return N/A
889 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -0500890extern void k_sched_time_slice_set(int32_t slice, int prio);
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400891
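/*
 * Example (illustrative sketch): give preemptible threads at priority 0
 * and below (numerically greater values) round-robin slices of 10 ms.
 *
 * @code
 * k_sched_time_slice_set(10, 0);
 * @endcode
 */
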
/** @} */

/**
 * @addtogroup isr_apis
 * @{
 */

/**
 * @brief Determine if code is running at interrupt level.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it is a thread or an ISR.
 *
 * @funcprops \isr_ok
 *
 * @return false if invoked by a thread.
 * @return true if invoked by an ISR.
 */
extern bool k_is_in_isr(void);

/**
 * @brief Determine if code is running in a preemptible thread.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it can be preempted by another thread. The routine returns a 'true'
 * value if all of the following conditions are met:
 *
 * - The code is running in a thread, not at ISR.
 * - The thread's priority is in the preemptible range.
 * - The thread has not locked the scheduler.
 *
 * @funcprops \isr_ok
 *
 * @return 0 if invoked by an ISR or by a cooperative thread.
 * @return Non-zero if invoked by a preemptible thread.
 */
__syscall int k_is_preempt_thread(void);

/**
 * @brief Test whether startup is in the before-main-task phase.
 *
 * This routine allows the caller to customize its actions, depending on
 * whether it is being invoked before the kernel is fully active.
 *
 * @funcprops \isr_ok
 *
 * @return true if invoked before post-kernel initialization
 * @return false if invoked during/after post-kernel initialization
 */
static inline bool k_is_pre_kernel(void)
{
	extern bool z_sys_post_kernel; /* in init.c */

	return !z_sys_post_kernel;
}

/**
 * @}
 */

/**
 * @addtogroup thread_apis
 * @{
 */

/**
 * @brief Lock the scheduler.
 *
 * This routine prevents the current thread from being preempted by another
 * thread by instructing the scheduler to treat it as a cooperative thread.
 * If the thread subsequently performs an operation that makes it unready,
 * it will be context switched out in the normal manner. When the thread
 * again becomes the current thread, its non-preemptible status is maintained.
 *
 * This routine can be called recursively.
 *
 * @note k_sched_lock() and k_sched_unlock() should normally be used
 * when the operation being performed can be safely interrupted by ISRs.
 * However, if the amount of processing involved is very small, better
 * performance may be obtained by using irq_lock() and irq_unlock().
 *
 * @return N/A
 */
extern void k_sched_lock(void);

/**
 * @brief Unlock the scheduler.
 *
 * This routine reverses the effect of a previous call to k_sched_lock().
 * A thread must call the routine once for each time it called k_sched_lock()
 * before the thread becomes preemptible.
 *
 * @return N/A
 */
extern void k_sched_unlock(void);

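/*
 * Example (illustrative sketch): protect a short critical section from
 * preemption by other threads; ISRs can still run. The function
 * update_shared_state() is hypothetical.
 *
 * @code
 * k_sched_lock();
 * update_shared_state();
 * k_sched_unlock();
 * @endcode
 */
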
/**
 * @brief Set current thread's custom data.
 *
 * This routine sets the custom data for the current thread to @a value.
 *
 * Custom data is not used by the kernel itself, and is freely available
 * for a thread to use as it sees fit. It can be used as a framework
 * upon which to build thread-local storage.
 *
 * @param value New custom data value.
 *
 * @return N/A
 *
 */
__syscall void k_thread_custom_data_set(void *value);

/**
 * @brief Get current thread's custom data.
 *
 * This routine returns the custom data for the current thread.
 *
 * @return Current custom data value.
 */
__syscall void *k_thread_custom_data_get(void);

/**
 * @brief Set current thread name
 *
 * Set the name of the thread to be used when @option{CONFIG_THREAD_MONITOR}
 * is enabled for tracing and debugging.
 *
 * @param thread Thread to set name, or NULL to set the current thread
 * @param str Name string
 * @retval 0 on success
 * @retval -EFAULT Memory access error with supplied string
 * @retval -ENOSYS Thread name configuration option not enabled
 * @retval -EINVAL Thread name too long
 */
__syscall int k_thread_name_set(k_tid_t thread, const char *str);

/**
 * @brief Get thread name
 *
 * Get the name of a thread
 *
 * @param thread Thread ID
 * @retval Thread name, or NULL if configuration not enabled
 */
const char *k_thread_name_get(k_tid_t thread);

/**
 * @brief Copy the thread name into a supplied buffer
 *
 * @param thread Thread to obtain name information
 * @param buf Destination buffer
 * @param size Destination buffer size
 * @retval -ENOSPC Destination buffer too small
 * @retval -EFAULT Memory access error
 * @retval -ENOSYS Thread name feature not enabled
 * @retval 0 Success
 */
__syscall int k_thread_name_copy(k_tid_t thread, char *buf,
				 size_t size);

/**
 * @brief Get thread state string
 *
 * Get the human friendly thread state string
 *
 * @param thread_id Thread ID
 * @retval Thread state string, empty if no state flag is set
 */
const char *k_thread_state_str(k_tid_t thread_id);

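/*
 * Example (illustrative sketch): name the current thread, then copy the
 * name back out. Assumes the thread-name configuration option is enabled;
 * the buffer size is arbitrary.
 *
 * @code
 * char name[32];
 *
 * k_thread_name_set(NULL, "worker");
 * k_thread_name_copy(k_current_get(), name, sizeof(name));
 * @endcode
 */
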
/**
 * @}
 */

/**
 * @addtogroup clock_apis
 * @{
 */

/**
 * @brief Generate null timeout delay.
 *
 * This macro generates a timeout delay that instructs a kernel API
 * not to wait if the requested operation cannot be performed immediately.
 *
 * @return Timeout delay value.
 */
#define K_NO_WAIT Z_TIMEOUT_NO_WAIT

1081/**
Andy Rosse1bc5952020-03-09 12:19:54 -07001082 * @brief Generate timeout delay from nanoseconds.
1083 *
1084 * This macro generates a timeout delay that instructs a kernel API to
1085 * wait up to @a t nanoseconds to perform the requested operation.
1086 * Note that timer precision is limited to the tick rate, not the
1087 * requested value.
1088 *
Andy Rosse39bf292020-03-19 10:30:33 -07001089 * @param t Duration in nanoseconds.
Andy Rosse1bc5952020-03-09 12:19:54 -07001090 *
1091 * @return Timeout delay value.
1092 */
1093#define K_NSEC(t) Z_TIMEOUT_NS(t)
1094
1095/**
1096 * @brief Generate timeout delay from microseconds.
1097 *
1098 * This macro generates a timeout delay that instructs a kernel API
1099 * to wait up to @a t microseconds to perform the requested operation.
1100 * Note that timer precision is limited to the tick rate, not the
1101 * requested value.
1102 *
Andy Rosse39bf292020-03-19 10:30:33 -07001103 * @param t Duration in microseconds.
Andy Rosse1bc5952020-03-09 12:19:54 -07001104 *
1105 * @return Timeout delay value.
1106 */
1107#define K_USEC(t) Z_TIMEOUT_US(t)
1108
1109/**
1110 * @brief Generate timeout delay from cycles.
1111 *
1112 * This macro generates a timeout delay that instructs a kernel API
1113 * to wait up to @a t cycles to perform the requested operation.
1114 *
Andy Rosse39bf292020-03-19 10:30:33 -07001115 * @param t Duration in cycles.
Andy Rosse1bc5952020-03-09 12:19:54 -07001116 *
1117 * @return Timeout delay value.
1118 */
1119#define K_CYC(t) Z_TIMEOUT_CYC(t)
1120
1121/**
1122 * @brief Generate timeout delay from system ticks.
1123 *
1124 * This macro generates a timeout delay that instructs a kernel API
1125 * to wait up to @a t ticks to perform the requested operation.
1126 *
Andy Rosse39bf292020-03-19 10:30:33 -07001127 * @param t Duration in system ticks.
Andy Rosse1bc5952020-03-09 12:19:54 -07001128 *
1129 * @return Timeout delay value.
1130 */
1131#define K_TICKS(t) Z_TIMEOUT_TICKS(t)
1132
1133/**
Allan Stephensc2f15a42016-11-17 12:24:22 -05001134 * @brief Generate timeout delay from milliseconds.
1135 *
Maksim Masalskife1ff2f2019-10-29 16:50:44 +08001136 * This macro generates a timeout delay that instructs a kernel API
Allan Stephensc2f15a42016-11-17 12:24:22 -05001137 * to wait up to @a ms milliseconds to perform the requested operation.
1138 *
1139 * @param ms Duration in milliseconds.
1140 *
1141 * @return Timeout delay value.
1142 */
Andy Ross78327382020-03-05 15:18:14 -08001143#define K_MSEC(ms) Z_TIMEOUT_MS(ms)
Allan Stephensc2f15a42016-11-17 12:24:22 -05001144
1145/**
1146 * @brief Generate timeout delay from seconds.
1147 *
Maksim Masalskife1ff2f2019-10-29 16:50:44 +08001148 * This macro generates a timeout delay that instructs a kernel API
Allan Stephensc2f15a42016-11-17 12:24:22 -05001149 * to wait up to @a s seconds to perform the requested operation.
1150 *
1151 * @param s Duration in seconds.
1152 *
1153 * @return Timeout delay value.
1154 */
Johan Hedberg14471692016-11-13 10:52:15 +02001155#define K_SECONDS(s) K_MSEC((s) * MSEC_PER_SEC)
Allan Stephensc2f15a42016-11-17 12:24:22 -05001156
1157/**
1158 * @brief Generate timeout delay from minutes.
Maksim Masalskife1ff2f2019-10-29 16:50:44 +08001159 *
1160 * This macro generates a timeout delay that instructs a kernel API
Allan Stephensc2f15a42016-11-17 12:24:22 -05001161 * to wait up to @a m minutes to perform the requested operation.
1162 *
1163 * @param m Duration in minutes.
1164 *
1165 * @return Timeout delay value.
1166 */
Johan Hedberg14471692016-11-13 10:52:15 +02001167#define K_MINUTES(m) K_SECONDS((m) * 60)
Allan Stephensc2f15a42016-11-17 12:24:22 -05001168
1169/**
1170 * @brief Generate timeout delay from hours.
1171 *
Maksim Masalskife1ff2f2019-10-29 16:50:44 +08001172 * This macro generates a timeout delay that instructs a kernel API
Allan Stephensc2f15a42016-11-17 12:24:22 -05001173 * to wait up to @a h hours to perform the requested operation.
1174 *
1175 * @param h Duration in hours.
1176 *
1177 * @return Timeout delay value.
1178 */
Johan Hedberg14471692016-11-13 10:52:15 +02001179#define K_HOURS(h) K_MINUTES((h) * 60)
1180
Allan Stephensc98da842016-11-11 15:45:03 -05001181/**
Allan Stephensc2f15a42016-11-17 12:24:22 -05001182 * @brief Generate infinite timeout delay.
1183 *
Maksim Masalskife1ff2f2019-10-29 16:50:44 +08001184 * This macro generates a timeout delay that instructs a kernel API
Allan Stephensc2f15a42016-11-17 12:24:22 -05001185 * to wait as long as necessary to perform the requested operation.
1186 *
1187 * @return Timeout delay value.
1188 */
Andy Ross78327382020-03-05 15:18:14 -08001189#define K_FOREVER Z_FOREVER
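/*
 * Illustrative sketch: every macro above yields a k_timeout_t that any
 * blocking kernel API accepts. K_NO_WAIT fails immediately if the
 * operation cannot be performed, K_MSEC(100) waits at most 100 ms
 * (rounded to the tick rate), and K_FOREVER waits as long as necessary.
 * my_queue is a hypothetical, already-initialized struct k_queue (see
 * the queue APIs later in this file).
 *
 * @code
 * void *item;
 *
 * item = k_queue_get(&my_queue, K_NO_WAIT);
 * item = k_queue_get(&my_queue, K_MSEC(100));
 * item = k_queue_get(&my_queue, K_FOREVER);
 * @endcode
 */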
Allan Stephensc2f15a42016-11-17 12:24:22 -05001190
Andy Rosse1bc5952020-03-09 12:19:54 -07001191#ifdef CONFIG_TIMEOUT_64BIT
1192
Allan Stephensc2f15a42016-11-17 12:24:22 -05001193/**
Andy Rosse39bf292020-03-19 10:30:33 -07001194 * @brief Generates an absolute/uptime timeout value from system ticks
Andy Ross4c7b77a2020-03-09 09:35:35 -07001195 *
1196 * This macro generates a timeout delay that represents an expiration
Andy Rosse39bf292020-03-19 10:30:33 -07001197 * at the absolute uptime value specified, in system ticks. That is, the
Andy Ross4c7b77a2020-03-09 09:35:35 -07001198 * timeout will expire immediately after the system uptime reaches the
1199 * specified tick count.
1200 *
1201 * @param t Tick uptime value
1202 * @return Timeout delay value
1203 */
Martin Jäger19c2f782020-11-09 10:14:53 +01001204#define K_TIMEOUT_ABS_TICKS(t) \
1205 Z_TIMEOUT_TICKS(Z_TICK_ABS((k_ticks_t)MAX(t, 0)))
Andy Ross4c7b77a2020-03-09 09:35:35 -07001206
1207/**
Andy Rosse39bf292020-03-19 10:30:33 -07001208 * @brief Generates an absolute/uptime timeout value from milliseconds
Andy Ross4c7b77a2020-03-09 09:35:35 -07001209 *
1210 * This macro generates a timeout delay that represents an expiration
1211 * at the absolute uptime value specified, in milliseconds. That is,
1212 * the timeout will expire immediately after the system uptime reaches
1213 * the specified time.
1214 *
1215 * @param t Millisecond uptime value
1216 * @return Timeout delay value
1217 */
1218#define K_TIMEOUT_ABS_MS(t) K_TIMEOUT_ABS_TICKS(k_ms_to_ticks_ceil64(t))
1219
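/*
 * Illustrative sketch, assuming a timer my_timer defined elsewhere: an
 * absolute timeout pins the expiration to a point on the uptime axis
 * rather than to "now", which avoids cumulative drift when the call
 * that starts the wait is itself delayed.
 *
 * @code
 * k_timer_start(&my_timer, K_TIMEOUT_ABS_MS(5000), K_NO_WAIT);
 * @endcode
 *
 * The timer fires once when system uptime reaches 5000 ms; the
 * K_NO_WAIT period leaves it one-shot.
 */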
1220/**
Andy Rosse39bf292020-03-19 10:30:33 -07001221 * @brief Generates an absolute/uptime timeout value from microseconds
Andy Rosse1bc5952020-03-09 12:19:54 -07001222 *
1223 * This macro generates a timeout delay that represents an expiration
1224 * at the absolute uptime value specified, in microseconds. That is,
1225 * the timeout will expire immediately after the system uptime reaches
1226 * the specified time. Note that timer precision is limited by the
1227 * system tick rate and not the requested timeout value.
1228 *
1229 * @param t Microsecond uptime value
1230 * @return Timeout delay value
1231 */
1232#define K_TIMEOUT_ABS_US(t) K_TIMEOUT_ABS_TICKS(k_us_to_ticks_ceil64(t))
1233
1234/**
Andy Rosse39bf292020-03-19 10:30:33 -07001235 * @brief Generates an absolute/uptime timeout value from nanoseconds
Andy Rosse1bc5952020-03-09 12:19:54 -07001236 *
1237 * This macro generates a timeout delay that represents an expiration
1238 * at the absolute uptime value specified, in nanoseconds. That is,
1239 * the timeout will expire immediately after the system uptime reaches
1240 * the specified time. Note that timer precision is limited by the
1241 * system tick rate and not the requested timeout value.
1242 *
1243 * @param t Nanosecond uptime value
1244 * @return Timeout delay value
1245 */
1246#define K_TIMEOUT_ABS_NS(t) K_TIMEOUT_ABS_TICKS(k_ns_to_ticks_ceil64(t))
1247
1248/**
Andy Rosse39bf292020-03-19 10:30:33 -07001249 * @brief Generates an absolute/uptime timeout value from system cycles
Andy Rosse1bc5952020-03-09 12:19:54 -07001250 *
1251 * This macro generates a timeout delay that represents an expiration
1252 * at the absolute uptime value specified, in cycles. That is, the
1253 * timeout will expire immediately after the system uptime reaches the
1254 * specified time. Note that timer precision is limited by the system
1255 * tick rate and not the requested timeout value.
1256 *
1257 * @param t Cycle uptime value
1258 * @return Timeout delay value
1259 */
1260#define K_TIMEOUT_ABS_CYC(t) K_TIMEOUT_ABS_TICKS(k_cyc_to_ticks_ceil64(t))
1261
1262#endif /* CONFIG_TIMEOUT_64BIT */
1263
1264/**
Anas Nashif166f5192018-02-25 08:02:36 -06001265 * @}
Allan Stephensc2f15a42016-11-17 12:24:22 -05001266 */
1267
1268/**
Allan Stephensc98da842016-11-11 15:45:03 -05001269 * @cond INTERNAL_HIDDEN
1270 */
Benjamin Walsha9604bd2016-09-21 11:05:56 -04001271
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001272struct k_timer {
1273 /*
1274 * _timeout structure must be first here if we want to use
1275 * dynamic timer allocation. timeout.node is used in the double-linked
1276 * list of free timers
1277 */
1278 struct _timeout timeout;
1279
Allan Stephens45bfa372016-10-12 12:39:42 -05001280 /* wait queue for the (single) thread waiting on this timer */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001281 _wait_q_t wait_q;
1282
1283 /* runs in ISR context */
Flavio Ceolin4b35dd22018-11-16 19:06:59 -08001284 void (*expiry_fn)(struct k_timer *timer);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001285
1286 /* runs in the context of the thread that calls k_timer_stop() */
Flavio Ceolin4b35dd22018-11-16 19:06:59 -08001287 void (*stop_fn)(struct k_timer *timer);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001288
1289 /* timer period */
Andy Ross78327382020-03-05 15:18:14 -08001290 k_timeout_t period;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001291
Allan Stephens45bfa372016-10-12 12:39:42 -05001292 /* timer status */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001293 uint32_t status;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001294
Benjamin Walshe4e98f92017-01-12 19:38:53 -05001295 /* user-specific data, also used to support legacy features */
1296 void *user_data;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001297
Flavio Ceolind1ed3362018-12-07 11:39:13 -08001298 _OBJECT_TRACING_NEXT_PTR(k_timer)
Shih-Wei Teng5ebceeb2019-10-08 14:37:47 +08001299 _OBJECT_TRACING_LINKED_FLAG
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001300};
1301
Patrik Flykt97b3bd12019-03-12 15:15:42 -06001302#define Z_TIMER_INITIALIZER(obj, expiry, stop) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001303 { \
Krzysztof Chruscinskibe063272019-02-13 11:19:54 +01001304 .timeout = { \
1305 .node = {},\
Peter Bigote37c7852020-07-07 12:34:05 -05001306 .fn = z_timer_expiration_handler, \
Krzysztof Chruscinskibe063272019-02-13 11:19:54 +01001307 .dticks = 0, \
Krzysztof Chruscinskibe063272019-02-13 11:19:54 +01001308 }, \
Patrik Flykt4344e272019-03-08 14:19:05 -07001309 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
Allan Stephens1342adb2016-11-03 13:54:53 -05001310 .expiry_fn = expiry, \
1311 .stop_fn = stop, \
1312 .status = 0, \
Benjamin Walshe4e98f92017-01-12 19:38:53 -05001313 .user_data = 0, \
Anas Nashif2f203c22016-12-18 06:57:45 -05001314 _OBJECT_TRACING_INIT \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001315 }
1316
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001317/**
Allan Stephensc98da842016-11-11 15:45:03 -05001318 * INTERNAL_HIDDEN @endcond
1319 */
1320
1321/**
1322 * @defgroup timer_apis Timer APIs
1323 * @ingroup kernel_apis
1324 * @{
1325 */
1326
1327/**
Allan Stephens5eceb852016-11-16 10:16:30 -05001328 * @typedef k_timer_expiry_t
1329 * @brief Timer expiry function type.
1330 *
1331 * A timer's expiry function is executed by the system clock interrupt handler
1332 * each time the timer expires. The expiry function is optional, and is only
1333 * invoked if the timer has been initialized with one.
1334 *
1335 * @param timer Address of timer.
1336 *
1337 * @return N/A
1338 */
1339typedef void (*k_timer_expiry_t)(struct k_timer *timer);
1340
1341/**
1342 * @typedef k_timer_stop_t
1343 * @brief Timer stop function type.
1344 *
1345 * A timer's stop function is executed if the timer is stopped prematurely.
Peter A. Bigot82a98d72020-09-21 05:34:56 -05001346 * The function runs in the context of the call that stops the timer. As
1347 * k_timer_stop() can be invoked from an ISR, the stop function must be
1348 * callable from interrupt context (isr-ok).
1349 *
Allan Stephens5eceb852016-11-16 10:16:30 -05001350 * The stop function is optional, and is only invoked if the timer has been
1351 * initialized with one.
1352 *
1353 * @param timer Address of timer.
1354 *
1355 * @return N/A
1356 */
1357typedef void (*k_timer_stop_t)(struct k_timer *timer);
1358
1359/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001360 * @brief Statically define and initialize a timer.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001361 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001362 * The timer can be accessed outside the module where it is defined using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001363 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05001364 * @code extern struct k_timer <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001365 *
1366 * @param name Name of the timer variable.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001367 * @param expiry_fn Function to invoke each time the timer expires.
1368 * @param stop_fn Function to invoke if the timer is stopped while running.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001369 */
Allan Stephens1342adb2016-11-03 13:54:53 -05001370#define K_TIMER_DEFINE(name, expiry_fn, stop_fn) \
Nicolas Pitreb1d37422019-06-03 10:51:32 -04001371 Z_STRUCT_SECTION_ITERABLE(k_timer, name) = \
Patrik Flykt97b3bd12019-03-12 15:15:42 -06001372 Z_TIMER_INITIALIZER(name, expiry_fn, stop_fn)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001373
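/*
 * Illustrative sketch: statically defining a timer whose expiry
 * function runs in ISR context each time the timer fires. blink_expiry
 * and toggle_led are hypothetical names.
 *
 * @code
 * static void blink_expiry(struct k_timer *timer)
 * {
 *         toggle_led();
 * }
 *
 * K_TIMER_DEFINE(blink_timer, blink_expiry, NULL);
 * @endcode
 */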
Allan Stephens45bfa372016-10-12 12:39:42 -05001374/**
1375 * @brief Initialize a timer.
1376 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001377 * This routine initializes a timer, prior to its first use.
Allan Stephens45bfa372016-10-12 12:39:42 -05001378 *
1379 * @param timer Address of timer.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001380 * @param expiry_fn Function to invoke each time the timer expires.
1381 * @param stop_fn Function to invoke if the timer is stopped while running.
Allan Stephens45bfa372016-10-12 12:39:42 -05001382 *
1383 * @return N/A
1384 */
1385extern void k_timer_init(struct k_timer *timer,
Allan Stephens5eceb852016-11-16 10:16:30 -05001386 k_timer_expiry_t expiry_fn,
1387 k_timer_stop_t stop_fn);
Andy Ross8d8b2ac2016-09-23 10:08:54 -07001388
Allan Stephens45bfa372016-10-12 12:39:42 -05001389/**
1390 * @brief Start a timer.
1391 *
1392 * This routine starts a timer, and resets its status to zero. The timer
1393 * begins counting down using the specified duration and period values.
1394 *
1395 * Attempting to start a timer that is already running is permitted.
1396 * The timer's status is reset to zero and the timer begins counting down
1397 * using the new duration and period values.
1398 *
1399 * @param timer Address of timer.
Andy Ross78327382020-03-05 15:18:14 -08001400 * @param duration Initial timer duration.
1401 * @param period Timer period.
Allan Stephens45bfa372016-10-12 12:39:42 -05001402 *
1403 * @return N/A
1404 */
Andrew Boiea354d492017-09-29 16:22:28 -07001405__syscall void k_timer_start(struct k_timer *timer,
Andy Ross78327382020-03-05 15:18:14 -08001406 k_timeout_t duration, k_timeout_t period);
Allan Stephens45bfa372016-10-12 12:39:42 -05001407
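/*
 * Illustrative sketch, reusing the hypothetical blink_timer from above:
 * wait 500 ms before the first expiry, then expire every second until
 * stopped.
 *
 * @code
 * k_timer_start(&blink_timer, K_MSEC(500), K_SECONDS(1));
 * @endcode
 */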
1408/**
1409 * @brief Stop a timer.
1410 *
1411 * This routine stops a running timer prematurely. The timer's stop function,
1412 * if one exists, is invoked by the caller.
1413 *
1414 * Attempting to stop a timer that is not running is permitted, but has no
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001415 * effect on the timer.
Allan Stephens45bfa372016-10-12 12:39:42 -05001416 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001417 * @note The stop handler has to be callable from ISRs if @a k_timer_stop is to
1418 * be called from ISRs.
1419 *
1420 * @funcprops \isr_ok
Anas Nashif4fb12ae2017-02-01 20:06:55 -05001421 *
Allan Stephens45bfa372016-10-12 12:39:42 -05001422 * @param timer Address of timer.
1423 *
1424 * @return N/A
1425 */
Andrew Boiea354d492017-09-29 16:22:28 -07001426__syscall void k_timer_stop(struct k_timer *timer);
Allan Stephens45bfa372016-10-12 12:39:42 -05001427
1428/**
1429 * @brief Read timer status.
1430 *
1431 * This routine reads the timer's status, which indicates the number of times
1432 * it has expired since its status was last read.
1433 *
1434 * Calling this routine resets the timer's status to zero.
1435 *
1436 * @param timer Address of timer.
1437 *
1438 * @return Timer status.
1439 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001440__syscall uint32_t k_timer_status_get(struct k_timer *timer);
Allan Stephens45bfa372016-10-12 12:39:42 -05001441
1442/**
1443 * @brief Synchronize thread to timer expiration.
1444 *
1445 * This routine blocks the calling thread until the timer's status is non-zero
1446 * (indicating that it has expired at least once since it was last examined)
1447 * or the timer is stopped. If the timer status is already non-zero,
1448 * or the timer is already stopped, the caller continues without waiting.
1449 *
1450 * Calling this routine resets the timer's status to zero.
1451 *
1452 * This routine must not be used by interrupt handlers, since they are not
1453 * allowed to block.
1454 *
1455 * @param timer Address of timer.
1456 *
1457 * @return Timer status.
1458 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001459__syscall uint32_t k_timer_status_sync(struct k_timer *timer);
Allan Stephens45bfa372016-10-12 12:39:42 -05001460
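/*
 * Illustrative sketch: pacing a processing loop off a periodic timer.
 * k_timer_status_sync() blocks until the next expiry (or returns at
 * once if one is already pending) and reports how many expiries
 * occurred since the last read, so overruns can be detected.
 * blink_timer, handle_overrun() and do_periodic_work() are hypothetical.
 *
 * @code
 * k_timer_start(&blink_timer, K_MSEC(100), K_MSEC(100));
 * for (;;) {
 *         uint32_t expiries = k_timer_status_sync(&blink_timer);
 *
 *         if (expiries > 1) {
 *                 handle_overrun(expiries - 1);
 *         }
 *         do_periodic_work();
 * }
 * @endcode
 */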
Andy Ross5a5d3da2020-03-09 13:59:15 -07001461#ifdef CONFIG_SYS_CLOCK_EXISTS
1462
1463/**
Andy Rosse39bf292020-03-19 10:30:33 -07001464 * @brief Get next expiration time of a timer, in system ticks
Andy Ross5a5d3da2020-03-09 13:59:15 -07001465 *
1466 * This routine returns the system uptime at which the timer will next
1467 * expire, in units of system ticks. If the timer is not running, the
1468 * current system time is returned.
1469 *
1470 * @param timer The timer object
1471 * @return Uptime of expiration, in ticks
1472 */
Peter Bigot0ab314f2020-11-16 15:28:59 -06001473__syscall k_ticks_t k_timer_expires_ticks(const struct k_timer *timer);
Andy Ross5a5d3da2020-03-09 13:59:15 -07001474
Peter Bigot0ab314f2020-11-16 15:28:59 -06001475static inline k_ticks_t z_impl_k_timer_expires_ticks(
1476 const struct k_timer *timer)
Andy Ross5a5d3da2020-03-09 13:59:15 -07001477{
1478 return z_timeout_expires(&timer->timeout);
1479}
1480
1481/**
Andy Rosse39bf292020-03-19 10:30:33 -07001482 * @brief Get time remaining before a timer next expires, in system ticks
Andy Ross5a5d3da2020-03-09 13:59:15 -07001483 *
1484 * This routine computes the time remaining before a running timer
1485 * next expires, in units of system ticks. If the timer is not
1486 * running, it returns zero.
 *
 * @param timer The timer object
 * @return Remaining time until expiration, in ticks
1487 */
Peter Bigot0ab314f2020-11-16 15:28:59 -06001488__syscall k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer);
Andy Ross5a5d3da2020-03-09 13:59:15 -07001489
Peter Bigot0ab314f2020-11-16 15:28:59 -06001490static inline k_ticks_t z_impl_k_timer_remaining_ticks(
1491 const struct k_timer *timer)
Andy Ross5a5d3da2020-03-09 13:59:15 -07001492{
1493 return z_timeout_remaining(&timer->timeout);
1494}
Andy Ross52e444b2018-09-28 09:06:37 -07001495
Allan Stephens45bfa372016-10-12 12:39:42 -05001496/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001497 * @brief Get time remaining before a timer next expires.
Allan Stephens45bfa372016-10-12 12:39:42 -05001498 *
1499 * This routine computes the (approximate) time remaining before a running
1500 * timer next expires. If the timer is not running, it returns zero.
1501 *
1502 * @param timer Address of timer.
1503 *
1504 * @return Remaining time (in milliseconds).
1505 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001506static inline uint32_t k_timer_remaining_get(struct k_timer *timer)
Johan Hedbergf99ad3f2016-12-09 10:39:49 +02001507{
Andy Ross5a5d3da2020-03-09 13:59:15 -07001508 return k_ticks_to_ms_floor32(k_timer_remaining_ticks(timer));
Johan Hedbergf99ad3f2016-12-09 10:39:49 +02001509}
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001510
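/*
 * Illustrative sketch: deciding whether enough time remains before the
 * next expiry to start a longer operation. my_timer, the 20 ms
 * threshold and start_slow_operation() are hypothetical.
 *
 * @code
 * if (k_timer_remaining_get(&my_timer) > 20) {
 *         start_slow_operation();
 * }
 * @endcode
 */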
Andy Ross5a5d3da2020-03-09 13:59:15 -07001511#endif /* CONFIG_SYS_CLOCK_EXISTS */
1512
Allan Stephensc98da842016-11-11 15:45:03 -05001513/**
Benjamin Walshe4e98f92017-01-12 19:38:53 -05001514 * @brief Associate user-specific data with a timer.
1515 *
1516 * This routine records the @a user_data with the @a timer, to be retrieved
1517 * later.
1518 *
1519 * It can be used e.g. in a timer handler shared across multiple subsystems to
1520 * retrieve data specific to the subsystem this timer is associated with.
1521 *
1522 * @param timer Address of timer.
1523 * @param user_data User data to associate with the timer.
1524 *
1525 * @return N/A
1526 */
Andrew Boiea354d492017-09-29 16:22:28 -07001527__syscall void k_timer_user_data_set(struct k_timer *timer, void *user_data);
1528
Anas Nashif954d5502018-02-25 08:37:28 -06001529/**
1530 * @internal
1531 */
Patrik Flykt4344e272019-03-08 14:19:05 -07001532static inline void z_impl_k_timer_user_data_set(struct k_timer *timer,
Andrew Boiea354d492017-09-29 16:22:28 -07001533 void *user_data)
Benjamin Walshe4e98f92017-01-12 19:38:53 -05001534{
1535 timer->user_data = user_data;
1536}
1537
1538/**
1539 * @brief Retrieve the user-specific data from a timer.
1540 *
1541 * @param timer Address of timer.
1542 *
1543 * @return The user data.
1544 */
Peter A. Bigotf1b86ca2020-09-18 16:24:57 -05001545__syscall void *k_timer_user_data_get(const struct k_timer *timer);
Andrew Boiea354d492017-09-29 16:22:28 -07001546
Peter A. Bigotf1b86ca2020-09-18 16:24:57 -05001547static inline void *z_impl_k_timer_user_data_get(const struct k_timer *timer)
Benjamin Walshe4e98f92017-01-12 19:38:53 -05001548{
1549 return timer->user_data;
1550}
1551
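/*
 * Illustrative sketch: one expiry handler shared by several timers,
 * each timer carrying a pointer to its own context via the user data.
 * The struct and names are hypothetical.
 *
 * @code
 * struct blink_ctx {
 *         int led;
 * };
 *
 * static void shared_expiry(struct k_timer *timer)
 * {
 *         struct blink_ctx *ctx = k_timer_user_data_get(timer);
 *
 *         toggle_led(ctx->led);
 * }
 *
 * k_timer_user_data_set(&blink_timer, &led0_ctx);
 * @endcode
 */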
Anas Nashif166f5192018-02-25 08:02:36 -06001552/** @} */
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001553
Allan Stephensc98da842016-11-11 15:45:03 -05001554/**
Allan Stephensc2f15a42016-11-17 12:24:22 -05001555 * @addtogroup clock_apis
Allan Stephensc98da842016-11-11 15:45:03 -05001556 * @{
1557 */
Allan Stephens45bfa372016-10-12 12:39:42 -05001558
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001559/**
Andy Rosse39bf292020-03-19 10:30:33 -07001560 * @brief Get system uptime, in system ticks.
Andy Ross914205c2020-03-10 15:26:38 -07001561 *
1562 * This routine returns the elapsed time since the system booted, in
Fabio Utzig39fa56b2020-09-11 10:14:37 -03001563 * ticks (cf. @option{CONFIG_SYS_CLOCK_TICKS_PER_SEC}), which is the
Andy Ross914205c2020-03-10 15:26:38 -07001564 * fundamental unit of resolution of kernel timekeeping.
1565 *
1566 * @return Current uptime in ticks.
1567 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001568__syscall int64_t k_uptime_ticks(void);
Andy Ross914205c2020-03-10 15:26:38 -07001569
1570/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001571 * @brief Get system uptime.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001572 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001573 * This routine returns the elapsed time since the system booted,
1574 * in milliseconds.
1575 *
David B. Kinder00c41ea2019-06-10 11:13:33 -07001576 * @note
David B. Kinder00c41ea2019-06-10 11:13:33 -07001577 * While this function returns time in milliseconds, it does
1578 * not mean it has millisecond resolution. The actual resolution depends on
Fabio Utzig39fa56b2020-09-11 10:14:37 -03001579 * the @option{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
Paul Sokolovsky65d51fd2019-02-04 22:44:50 +03001580 *
1581 * @return Current uptime in milliseconds.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001582 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001583static inline int64_t k_uptime_get(void)
Andy Ross914205c2020-03-10 15:26:38 -07001584{
1585 return k_ticks_to_ms_floor64(k_uptime_ticks());
1586}
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001587
Ramesh Thomas89ffd442017-02-05 19:37:19 -08001588/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001589 * @brief Get system uptime (32-bit version).
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001590 *
Peter Bigota6067a32019-08-28 08:19:26 -05001591 * This routine returns the lower 32 bits of the system uptime in
1592 * milliseconds.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001593 *
Peter Bigota6067a32019-08-28 08:19:26 -05001594 * Because correct conversion requires full precision of the system
1595 * clock there is no benefit to using this over k_uptime_get() unless
1596 * you know the application will never run long enough for the system
1597 * clock to approach 2^32 ticks. Calls to this function may involve
1598 * interrupt blocking and 64-bit math.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001599 *
David B. Kinder00c41ea2019-06-10 11:13:33 -07001600 * @note
David B. Kinder00c41ea2019-06-10 11:13:33 -07001601 * While this function returns time in milliseconds, it does
1602 * not mean it has millisecond resolution. The actual resolution depends on
Fabio Utzig39fa56b2020-09-11 10:14:37 -03001603 * the @option{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
Paul Sokolovsky65d51fd2019-02-04 22:44:50 +03001604 *
Peter Bigota6067a32019-08-28 08:19:26 -05001605 * @return The low 32 bits of the current uptime, in milliseconds.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001606 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001607static inline uint32_t k_uptime_get_32(void)
Peter Bigota6067a32019-08-28 08:19:26 -05001608{
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001609 return (uint32_t)k_uptime_get();
Peter Bigota6067a32019-08-28 08:19:26 -05001610}
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001611
1612/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001613 * @brief Get elapsed time.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001614 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001615 * This routine computes the elapsed time between the current system uptime
1616 * and an earlier reference time, in milliseconds.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001617 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001618 * @param reftime Pointer to a reference time, which is updated to the current
1619 * uptime upon return.
1620 *
1621 * @return Elapsed time.
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001622 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001623static inline int64_t k_uptime_delta(int64_t *reftime)
Andy Ross987c0e52018-09-27 16:50:00 -07001624{
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001625 int64_t uptime, delta;
Andy Ross987c0e52018-09-27 16:50:00 -07001626
1627 uptime = k_uptime_get();
1628 delta = uptime - *reftime;
1629 *reftime = uptime;
1630
1631 return delta;
1632}
Benjamin Walshba5ddc12016-09-21 16:01:22 -04001633
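/*
 * Illustrative sketch: timing a body of work in milliseconds. Because
 * k_uptime_delta() updates the reference on each call, successive calls
 * measure successive intervals. do_work() is hypothetical.
 *
 * @code
 * int64_t ref = k_uptime_get();
 *
 * do_work();
 * int64_t elapsed_ms = k_uptime_delta(&ref);
 * @endcode
 */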
1634/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001635 * @brief Read the hardware clock.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001636 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001637 * This routine returns the current time, as measured by the system's hardware
1638 * clock.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001639 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05001640 * @return Current hardware clock up-counter (in cycles).
Peter Mitsis348eb4c2016-10-26 11:22:14 -04001641 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001642static inline uint32_t k_cycle_get_32(void)
Andrew Boie979b17f2019-10-03 15:20:41 -07001643{
Andrew Boie4f77c2a2019-11-07 12:43:29 -08001644 return arch_k_cycle_get_32();
Andrew Boie979b17f2019-10-03 15:20:41 -07001645}
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001646
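/*
 * Illustrative sketch: cycle-level micro-timing with the hardware
 * up-counter. Unsigned subtraction stays correct across a single
 * counter wrap; do_work() is hypothetical.
 *
 * @code
 * uint32_t start = k_cycle_get_32();
 *
 * do_work();
 * uint32_t cycles = k_cycle_get_32() - start;
 * @endcode
 */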
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001647/**
Anas Nashif166f5192018-02-25 08:02:36 -06001648 * @}
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001649 */
1650
Allan Stephensc98da842016-11-11 15:45:03 -05001651/**
1652 * @cond INTERNAL_HIDDEN
1653 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001654
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001655struct k_queue {
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001656 sys_sflist_t data_q;
Andy Ross603ea422018-07-25 13:01:54 -07001657 struct k_spinlock lock;
Andy Ross99c2d2d2020-06-02 08:34:12 -07001658 _wait_q_t wait_q;
Luiz Augusto von Dentz84db6412017-07-13 12:43:59 +03001659
Andy Ross99c2d2d2020-06-02 08:34:12 -07001660 _POLL_EVENT;
Flavio Ceolind1ed3362018-12-07 11:39:13 -08001661 _OBJECT_TRACING_NEXT_PTR(k_queue)
Shih-Wei Teng5ebceeb2019-10-08 14:37:47 +08001662 _OBJECT_TRACING_LINKED_FLAG
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001663};
1664
Anas Nashif45a1d8a2020-04-24 11:29:17 -04001665#define Z_QUEUE_INITIALIZER(obj) \
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001666 { \
Toby Firth680ec0b2020-10-05 13:45:47 +01001667 .data_q = SYS_SFLIST_STATIC_INIT(&obj.data_q), \
Stephanos Ioannidisf628dcd2019-09-11 18:09:49 +09001668 .lock = { }, \
Andy Ross99c2d2d2020-06-02 08:34:12 -07001669 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
1670 _POLL_EVENT_OBJ_INIT(obj) \
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001671 _OBJECT_TRACING_INIT \
1672 }
1673
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001674extern void *z_queue_node_peek(sys_sfnode_t *node, bool needs_free);
1675
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001676/**
1677 * INTERNAL_HIDDEN @endcond
1678 */
1679
1680/**
1681 * @defgroup queue_apis Queue APIs
1682 * @ingroup kernel_apis
1683 * @{
1684 */
1685
1686/**
1687 * @brief Initialize a queue.
1688 *
1689 * This routine initializes a queue object, prior to its first use.
1690 *
1691 * @param queue Address of the queue.
1692 *
1693 * @return N/A
1694 */
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001695__syscall void k_queue_init(struct k_queue *queue);
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001696
1697/**
Paul Sokolovsky3f507072017-04-25 17:54:31 +03001698 * @brief Cancel waiting on a queue.
1699 *
1700 * This routine causes first thread pending on @a queue, if any, to
1701 * return from k_queue_get() call with NULL value (as if timeout expired).
Paul Sokolovsky45c0b202018-08-21 23:29:11 +03001702 * If the queue is being waited on by k_poll(), it will return with
1703 * -EINTR and K_POLL_STATE_CANCELLED state (and per above, subsequent
1704 * k_queue_get() will return NULL).
Paul Sokolovsky3f507072017-04-25 17:54:31 +03001705 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001706 * @funcprops \isr_ok
Paul Sokolovsky3f507072017-04-25 17:54:31 +03001707 *
1708 * @param queue Address of the queue.
1709 *
1710 * @return N/A
1711 */
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001712__syscall void k_queue_cancel_wait(struct k_queue *queue);
Paul Sokolovsky3f507072017-04-25 17:54:31 +03001713
1714/**
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001715 * @brief Append an element to the end of a queue.
1716 *
1717 * This routine appends a data item to @a queue. A queue data item must be
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001718 * aligned on a word boundary, and the first word of the item is reserved
1719 * for the kernel's use.
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001720 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001721 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001722 *
1723 * @param queue Address of the queue.
1724 * @param data Address of the data item.
1725 *
1726 * @return N/A
1727 */
1728extern void k_queue_append(struct k_queue *queue, void *data);
1729
1730/**
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001731 * @brief Append an element to a queue.
1732 *
Andrew Boieac3dcc12019-04-01 12:28:03 -07001733 * This routine appends a data item to @a queue. There is an implicit memory
1734 * allocation to create an additional temporary bookkeeping data structure from
1735 * the calling thread's resource pool, which is automatically freed when the
1736 * item is removed. The data itself is not copied.
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001737 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001738 * @funcprops \isr_ok
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001739 *
1740 * @param queue Address of the queue.
1741 * @param data Address of the data item.
1742 *
1743 * @retval 0 on success
1744 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
1745 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001746__syscall int32_t k_queue_alloc_append(struct k_queue *queue, void *data);
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001747
1748/**
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001749 * @brief Prepend an element to a queue.
1750 *
1751 * This routine prepends a data item to @a queue. A queue data item must be
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001752 * aligned on a word boundary, and the first word of the item is reserved
1753 * for the kernel's use.
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001754 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001755 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001756 *
1757 * @param queue Address of the queue.
1758 * @param data Address of the data item.
1759 *
1760 * @return N/A
1761 */
1762extern void k_queue_prepend(struct k_queue *queue, void *data);
1763
1764/**
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001765 * @brief Prepend an element to a queue.
1766 *
Andrew Boieac3dcc12019-04-01 12:28:03 -07001767 * This routine prepends a data item to @a queue. There is an implicit memory
1768 * allocation to create an additional temporary bookkeeping data structure from
1769 * the calling thread's resource pool, which is automatically freed when the
1770 * item is removed. The data itself is not copied.
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001771 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001772 * @funcprops \isr_ok
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001773 *
1774 * @param queue Address of the queue.
1775 * @param data Address of the data item.
1776 *
1777 * @retval 0 on success
1778 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
1779 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05001780__syscall int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data);
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001781
1782/**
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001783 * @brief Inserts an element to a queue.
1784 *
1785 * This routine inserts a data item to @a queue after previous item. A queue
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001786 * data item must be aligned on a word boundary, and the first word of
1787 * the item is reserved for the kernel's use.
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001788 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001789 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001790 *
1791 * @param queue Address of the queue.
1792 * @param prev Address of the previous data item.
1793 * @param data Address of the data item.
1794 *
1795 * @return N/A
1796 */
1797extern void k_queue_insert(struct k_queue *queue, void *prev, void *data);
1798
1799/**
1800 * @brief Atomically append a list of elements to a queue.
1801 *
1802 * This routine adds a list of data items to @a queue in one operation.
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001803 * The data items must be in a singly-linked list, with the first word
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001804 * in each data item pointing to the next data item; the list must be
1805 * NULL-terminated.
1806 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001807 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001808 *
1809 * @param queue Address of the queue.
1810 * @param head Pointer to first node in singly-linked list.
1811 * @param tail Pointer to last node in singly-linked list.
1812 *
Anas Nashif756d8b02019-06-16 09:53:55 -04001813 * @retval 0 on success
1814 * @retval -EINVAL on invalid supplied data
1815 *
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001816 */
Anas Nashif756d8b02019-06-16 09:53:55 -04001817extern int k_queue_append_list(struct k_queue *queue, void *head, void *tail);
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001818
1819/**
1820 * @brief Atomically add a list of elements to a queue.
1821 *
1822 * This routine adds a list of data items to @a queue in one operation.
1823 * The data items must be in a singly-linked list implemented using a
1824 * sys_slist_t object. Upon completion, the original list is empty.
1825 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001826 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001827 *
1828 * @param queue Address of the queue.
1829 * @param list Pointer to sys_slist_t object.
1830 *
Anas Nashif756d8b02019-06-16 09:53:55 -04001831 * @retval 0 on success
1832 * @retval -EINVAL on invalid data
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001833 */
Anas Nashif756d8b02019-06-16 09:53:55 -04001834extern int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001835
1836/**
1837 * @brief Get an element from a queue.
1838 *
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001839 * This routine removes the first data item from @a queue. The first word of the
1840 * data item is reserved for the kernel's use.
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001841 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001842 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
1843 *
1844 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001845 *
1846 * @param queue Address of the queue.
Andy Ross78327382020-03-05 15:18:14 -08001847 * @param timeout Non-negative waiting period to obtain a data item
1848 * or one of the special values K_NO_WAIT and
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01001849 * K_FOREVER.
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001850 *
1851 * @return Address of the data item if successful; NULL if returned
1852 * without waiting, or waiting period timed out.
1853 */
Andy Ross78327382020-03-05 15:18:14 -08001854__syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout);
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001855
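/*
 * Illustrative sketch: passing items through a queue. Because the first
 * word of each item is reserved for the kernel, the payload struct
 * leads with a spare pointer. All names are hypothetical.
 *
 * @code
 * struct work_item {
 *         void *reserved;
 *         int payload;
 * };
 *
 * static struct work_item item;
 *
 * k_queue_append(&my_queue, &item);
 *
 * struct work_item *got = k_queue_get(&my_queue, K_FOREVER);
 * @endcode
 */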
1856/**
Luiz Augusto von Dentz50b93772017-07-03 16:52:45 +03001857 * @brief Remove an element from a queue.
1858 *
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001859 * This routine removes the specified data item from @a queue. The first word
1860 * of the data item is reserved for the kernel's use. Removing elements from
Luiz Augusto von Dentz50b93772017-07-03 16:52:45 +03001861 * k_queue relies on sys_slist_find_and_remove, which is not a constant-time
 * operation.
1862 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001863 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
1864 *
1865 * @funcprops \isr_ok
Luiz Augusto von Dentz50b93772017-07-03 16:52:45 +03001866 *
1867 * @param queue Address of the queue.
1868 * @param data Address of the data item.
1869 *
1870 * @return true if data item was removed
1871 */
1872static inline bool k_queue_remove(struct k_queue *queue, void *data)
1873{
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001874 return sys_sflist_find_and_remove(&queue->data_q, (sys_sfnode_t *)data);
Luiz Augusto von Dentz50b93772017-07-03 16:52:45 +03001875}
1876
1877/**
Dhananjay Gundapu Jayakrishnan24bfa402018-08-22 12:33:00 +02001878 * @brief Append an element to a queue only if it's not present already.
1879 *
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04001880 * This routine appends a data item to @a queue. The first word of the data
1881 * item is reserved for the kernel's use. Appending elements to k_queue
Dhananjay Gundapu Jayakrishnan24bfa402018-08-22 12:33:00 +02001882 * relies on sys_slist_is_node_in_list which is not a constant time operation.
1883 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001884 * @funcprops \isr_ok
Dhananjay Gundapu Jayakrishnan24bfa402018-08-22 12:33:00 +02001885 *
1886 * @param queue Address of the queue.
1887 * @param data Address of the data item.
1888 *
1889 * @return true if data item was added, false if not
1890 */
1891static inline bool k_queue_unique_append(struct k_queue *queue, void *data)
1892{
1893 sys_sfnode_t *test;
1894
1895 SYS_SFLIST_FOR_EACH_NODE(&queue->data_q, test) {
1896 if (test == (sys_sfnode_t *) data) {
1897 return false;
1898 }
1899 }
1900
1901 k_queue_append(queue, data);
1902 return true;
1903}
1904
1905/**
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001906 * @brief Query a queue to see if it has data available.
1907 *
1908 * Note that the data might be already gone by the time this function returns
1909 * if other threads are also trying to read from the queue.
1910 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01001911 * @funcprops \isr_ok
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001912 *
1913 * @param queue Address of the queue.
1914 *
1915 * @return Non-zero if the queue is empty.
1916 * @return 0 if data is available.
1917 */
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001918__syscall int k_queue_is_empty(struct k_queue *queue);
1919
Patrik Flykt4344e272019-03-08 14:19:05 -07001920static inline int z_impl_k_queue_is_empty(struct k_queue *queue)
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001921{
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001922 return (int)sys_sflist_is_empty(&queue->data_q);
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001923}
1924
1925/**
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03001926 * @brief Peek element at the head of queue.
1927 *
1928 * Return element from the head of queue without removing it.
1929 *
1930 * @param queue Address of the queue.
1931 *
1932 * @return Head element, or NULL if queue is empty.
1933 */
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001934__syscall void *k_queue_peek_head(struct k_queue *queue);
1935
Patrik Flykt4344e272019-03-08 14:19:05 -07001936static inline void *z_impl_k_queue_peek_head(struct k_queue *queue)
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03001937{
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001938 return z_queue_node_peek(sys_sflist_peek_head(&queue->data_q), false);
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03001939}
1940
1941/**
1942 * @brief Peek element at the tail of queue.
1943 *
1944 * Return element from the tail of queue without removing it.
1945 *
1946 * @param queue Address of the queue.
1947 *
1948 * @return Tail element, or NULL if queue is empty.
1949 */
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001950__syscall void *k_queue_peek_tail(struct k_queue *queue);
1951
Patrik Flykt4344e272019-03-08 14:19:05 -07001952static inline void *z_impl_k_queue_peek_tail(struct k_queue *queue)
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03001953{
Andrew Boie2b9b4b22018-04-27 13:21:22 -07001954 return z_queue_node_peek(sys_sflist_peek_tail(&queue->data_q), false);
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03001955}
1956
1957/**
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001958 * @brief Statically define and initialize a queue.
1959 *
1960 * The queue can be accessed outside the module where it is defined using:
1961 *
1962 * @code extern struct k_queue <name>; @endcode
1963 *
1964 * @param name Name of the queue.
1965 */
1966#define K_QUEUE_DEFINE(name) \
Nicolas Pitreb1d37422019-06-03 10:51:32 -04001967 Z_STRUCT_SECTION_ITERABLE(k_queue, name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04001968 Z_QUEUE_INITIALIZER(name)
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001969
Anas Nashif166f5192018-02-25 08:02:36 -06001970/** @} */
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001971
Wentong Wu5611e922019-06-20 23:51:27 +08001972#ifdef CONFIG_USERSPACE
1973/**
1974 * @brief futex structure
1975 *
1976 * A k_futex is a lightweight mutual exclusion primitive designed
1977 * to minimize kernel involvement. Uncontended operation relies
1978 * only on atomic access to shared memory. k_futex objects are tracked as
Lauren Murphyd922fed2021-02-01 21:24:47 -06001979 * kernel objects and can live in user memory so that any access
1980 * bypasses the kernel object permission management mechanism.
Wentong Wu5611e922019-06-20 23:51:27 +08001981 */
1982struct k_futex {
1983 atomic_t val;
1984};
1985
1986/**
1987 * @brief futex kernel data structure
1988 *
1989 * z_futex_data is the helper data structure that k_futex uses to complete
1990 * contended futex operations on the kernel side; the z_futex_data structure
1991 * of every futex object is invisible in user mode.
1992 */
1993struct z_futex_data {
1994 _wait_q_t wait_q;
1995 struct k_spinlock lock;
1996};
1997
1998#define Z_FUTEX_DATA_INITIALIZER(obj) \
1999 { \
2000 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q) \
2001 }
2002
2003/**
2004 * @defgroup futex_apis FUTEX APIs
2005 * @ingroup kernel_apis
2006 * @{
2007 */
2008
2009/**
Wentong Wu5611e922019-06-20 23:51:27 +08002010 * @brief Pend the current thread on a futex
2011 *
2012 * Tests that the supplied futex contains the expected value, and if so,
2013 * goes to sleep until some other thread calls k_futex_wake() on it.
2014 *
2015 * @param futex Address of the futex.
2016 * @param expected Expected value of the futex, if it is different the caller
2017 * will not wait on it.
Andy Ross78327382020-03-05 15:18:14 -08002018 * @param timeout Non-negative waiting period on the futex, or
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01002019 * one of the special values K_NO_WAIT or K_FOREVER.
Wentong Wu5611e922019-06-20 23:51:27 +08002020 * @retval -EACCES Caller does not have read access to futex address.
2021 * @retval -EAGAIN If the futex value did not match the expected parameter.
2022 * @retval -EINVAL Futex parameter address not recognized by the kernel.
2023 * @retval -ETIMEDOUT Thread woke up due to timeout and not a futex wakeup.
2024 * @retval 0 if the caller went to sleep and was woken up. The caller
2025 * should check the futex's value on wakeup to determine if it needs
2026 * to block again.
2027 */
Andy Ross78327382020-03-05 15:18:14 -08002028__syscall int k_futex_wait(struct k_futex *futex, int expected,
2029 k_timeout_t timeout);
Wentong Wu5611e922019-06-20 23:51:27 +08002030
2031/**
2032 * @brief Wake one/all threads pending on a futex
2033 *
2034 * Wake up the highest priority thread pending on the supplied futex, or
2035 * wake up all the threads pending on the supplied futex, depending on the
2036 * value of @a wake_all.
2037 *
2038 * @param futex Futex to wake up pending threads.
2039 * @param wake_all If true, wake up all pending threads; If false,
2040 * wakeup the highest priority thread.
2041 * @retval -EACCES Caller does not have access to the futex address.
2042 * @retval -EINVAL Futex parameter address not recognized by the kernel.
2043 * @retval Number of threads that were woken up.
2044 */
2045__syscall int k_futex_wake(struct k_futex *futex, bool wake_all);
2046
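/*
 * Illustrative sketch: a minimal "wait for flag" built on a futex. The
 * uncontended check stays in user memory; the kernel is entered only to
 * sleep or wake. my_futex is a hypothetical, zero-initialized struct
 * k_futex. The waiting side:
 *
 * @code
 * while (atomic_get(&my_futex.val) == 0) {
 *         k_futex_wait(&my_futex, 0, K_FOREVER);
 * }
 * @endcode
 *
 * and the signaling side:
 *
 * @code
 * atomic_set(&my_futex.val, 1);
 * k_futex_wake(&my_futex, true);
 * @endcode
 *
 * The waiter re-checks the value on wakeup, since k_futex_wait() also
 * returns -EAGAIN when the value no longer matches the expected one.
 */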
2047/** @} */
2048#endif /* CONFIG_USERSPACE */
2049
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002050struct k_fifo {
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002051 struct k_queue _queue;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002052};
2053
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04002054/**
2055 * @cond INTERNAL_HIDDEN
2056 */
Patrik Flykt97b3bd12019-03-12 15:15:42 -06002057#define Z_FIFO_INITIALIZER(obj) \
Allan Stephensc98da842016-11-11 15:45:03 -05002058 { \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002059 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
Allan Stephensc98da842016-11-11 15:45:03 -05002060 }
2061
2062/**
2063 * INTERNAL_HIDDEN @endcond
2064 */
2065
2066/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002067 * @defgroup fifo_apis FIFO APIs
Allan Stephensc98da842016-11-11 15:45:03 -05002068 * @ingroup kernel_apis
2069 * @{
2070 */
2071
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002072/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002073 * @brief Initialize a FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002074 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002075 * This routine initializes a FIFO queue, prior to its first use.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002076 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002077 * @param fifo Address of the FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002078 *
2079 * @return N/A
2080 */
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002081#define k_fifo_init(fifo) \
Nicolas Pitrea04a2ca2019-05-20 23:02:39 -04002082 k_queue_init(&(fifo)->_queue)
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002083
2084/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002085 * @brief Cancel waiting on a FIFO queue.
Paul Sokolovsky3f507072017-04-25 17:54:31 +03002086 *
2087 * This routine causes the first thread pending on @a fifo, if any, to
2088 * return from k_fifo_get() call with NULL value (as if timeout
2089 * expired).
2090 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002091 * @funcprops \isr_ok
Paul Sokolovsky3f507072017-04-25 17:54:31 +03002092 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002093 * @param fifo Address of the FIFO queue.
Paul Sokolovsky3f507072017-04-25 17:54:31 +03002094 *
2095 * @return N/A
2096 */
2097#define k_fifo_cancel_wait(fifo) \
Nicolas Pitrea04a2ca2019-05-20 23:02:39 -04002098 k_queue_cancel_wait(&(fifo)->_queue)
Paul Sokolovsky3f507072017-04-25 17:54:31 +03002099
2100/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002101 * @brief Add an element to a FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002102 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002103 * This routine adds a data item to @a fifo. A FIFO data item must be
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04002104 * aligned on a word boundary, and the first word of the item is reserved
2105 * for the kernel's use.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002106 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002107 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002108 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002109 * @param fifo Address of the FIFO.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002110 * @param data Address of the data item.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002111 *
2112 * @return N/A
2113 */
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002114#define k_fifo_put(fifo, data) \
Nicolas Pitrea04a2ca2019-05-20 23:02:39 -04002115 k_queue_append(&(fifo)->_queue, data)
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002116
2117/**
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002118 * @brief Add an element to a FIFO queue.
2119 *
Andrew Boieac3dcc12019-04-01 12:28:03 -07002120 * This routine adds a data item to @a fifo. There is an implicit memory
2121 * allocation to create an additional temporary bookkeeping data structure from
2122 * the calling thread's resource pool, which is automatically freed when the
2123 * item is removed. The data itself is not copied.
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002124 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002125 * @funcprops \isr_ok
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002126 *
2127 * @param fifo Address of the FIFO.
2128 * @param data Address of the data item.
2129 *
2130 * @retval 0 on success
2131 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2132 */
2133#define k_fifo_alloc_put(fifo, data) \
Nicolas Pitrea04a2ca2019-05-20 23:02:39 -04002134 k_queue_alloc_append(&(fifo)->_queue, data)
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002135
2136/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002137 * @brief Atomically add a list of elements to a FIFO.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002138 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002139 * This routine adds a list of data items to @a fifo in one operation.
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04002140 * The data items must be in a singly-linked list, with the first word of
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002141 * each data item pointing to the next data item; the list must be
2142 * NULL-terminated.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002143 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002144 * @funcprops \isr_ok
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002145 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002146 * @param fifo Address of the FIFO queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002147 * @param head Pointer to first node in singly-linked list.
2148 * @param tail Pointer to last node in singly-linked list.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002149 *
2150 * @return N/A
2151 */
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002152#define k_fifo_put_list(fifo, head, tail) \
Nicolas Pitrea04a2ca2019-05-20 23:02:39 -04002153 k_queue_append_list(&(fifo)->_queue, head, tail)
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002154
2155/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002156 * @brief Atomically add a list of elements to a FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002157 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002158 * This routine adds a list of data items to @a fifo in one operation.
2159 * The data items must be in a singly-linked list implemented using a
2160 * sys_slist_t object. Upon completion, the sys_slist_t object is invalid
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002161 * and must be re-initialized via sys_slist_init().
2162 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002163 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002164 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002165 * @param fifo Address of the FIFO queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002166 * @param list Pointer to sys_slist_t object.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002167 *
2168 * @return N/A
2169 */
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002170#define k_fifo_put_slist(fifo, list) \
Nicolas Pitrea04a2ca2019-05-20 23:02:39 -04002171 k_queue_merge_slist(&(fifo)->_queue, list)
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002172
2173/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002174 * @brief Get an element from a FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002175 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002176 * This routine removes a data item from @a fifo in a "first in, first out"
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04002177 * manner. The first word of the data item is reserved for the kernel's use.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002178 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002179 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2180 *
2181 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002182 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002183 * @param fifo Address of the FIFO queue.
Andy Ross78327382020-03-05 15:18:14 -08002184 * @param timeout Waiting period to obtain a data item,
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002185 * or one of the special values K_NO_WAIT and K_FOREVER.
2186 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05002187 * @return Address of the data item if successful; NULL if returned
2188 * without waiting, or waiting period timed out.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002189 */
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002190#define k_fifo_get(fifo, timeout) \
Nicolas Pitrea04a2ca2019-05-20 23:02:39 -04002191 k_queue_get(&(fifo)->_queue, timeout)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002192
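/*
 * Illustrative sketch: the classic FIFO producer/consumer pairing. As
 * with queues, the first word of each item belongs to the kernel. All
 * names are hypothetical.
 *
 * @code
 * struct msg {
 *         void *fifo_reserved;
 *         uint8_t data[16];
 * };
 *
 * K_FIFO_DEFINE(msg_fifo);
 *
 * static struct msg my_msg;
 *
 * k_fifo_put(&msg_fifo, &my_msg);
 *
 * struct msg *rx = k_fifo_get(&msg_fifo, K_FOREVER);
 * @endcode
 */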
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002193/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002194 * @brief Query a FIFO queue to see if it has data available.
Benjamin Walsh39b80d82017-01-28 10:06:07 -05002195 *
2196 * Note that the data might be already gone by the time this function returns
Anas Nashif585fd1f2018-02-25 08:04:59 -06002197 * if other threads are also trying to read from the FIFO.
Benjamin Walsh39b80d82017-01-28 10:06:07 -05002198 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002199 * @funcprops \isr_ok
Benjamin Walsh39b80d82017-01-28 10:06:07 -05002200 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002201 * @param fifo Address of the FIFO queue.
Benjamin Walsh39b80d82017-01-28 10:06:07 -05002202 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002203 * @return Non-zero if the FIFO queue is empty.
Benjamin Walsh39b80d82017-01-28 10:06:07 -05002204 * @return 0 if data is available.
2205 */
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02002206#define k_fifo_is_empty(fifo) \
Nicolas Pitrea04a2ca2019-05-20 23:02:39 -04002207 k_queue_is_empty(&(fifo)->_queue)
Benjamin Walsh39b80d82017-01-28 10:06:07 -05002208
2209/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002210 * @brief Peek element at the head of a FIFO queue.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002211 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002212 * Return an element from the head of the FIFO queue without removing it. A
Ramakrishna Pallala92489ea2018-03-29 22:44:23 +05302213 * use case for this is when elements of the FIFO queue are themselves
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002214 * containers: on each iteration of processing, a head container is peeked,
 2215 * some data is processed out of it, and only once the container is empty
Anas Nashif585fd1f2018-02-25 08:04:59 -06002216 * is it completely removed from the FIFO queue.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002217 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002218 * @param fifo Address of the FIFO queue.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002219 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002220 * @return Head element, or NULL if the FIFO queue is empty.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002221 */
2222#define k_fifo_peek_head(fifo) \
Nicolas Pitrea04a2ca2019-05-20 23:02:39 -04002223 k_queue_peek_head(&(fifo)->_queue)
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002224
2225/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002226 * @brief Peek element at the tail of FIFO queue.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002227 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002228 * Return an element from the tail of the FIFO queue (without removing it). A
 2229 * use case for this is when elements of the FIFO queue are themselves
 2230 * containers: it may then be useful to add more data to the last container.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002231 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002232 * @param fifo Address of the FIFO queue.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002233 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002234 * @return Tail element, or NULL if the FIFO queue is empty.
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002235 */
2236#define k_fifo_peek_tail(fifo) \
Nicolas Pitrea04a2ca2019-05-20 23:02:39 -04002237 k_queue_peek_tail(&(fifo)->_queue)
Paul Sokolovsky16bb3ec2017-06-08 17:13:03 +03002238
2239/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002240 * @brief Statically define and initialize a FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002241 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002242 * The FIFO queue can be accessed outside the module where it is defined using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002243 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05002244 * @code extern struct k_fifo <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002245 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002246 * @param name Name of the FIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002247 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002248#define K_FIFO_DEFINE(name) \
Andrew Boie45979da2020-05-23 14:38:39 -07002249 Z_STRUCT_SECTION_ITERABLE_ALTERNATE(k_queue, k_fifo, name) = \
Patrik Flykt97b3bd12019-03-12 15:15:42 -06002250 Z_FIFO_INITIALIZER(name)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002251
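/*
 * Usage sketch (illustrative, not part of the upstream API text): draining a
 * statically defined FIFO queue with k_fifo_peek_head() before removal. The
 * names my_fifo, struct my_msg, and process() are assumptions made for this
 * example; k_fifo_put()/k_fifo_get() are declared earlier in this header.
 *
 * @code
 * struct my_msg {
 *     void *fifo_reserved;   // first word reserved for the kernel
 *     uint32_t payload;
 * };
 *
 * K_FIFO_DEFINE(my_fifo);
 *
 * void consume(void)
 * {
 *     while (!k_fifo_is_empty(&my_fifo)) {
 *         struct my_msg *msg = k_fifo_peek_head(&my_fifo);
 *
 *         process(msg->payload);                  // item is still queued here
 *         (void)k_fifo_get(&my_fifo, K_NO_WAIT);  // now actually remove it
 *     }
 * }
 * @endcode
 */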
Anas Nashif166f5192018-02-25 08:02:36 -06002252/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05002253
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002254struct k_lifo {
Luiz Augusto von Dentz0dc4dd42017-02-21 15:49:52 +02002255 struct k_queue _queue;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002256};
2257
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04002258/**
2259 * @cond INTERNAL_HIDDEN
2260 */
2261
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002262#define Z_LIFO_INITIALIZER(obj) \
Allan Stephensc98da842016-11-11 15:45:03 -05002263 { \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002264 ._queue = Z_QUEUE_INITIALIZER(obj._queue) \
Allan Stephensc98da842016-11-11 15:45:03 -05002265 }
2266
2267/**
2268 * INTERNAL_HIDDEN @endcond
2269 */
2270
2271/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002272 * @defgroup lifo_apis LIFO APIs
Allan Stephensc98da842016-11-11 15:45:03 -05002273 * @ingroup kernel_apis
2274 * @{
2275 */
2276
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002277/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002278 * @brief Initialize a LIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002279 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002280 * This routine initializes a LIFO queue object, prior to its first use.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002281 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002282 * @param lifo Address of the LIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002283 *
2284 * @return N/A
2285 */
Luiz Augusto von Dentz0dc4dd42017-02-21 15:49:52 +02002286#define k_lifo_init(lifo) \
Nicolas Pitrea04a2ca2019-05-20 23:02:39 -04002287 k_queue_init(&(lifo)->_queue)
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002288
2289/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002290 * @brief Add an element to a LIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002291 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002292 * This routine adds a data item to @a lifo. A LIFO queue data item must be
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04002293 * aligned on a word boundary, and the first word of the item is
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002294 * reserved for the kernel's use.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002295 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002296 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002297 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002298 * @param lifo Address of the LIFO queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002299 * @param data Address of the data item.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002300 *
2301 * @return N/A
2302 */
Luiz Augusto von Dentz0dc4dd42017-02-21 15:49:52 +02002303#define k_lifo_put(lifo, data) \
Nicolas Pitrea04a2ca2019-05-20 23:02:39 -04002304 k_queue_prepend(&(lifo)->_queue, data)
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002305
2306/**
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002307 * @brief Add an element to a LIFO queue.
2308 *
Andrew Boieac3dcc12019-04-01 12:28:03 -07002309 * This routine adds a data item to @a lifo. There is an implicit memory
2310 * allocation to create an additional temporary bookkeeping data structure from
2311 * the calling thread's resource pool, which is automatically freed when the
2312 * item is removed. The data itself is not copied.
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002313 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002314 * @funcprops \isr_ok
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002315 *
2316 * @param lifo Address of the LIFO.
2317 * @param data Address of the data item.
2318 *
2319 * @retval 0 on success
2320 * @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
2321 */
2322#define k_lifo_alloc_put(lifo, data) \
Nicolas Pitrea04a2ca2019-05-20 23:02:39 -04002323 k_queue_alloc_prepend(&(lifo)->_queue, data)
Andrew Boie2b9b4b22018-04-27 13:21:22 -07002324
2325/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002326 * @brief Get an element from a LIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002327 *
Anas Nashif56821172020-07-08 14:14:25 -04002328 * This routine removes a data item from @a lifo in a "last in, first out"
Nicolas Pitre659fa0d2019-05-21 22:13:01 -04002329 * manner. The first word of the data item is reserved for the kernel's use.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002330 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002331 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2332 *
2333 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002334 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002335 * @param lifo Address of the LIFO queue.
Andy Ross78327382020-03-05 15:18:14 -08002336 * @param timeout Waiting period to obtain a data item,
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002337 * or one of the special values K_NO_WAIT and K_FOREVER.
2338 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05002339 * @return Address of the data item if successful; NULL if returned
2340 * without waiting, or waiting period timed out.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002341 */
Luiz Augusto von Dentz0dc4dd42017-02-21 15:49:52 +02002342#define k_lifo_get(lifo, timeout) \
Nicolas Pitrea04a2ca2019-05-20 23:02:39 -04002343 k_queue_get(&(lifo)->_queue, timeout)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002344
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002345/**
Anas Nashif585fd1f2018-02-25 08:04:59 -06002346 * @brief Statically define and initialize a LIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002347 *
Anas Nashif585fd1f2018-02-25 08:04:59 -06002348 * The LIFO queue can be accessed outside the module where it is defined using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002349 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05002350 * @code extern struct k_lifo <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002351 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002352 * @param name Name of the LIFO queue.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002353 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002354#define K_LIFO_DEFINE(name) \
Andrew Boie45979da2020-05-23 14:38:39 -07002355 Z_STRUCT_SECTION_ITERABLE_ALTERNATE(k_queue, k_lifo, name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002356 Z_LIFO_INITIALIZER(name)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002357
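/*
 * Usage sketch (illustrative): a free list kept in LIFO order so the most
 * recently returned (cache-warm) buffer is handed out first. The names
 * free_list and struct buf are assumptions made for this example.
 *
 * @code
 * struct buf {
 *     void *lifo_reserved;   // first word reserved for the kernel
 *     uint8_t data[64];
 * };
 *
 * K_LIFO_DEFINE(free_list);
 *
 * void buf_free(struct buf *b)
 * {
 *     k_lifo_put(&free_list, b);
 * }
 *
 * struct buf *buf_alloc(void)
 * {
 *     return k_lifo_get(&free_list, K_FOREVER);   // blocks until one is freed
 * }
 * @endcode
 */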
Anas Nashif166f5192018-02-25 08:02:36 -06002358/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05002359
2360/**
2361 * @cond INTERNAL_HIDDEN
2362 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05002363#define K_STACK_FLAG_ALLOC ((uint8_t)1) /* Buffer was allocated */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002364
Nicolas Pitre3d51f7c2019-05-17 22:48:26 -04002365typedef uintptr_t stack_data_t;
2366
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002367struct k_stack {
2368 _wait_q_t wait_q;
Andy Rossf0933d02018-07-26 10:23:02 -07002369 struct k_spinlock lock;
Nicolas Pitre3d51f7c2019-05-17 22:48:26 -04002370 stack_data_t *base, *next, *top;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002371
Flavio Ceolind1ed3362018-12-07 11:39:13 -08002372 _OBJECT_TRACING_NEXT_PTR(k_stack)
Shih-Wei Teng5ebceeb2019-10-08 14:37:47 +08002373 _OBJECT_TRACING_LINKED_FLAG
Kumar Galaa1b77fd2020-05-27 11:26:57 -05002374 uint8_t flags;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002375};
2376
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002377#define Z_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
Allan Stephensc98da842016-11-11 15:45:03 -05002378 { \
Patrik Flykt4344e272019-03-08 14:19:05 -07002379 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
Allan Stephensc98da842016-11-11 15:45:03 -05002380 .base = stack_buffer, \
2381 .next = stack_buffer, \
2382 .top = stack_buffer + stack_num_entries, \
Anas Nashif2f203c22016-12-18 06:57:45 -05002383 _OBJECT_TRACING_INIT \
Allan Stephensc98da842016-11-11 15:45:03 -05002384 }
2385
2386/**
2387 * INTERNAL_HIDDEN @endcond
2388 */
2389
2390/**
2391 * @defgroup stack_apis Stack APIs
2392 * @ingroup kernel_apis
2393 * @{
2394 */
2395
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002396/**
2397 * @brief Initialize a stack.
2398 *
2399 * This routine initializes a stack object, prior to its first use.
2400 *
2401 * @param stack Address of the stack.
2402 * @param buffer Address of array used to hold stacked values.
2403 * @param num_entries Maximum number of values that can be stacked.
2404 *
2405 * @return N/A
2406 */
Andrew Boief3bee952018-05-02 17:44:39 -07002407void k_stack_init(struct k_stack *stack,
Kumar Galaa1b77fd2020-05-27 11:26:57 -05002408 stack_data_t *buffer, uint32_t num_entries);
Andrew Boief3bee952018-05-02 17:44:39 -07002409
2410
2411/**
2412 * @brief Initialize a stack.
2413 *
2414 * This routine initializes a stack object, prior to its first use. Internal
2415 * buffers will be allocated from the calling thread's resource pool.
 2416 * This memory will be released if k_stack_cleanup() is called, or, when
 2417 * userspace is enabled, if the stack object loses all references to it.
2418 *
2419 * @param stack Address of the stack.
2420 * @param num_entries Maximum number of values that can be stacked.
2421 *
2422 * @return -ENOMEM if memory couldn't be allocated
2423 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05002425__syscall int32_t k_stack_alloc_init(struct k_stack *stack,
2426 uint32_t num_entries);
Andrew Boief3bee952018-05-02 17:44:39 -07002427
2428/**
2429 * @brief Release a stack's allocated buffer
2430 *
2431 * If a stack object was given a dynamically allocated buffer via
2432 * k_stack_alloc_init(), this will free it. This function does nothing
2433 * if the buffer wasn't dynamically allocated.
2434 *
2435 * @param stack Address of the stack.
Anas Nashif1ed67d12019-06-16 08:58:10 -04002436 * @retval 0 on success
2437 * @retval -EAGAIN when object is still in use
Andrew Boief3bee952018-05-02 17:44:39 -07002438 */
Anas Nashif1ed67d12019-06-16 08:58:10 -04002439int k_stack_cleanup(struct k_stack *stack);
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002440
2441/**
2442 * @brief Push an element onto a stack.
2443 *
Nicolas Pitre3d51f7c2019-05-17 22:48:26 -04002444 * This routine adds a stack_data_t value @a data to @a stack.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002445 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002446 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002447 *
2448 * @param stack Address of the stack.
2449 * @param data Value to push onto the stack.
2450 *
Anas Nashif1ed67d12019-06-16 08:58:10 -04002451 * @retval 0 on success
2452 * @retval -ENOMEM if stack is full
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002453 */
Anas Nashif1ed67d12019-06-16 08:58:10 -04002454__syscall int k_stack_push(struct k_stack *stack, stack_data_t data);
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002455
2456/**
2457 * @brief Pop an element from a stack.
2458 *
Nicolas Pitre3d51f7c2019-05-17 22:48:26 -04002459 * This routine removes a stack_data_t value from @a stack in a "last in,
2460 * first out" manner and stores the value in @a data.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002461 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002462 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2463 *
2464 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002465 *
2466 * @param stack Address of the stack.
2467 * @param data Address of area to hold the value popped from the stack.
Andy Ross78327382020-03-05 15:18:14 -08002468 * @param timeout Waiting period to obtain a value,
2469 * or one of the special values K_NO_WAIT and
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01002470 * K_FOREVER.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002471 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05002472 * @retval 0 Element popped from stack.
2473 * @retval -EBUSY Returned without waiting.
2474 * @retval -EAGAIN Waiting period timed out.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002475 */
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01002476__syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
Andy Ross78327382020-03-05 15:18:14 -08002477 k_timeout_t timeout);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002478
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002479/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002480 * @brief Statically define and initialize a stack
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002481 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002482 * The stack can be accessed outside the module where it is defined using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002483 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05002484 * @code extern struct k_stack <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002485 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002486 * @param name Name of the stack.
2487 * @param stack_num_entries Maximum number of values that can be stacked.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002488 */
Peter Mitsis602e6a82016-10-17 11:48:43 -04002489#define K_STACK_DEFINE(name, stack_num_entries) \
Nicolas Pitre3d51f7c2019-05-17 22:48:26 -04002490 stack_data_t __noinit \
Peter Mitsis602e6a82016-10-17 11:48:43 -04002491 _k_stack_buf_##name[stack_num_entries]; \
Nicolas Pitreb1d37422019-06-03 10:51:32 -04002492 Z_STRUCT_SECTION_ITERABLE(k_stack, name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002493 Z_STACK_INITIALIZER(name, _k_stack_buf_##name, \
Peter Mitsis602e6a82016-10-17 11:48:43 -04002494 stack_num_entries)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002495
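/*
 * Usage sketch (illustrative): managing a small pool of slot indices with a
 * statically defined stack. NUM_SLOTS, slot_stack, and use_slot() are
 * assumptions made for this example.
 *
 * @code
 * #define NUM_SLOTS 8
 *
 * K_STACK_DEFINE(slot_stack, NUM_SLOTS);
 *
 * void init_slots(void)
 * {
 *     for (stack_data_t i = 0; i < NUM_SLOTS; i++) {
 *         (void)k_stack_push(&slot_stack, i);   // -ENOMEM only if full
 *     }
 * }
 *
 * int use_slot(void)
 * {
 *     stack_data_t slot;
 *
 *     if (k_stack_pop(&slot_stack, &slot, K_MSEC(100)) != 0) {
 *         return -EAGAIN;   // timed out waiting for a free slot
 *     }
 *     // ... use slot ...
 *     return k_stack_push(&slot_stack, slot);
 * }
 * @endcode
 */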
Anas Nashif166f5192018-02-25 08:02:36 -06002496/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05002497
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002498/**
2499 * @cond INTERNAL_HIDDEN
2500 */
Peter Bigot44539ed2020-11-21 06:58:58 -06002501
Allan Stephens6bba9b02016-11-16 14:56:54 -05002502struct k_work;
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002503struct k_work_q;
2504struct k_work_queue_config;
2505struct k_delayed_work;
2506extern struct k_work_q k_sys_work_q;
2507
2508/**
2509 * INTERNAL_HIDDEN @endcond
2510 */
2511
Allan Stephensc98da842016-11-11 15:45:03 -05002512/**
Anas Nashifce78d162018-05-24 12:43:11 -05002513 * @defgroup mutex_apis Mutex APIs
2514 * @ingroup kernel_apis
2515 * @{
Allan Stephensc98da842016-11-11 15:45:03 -05002516 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002517
Anas Nashifce78d162018-05-24 12:43:11 -05002518/**
2519 * Mutex Structure
2520 * @ingroup mutex_apis
2521 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002522struct k_mutex {
Anas Nashife71293e2019-12-04 20:00:14 -05002523 /** Mutex wait queue */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002524 _wait_q_t wait_q;
Anas Nashifce78d162018-05-24 12:43:11 -05002525 /** Mutex owner */
Benjamin Walshb7ef0cb2016-10-05 17:32:01 -04002526 struct k_thread *owner;
Anas Nashife71293e2019-12-04 20:00:14 -05002527
2528 /** Current lock count */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05002529 uint32_t lock_count;
Anas Nashife71293e2019-12-04 20:00:14 -05002530
2531 /** Original thread priority */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002532 int owner_orig_prio;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002533
Flavio Ceolind1ed3362018-12-07 11:39:13 -08002534 _OBJECT_TRACING_NEXT_PTR(k_mutex)
Shih-Wei Teng5ebceeb2019-10-08 14:37:47 +08002535 _OBJECT_TRACING_LINKED_FLAG
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002536};
2537
Anas Nashifce78d162018-05-24 12:43:11 -05002538/**
2539 * @cond INTERNAL_HIDDEN
2540 */
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002541#define Z_MUTEX_INITIALIZER(obj) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002542 { \
Patrik Flykt4344e272019-03-08 14:19:05 -07002543 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002544 .owner = NULL, \
2545 .lock_count = 0, \
2546 .owner_orig_prio = K_LOWEST_THREAD_PRIO, \
Anas Nashif2f203c22016-12-18 06:57:45 -05002547 _OBJECT_TRACING_INIT \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002548 }
2549
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002550/**
Allan Stephensc98da842016-11-11 15:45:03 -05002551 * INTERNAL_HIDDEN @endcond
2552 */
2553
2554/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002555 * @brief Statically define and initialize a mutex.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002556 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002557 * The mutex can be accessed outside the module where it is defined using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002558 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05002559 * @code extern struct k_mutex <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002560 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002561 * @param name Name of the mutex.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002562 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002563#define K_MUTEX_DEFINE(name) \
Nicolas Pitreb1d37422019-06-03 10:51:32 -04002564 Z_STRUCT_SECTION_ITERABLE(k_mutex, name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04002565 Z_MUTEX_INITIALIZER(name)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002566
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002567/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002568 * @brief Initialize a mutex.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002569 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002570 * This routine initializes a mutex object, prior to its first use.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002571 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002572 * Upon completion, the mutex is available and does not have an owner.
2573 *
2574 * @param mutex Address of the mutex.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002575 *
Anas Nashif86bb2d02019-05-04 10:18:13 -04002576 * @retval 0 Mutex object created
2577 *
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002578 */
Anas Nashif86bb2d02019-05-04 10:18:13 -04002579__syscall int k_mutex_init(struct k_mutex *mutex);
2580
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002581
2582/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002583 * @brief Lock a mutex.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002584 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002585 * This routine locks @a mutex. If the mutex is locked by another thread,
2586 * the calling thread waits until the mutex becomes available or until
2587 * a timeout occurs.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002588 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002589 * A thread is permitted to lock a mutex it has already locked. The operation
2590 * completes immediately and the lock count is increased by 1.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002591 *
Andrew Boie6af97932020-05-27 11:48:30 -07002592 * Mutexes may not be locked in ISRs.
2593 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002594 * @param mutex Address of the mutex.
Andy Ross78327382020-03-05 15:18:14 -08002595 * @param timeout Waiting period to lock the mutex,
2596 * or one of the special values K_NO_WAIT and
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01002597 * K_FOREVER.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002598 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05002599 * @retval 0 Mutex locked.
2600 * @retval -EBUSY Returned without waiting.
2601 * @retval -EAGAIN Waiting period timed out.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002602 */
Andy Ross78327382020-03-05 15:18:14 -08002603__syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002604
2605/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002606 * @brief Unlock a mutex.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002607 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002608 * This routine unlocks @a mutex. The mutex must already be locked by the
2609 * calling thread.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002610 *
2611 * The mutex cannot be claimed by another thread until it has been unlocked by
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002612 * the calling thread as many times as it was previously locked by that
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002613 * thread.
2614 *
Andrew Boie6af97932020-05-27 11:48:30 -07002615 * Mutexes may not be unlocked in ISRs, as mutexes must only be manipulated
2616 * in thread context due to ownership and priority inheritance semantics.
2617 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002618 * @param mutex Address of the mutex.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002619 *
Anas Nashif86bb2d02019-05-04 10:18:13 -04002620 * @retval 0 Mutex unlocked.
2621 * @retval -EPERM The current thread does not own the mutex
2622 * @retval -EINVAL The mutex is not locked
2623 *
Peter Mitsis348eb4c2016-10-26 11:22:14 -04002624 */
Anas Nashif86bb2d02019-05-04 10:18:13 -04002625__syscall int k_mutex_unlock(struct k_mutex *mutex);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002626
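/*
 * Usage sketch (illustrative): serializing access to shared state with a
 * statically defined mutex. my_mutex, shared_counter, and bump_counter()
 * are assumptions made for this example.
 *
 * @code
 * K_MUTEX_DEFINE(my_mutex);
 *
 * static int shared_counter;
 *
 * int bump_counter(void)
 * {
 *     if (k_mutex_lock(&my_mutex, K_MSEC(50)) != 0) {
 *         return -EAGAIN;   // lock not obtained within 50 ms
 *     }
 *     shared_counter++;
 *     return k_mutex_unlock(&my_mutex);   // must balance the lock above
 * }
 * @endcode
 */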
Allan Stephensc98da842016-11-11 15:45:03 -05002627/**
Anas Nashif166f5192018-02-25 08:02:36 -06002628 * @}
Allan Stephensc98da842016-11-11 15:45:03 -05002629 */
2630
Anas Nashif06eb4892020-08-23 12:39:09 -04002631
2632struct k_condvar {
2633 _wait_q_t wait_q;
2634};
2635
2636#define Z_CONDVAR_INITIALIZER(obj) \
2637 { \
2638 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
2639 }
2640
2641/**
2642 * @defgroup condvar_apis Condition Variables APIs
2643 * @ingroup kernel_apis
2644 * @{
2645 */
2646
2647/**
2648 * @brief Initialize a condition variable
2649 *
2650 * @param condvar pointer to a @p k_condvar structure
2651 * @retval 0 Condition variable created successfully
2652 */
2653__syscall int k_condvar_init(struct k_condvar *condvar);
2654
2655/**
2656 * @brief Signals one thread that is pending on the condition variable
2657 *
2658 * @param condvar pointer to a @p k_condvar structure
2659 * @retval 0 On success
2660 */
2661__syscall int k_condvar_signal(struct k_condvar *condvar);
2662
2663/**
2664 * @brief Unblock all threads that are pending on the condition
2665 * variable
2666 *
2667 * @param condvar pointer to a @p k_condvar structure
2668 * @return An integer with number of woken threads on success
2669 */
2670__syscall int k_condvar_broadcast(struct k_condvar *condvar);
2671
2672/**
2673 * @brief Waits on the condition variable releasing the mutex lock
2674 *
 2675 * Atomically releases the currently owned mutex, blocks the current thread
2676 * waiting on the condition variable specified by @a condvar,
2677 * and finally acquires the mutex again.
2678 *
2679 * The waiting thread unblocks only after another thread calls
2680 * k_condvar_signal, or k_condvar_broadcast with the same condition variable.
2681 *
2682 * @param condvar pointer to a @p k_condvar structure
2683 * @param mutex Address of the mutex.
2684 * @param timeout Waiting period for the condition variable
2685 * or one of the special values K_NO_WAIT and K_FOREVER.
2686 * @retval 0 On success
2687 * @retval -EAGAIN Waiting period timed out.
2688 */
2689__syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
2690 k_timeout_t timeout);
2691
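/*
 * Usage sketch (illustrative): the canonical predicate loop. The condition is
 * re-checked after k_condvar_wait() returns because a wakeup can race with
 * other consumers. lock, cond, and data_ready are assumptions made for this
 * example; K_CONDVAR_DEFINE() is documented just below.
 *
 * @code
 * K_MUTEX_DEFINE(lock);
 * K_CONDVAR_DEFINE(cond);
 *
 * static bool data_ready;
 *
 * void consumer(void)
 * {
 *     k_mutex_lock(&lock, K_FOREVER);
 *     while (!data_ready) {
 *         k_condvar_wait(&cond, &lock, K_FOREVER);
 *     }
 *     data_ready = false;   // consume the event while holding the mutex
 *     k_mutex_unlock(&lock);
 * }
 *
 * void producer(void)
 * {
 *     k_mutex_lock(&lock, K_FOREVER);
 *     data_ready = true;
 *     k_condvar_signal(&cond);
 *     k_mutex_unlock(&lock);
 * }
 * @endcode
 */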
2692/**
2693 * @brief Statically define and initialize a condition variable.
2694 *
2695 * The condition variable can be accessed outside the module where it is
2696 * defined using:
2697 *
2698 * @code extern struct k_condvar <name>; @endcode
2699 *
2700 * @param name Name of the condition variable.
2701 */
2702#define K_CONDVAR_DEFINE(name) \
2703 Z_STRUCT_SECTION_ITERABLE(k_condvar, name) = \
2704 Z_CONDVAR_INITIALIZER(name)
2705/**
2706 * @}
2707 */
2708
Allan Stephensc98da842016-11-11 15:45:03 -05002709/**
2710 * @cond INTERNAL_HIDDEN
2711 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002712
2713struct k_sem {
2714 _wait_q_t wait_q;
James Harrisb1042812021-03-03 12:02:05 -08002715 unsigned int count;
2716 unsigned int limit;
Peter Bigot7aefa3d2021-03-02 06:18:29 -06002717
Benjamin Walshacc68c12017-01-29 18:57:45 -05002718 _POLL_EVENT;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002719
Flavio Ceolind1ed3362018-12-07 11:39:13 -08002720 _OBJECT_TRACING_NEXT_PTR(k_sem)
Shih-Wei Teng5ebceeb2019-10-08 14:37:47 +08002721 _OBJECT_TRACING_LINKED_FLAG
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002722};
2723
Patrik Flykt97b3bd12019-03-12 15:15:42 -06002724#define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \
Allan Stephensc98da842016-11-11 15:45:03 -05002725 { \
Patrik Flykt4344e272019-03-08 14:19:05 -07002726 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
Allan Stephensc98da842016-11-11 15:45:03 -05002727 .count = initial_count, \
2728 .limit = count_limit, \
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +03002729 _POLL_EVENT_OBJ_INIT(obj) \
Anas Nashif2f203c22016-12-18 06:57:45 -05002730 _OBJECT_TRACING_INIT \
Allan Stephensc98da842016-11-11 15:45:03 -05002731 }
2732
2733/**
2734 * INTERNAL_HIDDEN @endcond
2735 */
2736
2737/**
2738 * @defgroup semaphore_apis Semaphore APIs
2739 * @ingroup kernel_apis
2740 * @{
2741 */
2742
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002743/**
James Harrisb1042812021-03-03 12:02:05 -08002744 * @brief Maximum limit value allowed for a semaphore.
2745 *
2746 * This is intended for use when a semaphore does not have
2747 * an explicit maximum limit, and instead is just used for
2748 * counting purposes.
2749 *
2750 */
2751#define K_SEM_MAX_LIMIT UINT_MAX
2752
2753/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002754 * @brief Initialize a semaphore.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002755 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002756 * This routine initializes a semaphore object, prior to its first use.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002757 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002758 * @param sem Address of the semaphore.
2759 * @param initial_count Initial semaphore count.
2760 * @param limit Maximum permitted semaphore count.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002761 *
James Harrisb1042812021-03-03 12:02:05 -08002762 * @see K_SEM_MAX_LIMIT
2763 *
Anas Nashif928af3c2019-05-04 10:36:14 -04002764 * @retval 0 Semaphore created successfully
2765 * @retval -EINVAL Invalid values
2766 *
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002767 */
Anas Nashif928af3c2019-05-04 10:36:14 -04002768__syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
Andrew Boie99280232017-09-29 14:17:47 -07002769 unsigned int limit);
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002770
2771/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002772 * @brief Take a semaphore.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002773 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002774 * This routine takes @a sem.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002775 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002776 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
2777 *
2778 * @funcprops \isr_ok
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002779 *
2780 * @param sem Address of the semaphore.
Andy Ross78327382020-03-05 15:18:14 -08002781 * @param timeout Waiting period to take the semaphore,
2782 * or one of the special values K_NO_WAIT and K_FOREVER.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002783 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05002784 * @retval 0 Semaphore taken.
2785 * @retval -EBUSY Returned without waiting.
James Harris53b81792021-03-04 15:47:27 -08002786 * @retval -EAGAIN Waiting period timed out,
2787 * or the semaphore was reset during the waiting period.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002788 */
Andy Ross78327382020-03-05 15:18:14 -08002789__syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002790
2791/**
2792 * @brief Give a semaphore.
2793 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002794 * This routine gives @a sem, unless the semaphore is already at its maximum
2795 * permitted count.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002796 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002797 * @funcprops \isr_ok
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002798 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002799 * @param sem Address of the semaphore.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002800 *
2801 * @return N/A
2802 */
Andrew Boie99280232017-09-29 14:17:47 -07002803__syscall void k_sem_give(struct k_sem *sem);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002804
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002805/**
James Harris53b81792021-03-04 15:47:27 -08002806 * @brief Resets a semaphore's count to zero.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002807 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002808 * This routine sets the count of @a sem to zero.
James Harris53b81792021-03-04 15:47:27 -08002809 * Any outstanding semaphore takes will be aborted
2810 * with -EAGAIN.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002811 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002812 * @param sem Address of the semaphore.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002813 *
2814 * @return N/A
2815 */
Andrew Boie990bf162017-10-03 12:36:49 -07002816__syscall void k_sem_reset(struct k_sem *sem);
Andrew Boiefc273c02017-09-23 12:51:23 -07002817
Anas Nashif954d5502018-02-25 08:37:28 -06002818/**
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002819 * @brief Get a semaphore's count.
2820 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002821 * This routine returns the current count of @a sem.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002822 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002823 * @param sem Address of the semaphore.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002824 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002825 * @return Current semaphore count.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002826 */
Andrew Boie990bf162017-10-03 12:36:49 -07002827__syscall unsigned int k_sem_count_get(struct k_sem *sem);
Andrew Boiefc273c02017-09-23 12:51:23 -07002828
Anas Nashif954d5502018-02-25 08:37:28 -06002829/**
2830 * @internal
2831 */
Patrik Flykt4344e272019-03-08 14:19:05 -07002832static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002833{
2834 return sem->count;
2835}
2836
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002837/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002838 * @brief Statically define and initialize a semaphore.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002839 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002840 * The semaphore can be accessed outside the module where it is defined using:
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002841 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05002842 * @code extern struct k_sem <name>; @endcode
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002843 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05002844 * @param name Name of the semaphore.
2845 * @param initial_count Initial semaphore count.
2846 * @param count_limit Maximum permitted semaphore count.
Benjamin Walshb9c1a062016-10-15 17:12:35 -04002847 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002848#define K_SEM_DEFINE(name, initial_count, count_limit) \
Nicolas Pitreb1d37422019-06-03 10:51:32 -04002849 Z_STRUCT_SECTION_ITERABLE(k_sem, name) = \
Patrik Flykt97b3bd12019-03-12 15:15:42 -06002850 Z_SEM_INITIALIZER(name, initial_count, count_limit); \
Rajavardhan Gundi68040c82018-04-27 10:15:15 +05302851 BUILD_ASSERT(((count_limit) != 0) && \
James Harrisb1042812021-03-03 12:02:05 -08002852 ((initial_count) <= (count_limit)) && \
2853 ((count_limit) <= K_SEM_MAX_LIMIT));
Benjamin Walsh456c6da2016-09-02 18:55:39 -04002854
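/*
 * Usage sketch (illustrative): counting interrupts with a semaphore. The
 * limit is K_SEM_MAX_LIMIT since the semaphore is used purely as a counter;
 * event_sem, my_isr(), and handler_thread() are assumptions for this example.
 *
 * @code
 * K_SEM_DEFINE(event_sem, 0, K_SEM_MAX_LIMIT);
 *
 * void my_isr(const void *arg)
 * {
 *     k_sem_give(&event_sem);   // never blocks, so it is ISR-safe
 * }
 *
 * void handler_thread(void)
 * {
 *     for (;;) {
 *         (void)k_sem_take(&event_sem, K_FOREVER);   // one take per event
 *         // ... handle one event ...
 *     }
 * }
 * @endcode
 */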
Anas Nashif166f5192018-02-25 08:02:36 -06002855/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05002856
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002857/**
2858 * @cond INTERNAL_HIDDEN
2859 */
2860
2861struct k_work_delayable;
2862struct k_work_sync;
2863
2864/**
2865 * INTERNAL_HIDDEN @endcond
2866 */
2867
2868/**
Anas Nashifc355b7e2021-04-14 08:49:05 -04002869 * @defgroup workqueue_apis Work Queue APIs
2870 * @ingroup kernel_apis
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002871 * @{
2872 */
2873
2874/** @brief The signature for a work item handler function.
2875 *
2876 * The function will be invoked by the thread animating a work queue.
2877 *
2878 * @param work the work item that provided the handler.
2879 */
2880typedef void (*k_work_handler_t)(struct k_work *work);
2881
2882/** @brief Initialize a (non-delayable) work structure.
2883 *
2884 * This must be invoked before submitting a work structure for the first time.
2885 * It need not be invoked again on the same work structure. It can be
2886 * re-invoked to change the associated handler, but this must be done when the
2887 * work item is idle.
2888 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002889 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002890 *
2891 * @param work the work structure to be initialized.
2892 *
2893 * @param handler the handler to be invoked by the work item.
2894 */
2895void k_work_init(struct k_work *work,
2896 k_work_handler_t handler);
2897
2898/** @brief Busy state flags from the work item.
2899 *
2900 * A zero return value indicates the work item appears to be idle.
2901 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002902 * @note This is a live snapshot of state, which may change before the result
2903 * is checked. Use locks where appropriate.
2904 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002905 * @funcprops \isr_ok
2906 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002907 * @param work pointer to the work item.
2908 *
2909 * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED,
2910 * K_WORK_RUNNING, and K_WORK_CANCELING.
2911 */
2912int k_work_busy_get(const struct k_work *work);
2913
2914/** @brief Test whether a work item is currently pending.
2915 *
 2916 * Wrapper to determine whether a work item is in a non-idle state.
2917 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002918 * @note This is a live snapshot of state, which may change before the result
2919 * is checked. Use locks where appropriate.
2920 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002921 * @funcprops \isr_ok
2922 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002923 * @param work pointer to the work item.
2924 *
2925 * @return true if and only if k_work_busy_get() returns a non-zero value.
2926 */
2927static inline bool k_work_is_pending(const struct k_work *work);
2928
2929/** @brief Submit a work item to a queue.
2930 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002931 * @funcprops \isr_ok
 2932 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002933 * @param queue pointer to the work queue on which the item should run. If
 2934 * NULL the queue from the most recent submission will be used.
 2935 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002936 * @param work pointer to the work item.
2937 *
2938 * @retval 0 if work was already submitted to a queue
2939 * @retval 1 if work was not submitted and has been queued to @p queue
2940 * @retval 2 if work was running and has been queued to the queue that was
2941 * running it
2942 * @retval -EBUSY
2943 * * if work submission was rejected because the work item is cancelling; or
2944 * * @p queue is draining; or
2945 * * @p queue is plugged.
2946 * @retval -EINVAL if @p queue is null and the work item has never been run.
2947 */
2948int k_work_submit_to_queue(struct k_work_q *queue,
2949 struct k_work *work);
2950
2951/** @brief Submit a work item to the system queue.
2952 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01002953 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05002954 *
2955 * @param work pointer to the work item.
2956 *
2957 * @return as with k_work_submit_to_queue().
2958 */
2959static inline int k_work_submit(struct k_work *work)
2960{
2961 return k_work_submit_to_queue(&k_sys_work_q, work);
2962}
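/*
 * Usage sketch (illustrative): deferring processing from an ISR to the system
 * work queue, using only the APIs declared above. report_work,
 * report_handler(), and my_isr() are assumptions made for this example.
 *
 * @code
 * static struct k_work report_work;
 *
 * static void report_handler(struct k_work *work)
 * {
 *     // runs in the system work queue thread, not in the ISR
 * }
 *
 * void app_init(void)
 * {
 *     k_work_init(&report_work, report_handler);
 * }
 *
 * void my_isr(const void *arg)
 * {
 *     int rc = k_work_submit(&report_work);   // 0, 1, or 2 indicate success
 *
 *     (void)rc;
 * }
 * @endcode
 */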
2963
2964/** @brief Wait for last-submitted instance to complete.
2965 *
2966 * Resubmissions may occur while waiting, including chained submissions (from
2967 * within the handler).
2968 *
2969 * @note Be careful of caller and work queue thread relative priority. If
2970 * this function sleeps it will not return until the work queue thread
2971 * completes the tasks that allow this thread to resume.
2972 *
2973 * @note Behavior is undefined if this function is invoked on @p work from a
2974 * work queue running @p work.
2975 *
2976 * @param work pointer to the work item.
2977 *
2978 * @param sync pointer to an opaque item containing state related to the
2979 * pending cancellation. The object must persist until the call returns, and
2980 * be accessible from both the caller thread and the work queue thread. The
2981 * object must not be used for any other flush or cancel operation until this
2982 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
2983 * must be allocated in coherent memory.
2984 *
2985 * @retval true if call had to wait for completion
2986 * @retval false if work was already idle
2987 */
2988bool k_work_flush(struct k_work *work,
2989 struct k_work_sync *sync);
2990
2991/** @brief Cancel a work item.
2992 *
2993 * This attempts to prevent a pending (non-delayable) work item from being
2994 * processed by removing it from the work queue. If the item is being
2995 * processed, the work item will continue to be processed, but resubmissions
2996 * are rejected until cancellation completes.
2997 *
2998 * If this returns zero cancellation is complete, otherwise something
2999 * (probably a work queue thread) is still referencing the item.
3000 *
3001 * See also k_work_cancel_sync().
3002 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003003 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003004 *
3005 * @param work pointer to the work item.
3006 *
3007 * @return the k_work_busy_get() status indicating the state of the item after all
3008 * cancellation steps performed by this call are completed.
3009 */
3010int k_work_cancel(struct k_work *work);
3011
3012/** @brief Cancel a work item and wait for it to complete.
3013 *
3014 * Same as k_work_cancel() but does not return until cancellation is complete.
3015 * This can be invoked by a thread after k_work_cancel() to synchronize with a
3016 * previous cancellation.
3017 *
3018 * On return the work structure will be idle unless something submits it after
3019 * the cancellation was complete.
3020 *
3021 * @note Be careful of caller and work queue thread relative priority. If
3022 * this function sleeps it will not return until the work queue thread
3023 * completes the tasks that allow this thread to resume.
3024 *
3025 * @note Behavior is undefined if this function is invoked on @p work from a
3026 * work queue running @p work.
3027 *
3028 * @param work pointer to the work item.
3029 *
3030 * @param sync pointer to an opaque item containing state related to the
3031 * pending cancellation. The object must persist until the call returns, and
3032 * be accessible from both the caller thread and the work queue thread. The
3033 * object must not be used for any other flush or cancel operation until this
3034 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3035 * must be allocated in coherent memory.
3036 *
Peter Bigot707dc222021-04-16 11:48:50 -05003037 * @retval true if work was pending (call had to wait for cancellation of a
3038 * running handler to complete, or scheduled or submitted operations were
3039 * cancelled);
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003040 * @retval false otherwise
3041 */
3042bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync);
3043
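/*
 * Usage sketch (illustrative): synchronously stopping a work item, e.g. on a
 * shutdown path. The k_work_sync object may live on the caller's stack except
 * where CONFIG_KERNEL_COHERENCE requires coherent memory; stop_processing()
 * is an assumption made for this example.
 *
 * @code
 * void stop_processing(struct k_work *work)
 * {
 *     struct k_work_sync sync;
 *
 *     if (k_work_cancel_sync(work, &sync)) {
 *         // had to wait: the item was queued or a handler was running
 *     }
 *     // the item is now idle unless something resubmits it
 * }
 * @endcode
 */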
3044/** @brief Initialize a work queue.
3045 *
3046 * This configures the work queue thread and starts it running. The function
3047 * should not be re-invoked on a queue.
3048 *
3049 * @param queue pointer to the queue structure.
3050 *
3051 * @param stack pointer to the work thread stack area.
3052 *
 3053 * @param stack_size size of the work thread stack area, in bytes.
3054 *
3055 * @param prio initial thread priority
3056 *
3057 * @param cfg optional additional configuration parameters. Pass @c
3058 * NULL if not required, to use the defaults documented in
3059 * k_work_queue_config.
3060 */
3061void k_work_queue_start(struct k_work_q *queue,
3062 k_thread_stack_t *stack, size_t stack_size,
3063 int prio, const struct k_work_queue_config *cfg);
3064
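/*
 * Usage sketch (illustrative): bringing up a dedicated work queue. The stack
 * size, the priority, and the k_work_queue_config field shown here are
 * assumptions made for this example.
 *
 * @code
 * K_THREAD_STACK_DEFINE(my_wq_stack, 1024);
 *
 * static struct k_work_q my_wq;
 *
 * void start_my_wq(void)
 * {
 *     static const struct k_work_queue_config cfg = {
 *         .name = "my_wq",   // assumed config field naming the queue thread
 *     };
 *
 *     k_work_queue_start(&my_wq, my_wq_stack,
 *                        K_THREAD_STACK_SIZEOF(my_wq_stack),
 *                        K_PRIO_PREEMPT(4), &cfg);
 * }
 * @endcode
 */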
3065/** @brief Access the thread that animates a work queue.
3066 *
3067 * This is necessary to grant a work queue thread access to things the work
3068 * items it will process are expected to use.
3069 *
3070 * @param queue pointer to the queue structure.
3071 *
3072 * @return the thread associated with the work queue.
3073 */
3074static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue);
3075
3076/** @brief Wait until the work queue has drained, optionally plugging it.
3077 *
3078 * This blocks submission to the work queue except when coming from queue
3079 * thread, and blocks the caller until no more work items are available in the
3080 * queue.
3081 *
3082 * If @p plug is true then submission will continue to be blocked after the
3083 * drain operation completes until k_work_queue_unplug() is invoked.
3084 *
3085 * Note that work items that are delayed are not yet associated with their
3086 * work queue. They must be cancelled externally if a goal is to ensure the
3087 * work queue remains empty. The @p plug feature can be used to prevent
3088 * delayed items from being submitted after the drain completes.
3089 *
3090 * @param queue pointer to the queue structure.
3091 *
3092 * @param plug if true the work queue will continue to block new submissions
3093 * after all items have drained.
3094 *
3095 * @retval 1 if call had to wait for the drain to complete
3096 * @retval 0 if call did not have to wait
3097 * @retval negative if wait was interrupted or failed
3098 */
3099int k_work_queue_drain(struct k_work_q *queue, bool plug);
3100
3101/** @brief Release a work queue to accept new submissions.
3102 *
3103 * This releases the block on new submissions placed when k_work_queue_drain()
3104 * is invoked with the @p plug option enabled. If this is invoked before the
3105 * drain completes new items may be submitted as soon as the drain completes.
3106 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003107 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003108 *
3109 * @param queue pointer to the queue structure.
3110 *
3111 * @retval 0 if successfully unplugged
3112 * @retval -EALREADY if the work queue was not plugged.
3113 */
3114int k_work_queue_unplug(struct k_work_q *queue);
3115
3116/** @brief Initialize a delayable work structure.
3117 *
3118 * This must be invoked before scheduling a delayable work structure for the
3119 * first time. It need not be invoked again on the same work structure. It
3120 * can be re-invoked to change the associated handler, but this must be done
3121 * when the work item is idle.
3122 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003123 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003124 *
3125 * @param dwork the delayable work structure to be initialized.
3126 *
3127 * @param handler the handler to be invoked by the work item.
3128 */
3129void k_work_init_delayable(struct k_work_delayable *dwork,
3130 k_work_handler_t handler);
3131
3132/**
3133 * @brief Get the parent delayable work structure from a work pointer.
3134 *
 3135 * This function is necessary when a @c k_work_handler_t function is passed to
 3136 * k_work_schedule_for_queue() and the handler needs to access data in the
 3137 * containing `k_work_delayable` structure or its enclosing object.
3138 *
3139 * @param work Address passed to the work handler
3140 *
3141 * @return Address of the containing @c k_work_delayable structure.
3142 */
3143static inline struct k_work_delayable *
3144k_work_delayable_from_work(struct k_work *work);
3145
3146/** @brief Busy state flags from the delayable work item.
3147 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003148 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003149 *
3150 * @note This is a live snapshot of state, which may change before the result
3151 * can be inspected. Use locks where appropriate.
3152 *
3153 * @param dwork pointer to the delayable work item.
3154 *
3155 * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED, K_WORK_RUNNING, and
3156 * K_WORK_CANCELING. A zero return value indicates the work item appears to
3157 * be idle.
3158 */
3159int k_work_delayable_busy_get(const struct k_work_delayable *dwork);
3160
3161/** @brief Test whether a delayed work item is currently pending.
3162 *
3163 * Wrapper to determine whether a delayed work item is in a non-idle state.
3164 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003165 * @note This is a live snapshot of state, which may change before the result
3166 * can be inspected. Use locks where appropriate.
3167 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003168 * @funcprops \isr_ok
3169 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003170 * @param dwork pointer to the delayable work item.
3171 *
3172 * @return true if and only if k_work_delayable_busy_get() returns a non-zero
3173 * value.
3174 */
3175static inline bool k_work_delayable_is_pending(
3176 const struct k_work_delayable *dwork);
3177
3178/** @brief Get the absolute tick count at which a scheduled delayable work
3179 * will be submitted.
3180 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003181 * @note This is a live snapshot of state, which may change before the result
3182 * can be inspected. Use locks where appropriate.
3183 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003184 * @funcprops \isr_ok
3185 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003186 * @param dwork pointer to the delayable work item.
3187 *
3188 * @return the tick count when the timer that will schedule the work item will
3189 * expire, or the current tick count if the work is not scheduled.
3190 */
3191static inline k_ticks_t k_work_delayable_expires_get(
3192 const struct k_work_delayable *dwork);
3193
3194/** @brief Get the number of ticks until a scheduled delayable work will be
3195 * submitted.
3196 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003197 * @note This is a live snapshot of state, which may change before the result
3198 * can be inspected. Use locks where appropriate.
3199 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003200 * @funcprops \isr_ok
3201 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003202 * @param dwork pointer to the delayable work item.
3203 *
3204 * @return the number of ticks until the timer that will schedule the work
3205 * item will expire, or zero if the item is not scheduled.
3206 */
3207static inline k_ticks_t k_work_delayable_remaining_get(
3208 const struct k_work_delayable *dwork);
3209
3210/** @brief Submit an idle work item to a queue after a delay.
3211 *
3212 * Unlike k_work_reschedule_for_queue() this is a no-op if the work item is
3213 * already scheduled or submitted, even if @p delay is @c K_NO_WAIT.
3214 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003215 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003216 *
3217 * @param queue the queue on which the work item should be submitted after the
3218 * delay.
3219 *
3220 * @param dwork pointer to the delayable work item.
3221 *
3222 * @param delay the time to wait before submitting the work item. If @c
3223 * K_NO_WAIT and the work is not pending this is equivalent to
3224 * k_work_submit_to_queue().
3225 *
3226 * @retval 0 if work was already scheduled or submitted.
3227 * @retval 1 if work has been scheduled.
3228 */
3229int k_work_schedule_for_queue(struct k_work_q *queue,
3230 struct k_work_delayable *dwork,
3231 k_timeout_t delay);
3232
3233/** @brief Submit an idle work item to the system work queue after a
3234 * delay.
3235 *
3236 * This is a thin wrapper around k_work_schedule_for_queue(), with all the API
 3237 * characteristics of that function.
3238 *
3239 * @param dwork pointer to the delayable work item.
3240 *
3241 * @param delay the time to wait before submitting the work item. If @c
3242 * K_NO_WAIT this is equivalent to k_work_submit_to_queue().
3243 *
3244 * @return as with k_work_schedule_for_queue().
3245 */
3246static inline int k_work_schedule(struct k_work_delayable *dwork,
3247 k_timeout_t delay)
3248{
3249 return k_work_schedule_for_queue(&k_sys_work_q, dwork, delay);
3250}
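/*
 * Usage sketch (illustrative): a simple debounce. Because k_work_schedule()
 * is a no-op while the item is already scheduled, only the first trigger in
 * a burst sets the deadline (use k_work_reschedule() to restart the delay
 * instead). debounce_work and on_button_edge() are assumptions made for this
 * example.
 *
 * @code
 * static void debounce_handler(struct k_work *work)
 * {
 *     // runs on the system work queue ~50 ms after the first trigger
 * }
 *
 * static struct k_work_delayable debounce_work;
 *
 * void app_init(void)
 * {
 *     k_work_init_delayable(&debounce_work, debounce_handler);
 * }
 *
 * void on_button_edge(void)   // may be called from an ISR
 * {
 *     (void)k_work_schedule(&debounce_work, K_MSEC(50));
 * }
 * @endcode
 */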
3251
3252/** @brief Reschedule a work item to a queue after a delay.
3253 *
3254 * Unlike k_work_schedule_for_queue() this function can change the deadline of
3255 * a scheduled work item, and will schedule a work item that isn't idle
3256 * (e.g. is submitted or running). This function does not affect ("unsubmit")
3257 * a work item that has been submitted to a queue.
3258 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003259 * @funcprops \isr_ok
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003260 *
3261 * @param queue the queue on which the work item should be submitted after the
3262 * delay.
3263 *
3264 * @param dwork pointer to the delayable work item.
3265 *
3266 * @param delay the time to wait before submitting the work item. If @c
3267 * K_NO_WAIT this is equivalent to k_work_submit_to_queue() after canceling
3268 * any previous scheduled submission.
3269 *
3270 * @note If delay is @c K_NO_WAIT ("no delay") the return values are as with
3271 * k_work_submit_to_queue().
3272 *
3273 * @retval 0 if delay is @c K_NO_WAIT and work was already on a queue
3274 * @retval 1 if
3275 * * delay is @c K_NO_WAIT and work was not submitted but has now been queued
3276 * to @p queue; or
3277 * * delay not @c K_NO_WAIT and work has been scheduled
3278 * @retval 2 if delay is @c K_NO_WAIT and work was running and has been queued
3279 * to the queue that was running it
3280 */
3281int k_work_reschedule_for_queue(struct k_work_q *queue,
3282 struct k_work_delayable *dwork,
3283 k_timeout_t delay);
3284
3285/** @brief Reschedule a work item to the system work queue after a
3286 * delay.
3287 *
3288 * This is a thin wrapper around k_work_reschedule_for_queue(), with all the
 3289 * API characteristics of that function.
3290 *
3291 * @param dwork pointer to the delayable work item.
3292 *
3293 * @param delay the time to wait before submitting the work item.
3294 *
3295 * @return as with k_work_reschedule_for_queue().
3296 */
3297static inline int k_work_reschedule(struct k_work_delayable *dwork,
3298 k_timeout_t delay)
3299{
3300 return k_work_reschedule_for_queue(&k_sys_work_q, dwork, delay);
3301}
3302
3303/** @brief Flush delayable work.
3304 *
3305 * If the work is scheduled, it is immediately submitted. Then the caller
3306 * blocks until the work completes, as with k_work_flush().
3307 *
3308 * @note Be careful of caller and work queue thread relative priority. If
3309 * this function sleeps it will not return until the work queue thread
3310 * completes the tasks that allow this thread to resume.
3311 *
3312 * @note Behavior is undefined if this function is invoked on @p dwork from a
3313 * work queue running @p dwork.
3314 *
3315 * @param dwork pointer to the delayable work item.
3316 *
3317 * @param sync pointer to an opaque item containing state related to the
3318 * pending cancellation. The object must persist until the call returns, and
3319 * be accessible from both the caller thread and the work queue thread. The
3320 * object must not be used for any other flush or cancel operation until this
3321 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3322 * must be allocated in coherent memory.
3323 *
3324 * @retval true if call had to wait for completion
3325 * @retval false if work was already idle
3326 */
3327bool k_work_flush_delayable(struct k_work_delayable *dwork,
3328 struct k_work_sync *sync);
3329
3330/** @brief Cancel delayable work.
3331 *
3332 * Similar to k_work_cancel() but for delayable work. If the work is
3333 * scheduled or submitted it is canceled. This function does not wait for the
3334 * cancellation to complete.
3335 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003336 * @note The work may still be running when this returns. Use
3337 * k_work_flush_delayable() or k_work_cancel_delayable_sync() to ensure it is
3338 * not running.
3339 *
3340 * @note Canceling delayable work does not prevent rescheduling it. It does
3341 * prevent submitting it until the cancellation completes.
3342 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003343 * @funcprops \isr_ok
3344 *
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003345 * @param dwork pointer to the delayable work item.
3346 *
3347 * @return the k_work_delayable_busy_get() status indicating the state of the
3348 * item after all cancellation steps performed by this call are completed.
3349 */
3350int k_work_cancel_delayable(struct k_work_delayable *dwork);
3351
3352/** @brief Cancel delayable work and wait.
3353 *
3354 * Like k_work_cancel_delayable() but waits until the work becomes idle.
3355 *
3356 * @note Canceling delayable work does not prevent rescheduling it. It does
3357 * prevent submitting it until the cancellation completes.
3358 *
3359 * @note Be careful of caller and work queue thread relative priority. If
3360 * this function sleeps it will not return until the work queue thread
3361 * completes the tasks that allow this thread to resume.
3362 *
3363 * @note Behavior is undefined if this function is invoked on @p dwork from a
3364 * work queue running @p dwork.
3365 *
3366 * @param dwork pointer to the delayable work item.
3367 *
3368 * @param sync pointer to an opaque item containing state related to the
3369 * pending cancellation. The object must persist until the call returns, and
3370 * be accessible from both the caller thread and the work queue thread. The
3371 * object must not be used for any other flush or cancel operation until this
3372 * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
3373 * must be allocated in coherent memory.
3374 *
Peter Bigot707dc222021-04-16 11:48:50 -05003375 * @retval true if work was not idle (call had to wait for cancellation of a
3376 * running handler to complete, or scheduled or submitted operations were
3377 * cancelled);
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003378 * @retval false otherwise
3379 */
3380bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
3381 struct k_work_sync *sync);
3382
3383enum {
3384/**
3385 * @cond INTERNAL_HIDDEN
3386 */
3387
3388 /* The atomic API is used for all work and queue flags fields to
3389 * enforce sequential consistency in SMP environments.
3390 */
3391
3392 /* Bits that represent the work item states. At least nine of the
3393 * combinations are distinct valid stable states.
3394 */
3395 K_WORK_RUNNING_BIT = 0,
3396 K_WORK_CANCELING_BIT = 1,
3397 K_WORK_QUEUED_BIT = 2,
3398 K_WORK_DELAYED_BIT = 3,
3399
3400 K_WORK_MASK = BIT(K_WORK_DELAYED_BIT) | BIT(K_WORK_QUEUED_BIT)
3401 | BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT),
3402
3403 /* Static work flags */
3404 K_WORK_DELAYABLE_BIT = 8,
3405 K_WORK_DELAYABLE = BIT(K_WORK_DELAYABLE_BIT),
3406
3407 /* Dynamic work queue flags */
3408 K_WORK_QUEUE_STARTED_BIT = 0,
3409 K_WORK_QUEUE_STARTED = BIT(K_WORK_QUEUE_STARTED_BIT),
3410 K_WORK_QUEUE_BUSY_BIT = 1,
3411 K_WORK_QUEUE_BUSY = BIT(K_WORK_QUEUE_BUSY_BIT),
3412 K_WORK_QUEUE_DRAIN_BIT = 2,
3413 K_WORK_QUEUE_DRAIN = BIT(K_WORK_QUEUE_DRAIN_BIT),
3414 K_WORK_QUEUE_PLUGGED_BIT = 3,
3415 K_WORK_QUEUE_PLUGGED = BIT(K_WORK_QUEUE_PLUGGED_BIT),
3416
3417 /* Static work queue flags */
3418 K_WORK_QUEUE_NO_YIELD_BIT = 8,
3419 K_WORK_QUEUE_NO_YIELD = BIT(K_WORK_QUEUE_NO_YIELD_BIT),
3420
3421/**
3422 * INTERNAL_HIDDEN @endcond
3423 */
3424 /* Transient work flags */
3425
3426 /** @brief Flag indicating a work item that is running under a work
3427 * queue thread.
3428 *
3429 * Accessed via k_work_busy_get(). May co-occur with other flags.
3430 */
3431 K_WORK_RUNNING = BIT(K_WORK_RUNNING_BIT),
3432
3433 /** @brief Flag indicating a work item that is being canceled.
3434 *
3435 * Accessed via k_work_busy_get(). May co-occur with other flags.
3436 */
3437 K_WORK_CANCELING = BIT(K_WORK_CANCELING_BIT),
3438
3439 /** @brief Flag indicating a work item that has been submitted to a
3440 * queue but has not started running.
3441 *
3442 * Accessed via k_work_busy_get(). May co-occur with other flags.
3443 */
3444 K_WORK_QUEUED = BIT(K_WORK_QUEUED_BIT),
3445
3446 /** @brief Flag indicating a delayed work item that is scheduled for
3447 * submission to a queue.
3448 *
3449 * Accessed via k_work_busy_get(). May co-occur with other flags.
3450 */
3451 K_WORK_DELAYED = BIT(K_WORK_DELAYED_BIT),
3452};
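/* A brief sketch of how these flags are typically consumed; the work item
 * name is illustrative, and k_work_busy_get() is declared earlier in this
 * file:
 *
 * @code
 * int busy = k_work_busy_get(&my_work);
 *
 * if (busy & K_WORK_QUEUED) {
 *         // submitted, but the handler has not started yet
 * }
 * if (busy & (K_WORK_RUNNING | K_WORK_CANCELING)) {
 *         // a handler is in flight, possibly being cancelled
 * }
 * if (busy == 0) {
 *         // idle: not delayed, queued, running, or canceling
 * }
 * @endcode
 */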
3453
3454/** @brief A structure used to submit work. */
3455struct k_work {
3456 /* All fields are protected by the work module spinlock. No fields
3457 * are to be accessed except through kernel API.
3458 */
3459
3460 /* Node to link into k_work_q pending list. */
3461 sys_snode_t node;
3462
3463 /* The function to be invoked by the work queue thread. */
3464 k_work_handler_t handler;
3465
3466 /* The queue on which the work item was last submitted. */
3467 struct k_work_q *queue;
3468
3469 /* State of the work item.
3470 *
3471 * The item can be DELAYED, QUEUED, and RUNNING simultaneously.
3472 *
3473 * It can be RUNNING and CANCELING simultaneously.
3474 */
3475 uint32_t flags;
3476};
3477
3478#define Z_WORK_INITIALIZER(work_handler) { \
3479 .handler = work_handler, \
3480}
3481
3482/** @brief A structure used to submit work after a delay. */
3483struct k_work_delayable {
3484 /* The work item. */
3485 struct k_work work;
3486
3487 /* Timeout used to submit work after a delay. */
3488 struct _timeout timeout;
3489
3490 /* The queue to which the work should be submitted. */
3491 struct k_work_q *queue;
3492};
3493
3494#define Z_WORK_DELAYABLE_INITIALIZER(work_handler) { \
3495 .work = { \
3496 .handler = work_handler, \
3497 .flags = K_WORK_DELAYABLE, \
3498 }, \
3499}
3500
3501/**
3502 * @brief Initialize a statically-defined delayable work item.
3503 *
3504 * This macro can be used to initialize a statically-defined delayable
3505 * work item, prior to its first use. For example,
3506 *
3507 * @code static K_WORK_DELAYABLE_DEFINE(<dwork>, <work_handler>); @endcode
3508 *
 3509 * Note that if the runtime dependencies permit initialization with
 3510 * k_work_init_delayable(), using that instead will eliminate the
 3511 * initialized object in ROM that this macro produces and copies in
 3512 * at system startup.
3513 *
3514 * @param work Symbol name for delayable work item object
3515 * @param work_handler Function to invoke each time work item is processed.
3516 */
3517#define K_WORK_DELAYABLE_DEFINE(work, work_handler) \
3518 struct k_work_delayable work \
3519 = Z_WORK_DELAYABLE_INITIALIZER(work_handler)
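/* For instance (names are illustrative; k_work_schedule() is declared
 * earlier in this file), a statically-defined item that reschedules
 * itself from its own handler:
 *
 * @code
 * static void blink_handler(struct k_work *work)
 * {
 *         struct k_work_delayable *dwork = k_work_delayable_from_work(work);
 *
 *         // toggle an LED here, then run again in 500 ms
 *         k_work_schedule(dwork, K_MSEC(500));
 * }
 *
 * static K_WORK_DELAYABLE_DEFINE(blink_work, blink_handler);
 *
 * void start_blinking(void)
 * {
 *         k_work_schedule(&blink_work, K_NO_WAIT);
 * }
 * @endcode
 */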
3520
3521/**
3522 * @cond INTERNAL_HIDDEN
3523 */
3524
3525/* Record used to wait for work to flush.
3526 *
3527 * The work item is inserted into the queue that will process (or is
3528 * processing) the item, and will be processed as soon as the item
3529 * completes. When the flusher is processed the semaphore will be
3530 * signaled, releasing the thread waiting for the flush.
3531 */
3532struct z_work_flusher {
3533 struct k_work work;
3534 struct k_sem sem;
3535};
3536
3537/* Record used to wait for work to complete a cancellation.
3538 *
3539 * The work item is inserted into a global queue of pending cancels.
3540 * When a cancelling work item goes idle any matching waiters are
3541 * removed from pending_cancels and are woken.
3542 */
3543struct z_work_canceller {
3544 sys_snode_t node;
3545 struct k_work *work;
3546 struct k_sem sem;
3547};
3548
3549/**
3550 * INTERNAL_HIDDEN @endcond
3551 */
3552
3553/** @brief A structure holding internal state for a pending synchronous
3554 * operation on a work item or queue.
3555 *
3556 * Instances of this type are provided by the caller for invocation of
3557 * k_work_flush(), k_work_cancel_sync() and sibling flush and cancel APIs. A
3558 * referenced object must persist until the call returns, and be accessible
3559 * from both the caller thread and the work queue thread.
3560 *
 3561 * @note If CONFIG_KERNEL_COHERENCE is enabled the object must be allocated in
 3562 * coherent memory; see arch_mem_coherent(). The stack on these architectures
 3563 * is generally not coherent, so the object must not be stack-allocated.
 3564 * Violations are detected by runtime assertion.
3565 */
3566struct k_work_sync {
3567 union {
3568 struct z_work_flusher flusher;
3569 struct z_work_canceller canceller;
3570 };
3571};
3572
3573/** @brief A structure holding optional configuration items for a work
3574 * queue.
3575 *
3576 * This structure, and values it references, are not retained by
3577 * k_work_queue_start().
3578 */
3579struct k_work_queue_config {
3580 /** The name to be given to the work queue thread.
3581 *
3582 * If left null the thread will not have a name.
3583 */
3584 const char *name;
3585
3586 /** Control whether the work queue thread should yield between
3587 * items.
3588 *
3589 * Yielding between items helps guarantee the work queue
3590 * thread does not starve other threads, including cooperative
3591 * ones released by a work item. This is the default behavior.
3592 *
3593 * Set this to @c true to prevent the work queue thread from
3594 * yielding between items. This may be appropriate when a
3595 * sequence of items should complete without yielding
3596 * control.
3597 */
3598 bool no_yield;
3599};
3600
3601/** @brief A structure used to hold work until it can be processed. */
3602struct k_work_q {
3603 /* The thread that animates the work. */
3604 struct k_thread thread;
3605
3606 /* All the following fields must be accessed only while the
3607 * work module spinlock is held.
3608 */
3609
3610 /* List of k_work items to be worked. */
3611 sys_slist_t pending;
3612
3613 /* Wait queue for idle work thread. */
3614 _wait_q_t notifyq;
3615
3616 /* Wait queue for threads waiting for the queue to drain. */
3617 _wait_q_t drainq;
3618
3619 /* Flags describing queue state. */
3620 uint32_t flags;
3621};
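/* A sketch of configuring and starting a queue with these options; the
 * names, stack size, and priority are illustrative, and
 * k_work_queue_start() is declared earlier in this file:
 *
 * @code
 * static K_THREAD_STACK_DEFINE(my_wq_stack, 1024);
 * static struct k_work_q my_wq;
 *
 * static const struct k_work_queue_config my_wq_cfg = {
 *         .name = "my_wq",
 *         .no_yield = false,  // keep the default yield between items
 * };
 *
 * void init_my_wq(void)
 * {
 *         k_work_queue_start(&my_wq, my_wq_stack,
 *                            K_THREAD_STACK_SIZEOF(my_wq_stack),
 *                            K_PRIO_PREEMPT(5), &my_wq_cfg);
 * }
 * @endcode
 */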
3622
3623/* Provide the implementation for inline functions declared above */
3624
3625static inline bool k_work_is_pending(const struct k_work *work)
3626{
3627 return k_work_busy_get(work) != 0;
3628}
3629
3630static inline struct k_work_delayable *
3631k_work_delayable_from_work(struct k_work *work)
3632{
3633 return CONTAINER_OF(work, struct k_work_delayable, work);
3634}
3635
3636static inline bool k_work_delayable_is_pending(
3637 const struct k_work_delayable *dwork)
3638{
3639 return k_work_delayable_busy_get(dwork) != 0;
3640}
3641
3642static inline k_ticks_t k_work_delayable_expires_get(
3643 const struct k_work_delayable *dwork)
3644{
3645 return z_timeout_expires(&dwork->timeout);
3646}
3647
3648static inline k_ticks_t k_work_delayable_remaining_get(
3649 const struct k_work_delayable *dwork)
3650{
3651 return z_timeout_remaining(&dwork->timeout);
3652}
3653
3654static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
3655{
3656 return &queue->thread;
3657}
3658
3659/* Legacy wrappers */
3660
3661/* to be deprecated */
3662static inline bool k_work_pending(const struct k_work *work)
3663{
3664 return k_work_is_pending(work);
3665}
3666
3667/* to be deprecated */
3668static inline void k_work_q_start(struct k_work_q *work_q,
3669 k_thread_stack_t *stack,
3670 size_t stack_size, int prio)
3671{
3672 k_work_queue_start(work_q, stack, stack_size, prio, NULL);
3673}
3674
3675/* to be deprecated */
3676struct k_delayed_work {
3677 struct k_work_delayable work;
3678};
3679
3680/* to be deprecated */
3681#define Z_DELAYED_WORK_INITIALIZER(work_handler) { \
3682 .work = Z_WORK_DELAYABLE_INITIALIZER(work_handler), \
3683}
3684
3685/* to be deprecated */
3686static inline void k_delayed_work_init(struct k_delayed_work *work,
3687 k_work_handler_t handler)
3688{
3689 k_work_init_delayable(&work->work, handler);
3690}
3691
3692/* to be deprecated */
3693static inline int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
3694 struct k_delayed_work *work,
3695 k_timeout_t delay)
3696{
3697 int rc = k_work_reschedule_for_queue(work_q, &work->work, delay);
3698
3699 /* Legacy API doesn't distinguish success cases. */
3700 return (rc >= 0) ? 0 : rc;
3701}
3702
3703/* to be deprecated */
3704static inline int k_delayed_work_submit(struct k_delayed_work *work,
3705 k_timeout_t delay)
3706{
3707 int rc = k_work_reschedule(&work->work, delay);
3708
3709 /* Legacy API doesn't distinguish success cases. */
3710 return (rc >= 0) ? 0 : rc;
3711}
3712
3713/* to be deprecated */
3714static inline int k_delayed_work_cancel(struct k_delayed_work *work)
3715{
3716 bool pending = k_work_delayable_is_pending(&work->work);
3717 int rc = k_work_cancel_delayable(&work->work);
3718
3719 /* Old return value rules:
3720 *
3721 * 0 if:
3722 * * Work item countdown cancelled before the item was submitted to
3723 * its queue; or
3724 * * Work item was removed from its queue before it was processed.
3725 *
3726 * -EINVAL if:
3727 * * Work item has never been submitted; or
3728 * * Work item has been successfully cancelled; or
3729 * * Timeout handler is in the process of submitting the work item to
3730 * its queue; or
3731 * * Work queue thread has removed the work item from the queue but
3732 * has not called its handler.
3733 *
3734 * -EALREADY if:
3735 * * Work queue thread has removed the work item from the queue and
3736 * cleared its pending flag; or
3737 * * Work queue thread is invoking the item handler; or
3738 * * Work item handler has completed.
 3739 *
 3740 *
 3741 * We can't reconstruct those states, so call it successful only when
 3742 * a pending item is no longer pending, -EINVAL if it was pending and
 3743 * still is, and -EALREADY if it wasn't pending (so presumably
 3744 * cancellation should have had no effect, assuming we didn't hit a
 3745 * race condition).
 3746 */
3747 if (pending) {
3748 return (rc == 0) ? 0 : -EINVAL;
3749 }
3750
3751 return -EALREADY;
3752}
3753
3754/* to be deprecated */
3755static inline bool k_delayed_work_pending(struct k_delayed_work *work)
3756{
3757 return k_work_delayable_is_pending(&work->work);
3758}
3759
3760/* to be deprecated */
3761static inline int32_t k_delayed_work_remaining_get(struct k_delayed_work *work)
3762{
3763 k_ticks_t rem = k_work_delayable_remaining_get(&work->work);
3764
3765 /* Probably should be ceil32, but was floor32 */
3766 return k_ticks_to_ms_floor32(rem);
3767}
3768
3769/* to be deprecated, not used in-tree */
3770static inline k_ticks_t k_delayed_work_expires_ticks(
3771 struct k_delayed_work *work)
3772{
3773 return k_work_delayable_expires_get(&work->work);
3774}
3775
3776/* to be deprecated, not used in-tree */
3777static inline k_ticks_t k_delayed_work_remaining_ticks(
3778 struct k_delayed_work *work)
3779{
3780 return k_work_delayable_remaining_get(&work->work);
3781}
3782
3783/** @} */
3784
Peter Bigot4e3b9262021-01-15 10:52:38 -06003785struct k_work_user;
3786
3787/**
Anas Nashifc355b7e2021-04-14 08:49:05 -04003788 * @addtogroup workqueue_apis
Peter Bigot4e3b9262021-01-15 10:52:38 -06003789 * @{
3790 */
3791
3792/**
3793 * @typedef k_work_user_handler_t
3794 * @brief Work item handler function type for user work queues.
3795 *
3796 * A work item's handler function is executed by a user workqueue's thread
3797 * when the work item is processed by the workqueue.
3798 *
3799 * @param work Address of the work item.
3800 *
3801 * @return N/A
3802 */
3803typedef void (*k_work_user_handler_t)(struct k_work_user *work);
3804
3805/**
3806 * @cond INTERNAL_HIDDEN
3807 */
3808
3809struct k_work_user_q {
3810 struct k_queue queue;
3811 struct k_thread thread;
3812};
3813
3814enum {
3815 K_WORK_USER_STATE_PENDING, /* Work item pending state */
3816};
3817
3818struct k_work_user {
3819 void *_reserved; /* Used by k_queue implementation. */
3820 k_work_user_handler_t handler;
3821 atomic_t flags;
3822};
3823
3824/**
3825 * INTERNAL_HIDDEN @endcond
3826 */
3827
3828#define Z_WORK_USER_INITIALIZER(work_handler) \
3829 { \
3830 .handler = work_handler, \
3831 }
3832
3833/**
3834 * @brief Initialize a statically-defined user work item.
3835 *
3836 * This macro can be used to initialize a statically-defined user work
3837 * item, prior to its first use. For example,
3838 *
3839 * @code static K_WORK_USER_DEFINE(<work>, <work_handler>); @endcode
3840 *
3841 * @param work Symbol name for work item object
3842 * @param work_handler Function to invoke each time work item is processed.
3843 */
3844#define K_WORK_USER_DEFINE(work, work_handler) \
3845 struct k_work_user work = Z_WORK_USER_INITIALIZER(work_handler)
3846
3847/**
3848 * @brief Initialize a userspace work item.
3849 *
3850 * This routine initializes a user workqueue work item, prior to its
3851 * first use.
3852 *
3853 * @param work Address of work item.
3854 * @param handler Function to invoke each time work item is processed.
3855 *
3856 * @return N/A
3857 */
3858static inline void k_work_user_init(struct k_work_user *work,
3859 k_work_user_handler_t handler)
3860{
3861 *work = (struct k_work_user)Z_WORK_USER_INITIALIZER(handler);
3862}
3863
3864/**
3865 * @brief Check if a userspace work item is pending.
3866 *
3867 * This routine indicates if user work item @a work is pending in a workqueue's
3868 * queue.
3869 *
3870 * @note Checking if the work is pending gives no guarantee that the
3871 * work will still be pending when this information is used. It is up to
3872 * the caller to make sure that this information is used in a safe manner.
3873 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003874 * @funcprops \isr_ok
Peter Bigot4e3b9262021-01-15 10:52:38 -06003875 *
3876 * @param work Address of work item.
3877 *
3878 * @return true if work item is pending, or false if it is not pending.
3879 */
3880static inline bool k_work_user_is_pending(struct k_work_user *work)
3881{
3882 return atomic_test_bit(&work->flags, K_WORK_USER_STATE_PENDING);
3883}
3884
3885/**
3886 * @brief Submit a work item to a user mode workqueue
3887 *
3888 * Submits a work item to a workqueue that runs in user mode. A temporary
 3889 * memory allocation is made from the caller's resource pool, which is freed
3890 * once the worker thread consumes the k_work item. The workqueue
3891 * thread must have memory access to the k_work item being submitted. The caller
3892 * must have permission granted on the work_q parameter's queue object.
3893 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01003894 * @funcprops \isr_ok
Peter Bigot4e3b9262021-01-15 10:52:38 -06003895 *
3896 * @param work_q Address of workqueue.
3897 * @param work Address of work item.
3898 *
3899 * @retval -EBUSY if the work item was already in some workqueue
3900 * @retval -ENOMEM if no memory for thread resource pool allocation
3901 * @retval 0 Success
3902 */
3903static inline int k_work_user_submit_to_queue(struct k_work_user_q *work_q,
3904 struct k_work_user *work)
3905{
3906 int ret = -EBUSY;
3907
3908 if (!atomic_test_and_set_bit(&work->flags,
3909 K_WORK_USER_STATE_PENDING)) {
3910 ret = k_queue_alloc_append(&work_q->queue, work);
3911
3912 /* Couldn't insert into the queue. Clear the pending bit
3913 * so the work item can be submitted again
3914 */
3915 if (ret != 0) {
3916 atomic_clear_bit(&work->flags,
3917 K_WORK_USER_STATE_PENDING);
3918 }
3919 }
3920
3921 return ret;
3922}
3923
3924/**
3925 * @brief Start a workqueue in user mode
3926 *
3927 * This works identically to k_work_queue_start() except it is callable from
3928 * user mode, and the worker thread created will run in user mode. The caller
3929 * must have permissions granted on both the work_q parameter's thread and
3930 * queue objects, and the same restrictions on priority apply as
3931 * k_thread_create().
3932 *
3933 * @param work_q Address of workqueue.
3934 * @param stack Pointer to work queue thread's stack space, as defined by
3935 * K_THREAD_STACK_DEFINE()
3936 * @param stack_size Size of the work queue thread's stack (in bytes), which
3937 * should either be the same constant passed to
3938 * K_THREAD_STACK_DEFINE() or the value of K_THREAD_STACK_SIZEOF().
3939 * @param prio Priority of the work queue's thread.
3940 * @param name optional thread name. If not null a copy is made into the
3941 * thread's name buffer.
3942 *
3943 * @return N/A
3944 */
3945extern void k_work_user_queue_start(struct k_work_user_q *work_q,
3946 k_thread_stack_t *stack,
3947 size_t stack_size, int prio,
3948 const char *name);
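/* A sketch of a complete user-mode workqueue setup; the names, stack
 * size, and priority are illustrative:
 *
 * @code
 * static K_THREAD_STACK_DEFINE(user_wq_stack, 1024);
 * static struct k_work_user_q user_wq;
 *
 * static void user_handler(struct k_work_user *work)
 * {
 *         // runs in the user-mode workqueue thread
 * }
 *
 * static K_WORK_USER_DEFINE(user_item, user_handler);
 *
 * void start_user_wq(void)
 * {
 *         k_work_user_queue_start(&user_wq, user_wq_stack,
 *                                 K_THREAD_STACK_SIZEOF(user_wq_stack),
 *                                 K_PRIO_PREEMPT(10), "user_wq");
 *
 *         (void)k_work_user_submit_to_queue(&user_wq, &user_item);
 * }
 * @endcode
 */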
3949
3950/** @} */
3951
Allan Stephensc98da842016-11-11 15:45:03 -05003952/**
Peter Bigot3d583982020-11-18 08:55:32 -06003953 * @cond INTERNAL_HIDDEN
3954 */
3955
3956struct k_work_poll {
3957 struct k_work work;
3958 struct k_work_q *workq;
3959 struct z_poller poller;
3960 struct k_poll_event *events;
3961 int num_events;
3962 k_work_handler_t real_handler;
3963 struct _timeout timeout;
3964 int poll_result;
3965};
3966
3967/**
3968 * INTERNAL_HIDDEN @endcond
3969 */
3970
3971/**
Anas Nashifc355b7e2021-04-14 08:49:05 -04003972 * @addtogroup workqueue_apis
Peter Bigot3d583982020-11-18 08:55:32 -06003973 * @{
3974 */
3975
3976/**
Peter Bigotdc34e7c2020-10-28 11:24:05 -05003977 * @brief Initialize a statically-defined work item.
3978 *
3979 * This macro can be used to initialize a statically-defined workqueue work
3980 * item, prior to its first use. For example,
3981 *
3982 * @code static K_WORK_DEFINE(<work>, <work_handler>); @endcode
3983 *
3984 * @param work Symbol name for work item object
3985 * @param work_handler Function to invoke each time work item is processed.
3986 */
3987#define K_WORK_DEFINE(work, work_handler) \
3988 struct k_work work = Z_WORK_INITIALIZER(work_handler)
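/* For instance (names are illustrative; k_work_submit() is declared
 * earlier in this file):
 *
 * @code
 * static void report_handler(struct k_work *work)
 * {
 *         // runs in the system workqueue thread
 * }
 *
 * K_WORK_DEFINE(report_work, report_handler);
 *
 * void some_isr(void)
 * {
 *         // defer the heavy lifting out of interrupt context
 *         k_work_submit(&report_work);
 * }
 * @endcode
 */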
3989
3990/**
3991 * @brief Initialize a statically-defined delayed work item.
3992 *
3993 * This macro can be used to initialize a statically-defined workqueue
3994 * delayed work item, prior to its first use. For example,
3995 *
3996 * @code static K_DELAYED_WORK_DEFINE(<work>, <work_handler>); @endcode
3997 *
3998 * @param work Symbol name for delayed work item object
3999 * @param work_handler Function to invoke each time work item is processed.
4000 */
4001#define K_DELAYED_WORK_DEFINE(work, work_handler) \
4002 struct k_delayed_work work = Z_DELAYED_WORK_INITIALIZER(work_handler)
4003
4004/**
Peter Bigot3d583982020-11-18 08:55:32 -06004005 * @brief Initialize a triggered work item.
4006 *
4007 * This routine initializes a workqueue triggered work item, prior to
4008 * its first use.
4009 *
4010 * @param work Address of triggered work item.
4011 * @param handler Function to invoke each time work item is processed.
4012 *
4013 * @return N/A
4014 */
4015extern void k_work_poll_init(struct k_work_poll *work,
4016 k_work_handler_t handler);
4017
4018/**
4019 * @brief Submit a triggered work item.
4020 *
4021 * This routine schedules work item @a work to be processed by workqueue
4022 * @a work_q when one of the given @a events is signaled. The routine
 4023 * initiates an internal poller for the work item and then returns to the
 4024 * caller. Only when one of the watched events happens is the work item
 4025 * actually submitted to the workqueue, making it pending.
4026 *
4027 * Submitting a previously submitted triggered work item that is still
4028 * waiting for the event cancels the existing submission and reschedules it
 4029 * using the new event list. Note that this behavior is inherently subject
4030 * to race conditions with the pre-existing triggered work item and work queue,
4031 * so care must be taken to synchronize such resubmissions externally.
4032 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004033 * @funcprops \isr_ok
Peter Bigot3d583982020-11-18 08:55:32 -06004034 *
4035 * @warning
4036 * Provided array of events as well as a triggered work item must be placed
4037 * in persistent memory (valid until work handler execution or work
4038 * cancellation) and cannot be modified after submission.
4039 *
4040 * @param work_q Address of workqueue.
4041 * @param work Address of delayed work item.
4042 * @param events An array of events which trigger the work.
4043 * @param num_events The number of events in the array.
4044 * @param timeout Timeout after which the work will be scheduled
4045 * for execution even if not triggered.
4046 *
4047 *
4048 * @retval 0 Work item started watching for events.
4049 * @retval -EINVAL Work item is being processed or has completed its work.
4050 * @retval -EADDRINUSE Work item is pending on a different workqueue.
4051 */
4052extern int k_work_poll_submit_to_queue(struct k_work_q *work_q,
4053 struct k_work_poll *work,
4054 struct k_poll_event *events,
4055 int num_events,
4056 k_timeout_t timeout);
4057
4058/**
4059 * @brief Submit a triggered work item to the system workqueue.
4060 *
4061 * This routine schedules work item @a work to be processed by system
4062 * workqueue when one of the given @a events is signaled. The routine
 4063 * initiates an internal poller for the work item and then returns to the
 4064 * caller. Only when one of the watched events happens is the work item
 4065 * actually submitted to the workqueue, making it pending.
4066 *
4067 * Submitting a previously submitted triggered work item that is still
4068 * waiting for the event cancels the existing submission and reschedules it
 4069 * using the new event list. Note that this behavior is inherently subject
4070 * to race conditions with the pre-existing triggered work item and work queue,
4071 * so care must be taken to synchronize such resubmissions externally.
4072 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004073 * @funcprops \isr_ok
Peter Bigot3d583982020-11-18 08:55:32 -06004074 *
4075 * @warning
4076 * Provided array of events as well as a triggered work item must not be
4077 * modified until the item has been processed by the workqueue.
4078 *
4079 * @param work Address of delayed work item.
4080 * @param events An array of events which trigger the work.
4081 * @param num_events The number of events in the array.
4082 * @param timeout Timeout after which the work will be scheduled
4083 * for execution even if not triggered.
4084 *
4085 * @retval 0 Work item started watching for events.
4086 * @retval -EINVAL Work item is being processed or has completed its work.
4087 * @retval -EADDRINUSE Work item is pending on a different workqueue.
4088 */
4089static inline int k_work_poll_submit(struct k_work_poll *work,
4090 struct k_poll_event *events,
4091 int num_events,
4092 k_timeout_t timeout)
4093{
4094 return k_work_poll_submit_to_queue(&k_sys_work_q, work,
4095 events, num_events, timeout);
4096}
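/* A sketch of triggered work driven by a poll signal; the names are
 * illustrative, and the k_poll APIs used are declared elsewhere in this
 * file. The event and work item are static, satisfying the persistence
 * requirement noted above:
 *
 * @code
 * static struct k_poll_signal trigger_sig;
 * static struct k_poll_event trigger_evt;
 * static struct k_work_poll trigger_work;
 *
 * static void trigger_handler(struct k_work *work)
 * {
 *         // runs once the signal is raised (or the timeout expires)
 * }
 *
 * void setup_trigger(void)
 * {
 *         k_poll_signal_init(&trigger_sig);
 *         k_poll_event_init(&trigger_evt, K_POLL_TYPE_SIGNAL,
 *                           K_POLL_MODE_NOTIFY_ONLY, &trigger_sig);
 *         k_work_poll_init(&trigger_work, trigger_handler);
 *
 *         (void)k_work_poll_submit(&trigger_work, &trigger_evt, 1,
 *                                  K_FOREVER);
 * }
 * @endcode
 */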
4097
4098/**
4099 * @brief Cancel a triggered work item.
4100 *
4101 * This routine cancels the submission of triggered work item @a work.
4102 * A triggered work item can only be canceled if no event triggered work
4103 * submission.
4104 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004105 * @funcprops \isr_ok
Peter Bigot3d583982020-11-18 08:55:32 -06004106 *
4107 * @param work Address of delayed work item.
4108 *
4109 * @retval 0 Work item canceled.
4110 * @retval -EINVAL Work item is being processed or has completed its work.
4111 */
4112extern int k_work_poll_cancel(struct k_work_poll *work);
4113
4114/** @} */
4115
4116/**
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004117 * @defgroup msgq_apis Message Queue APIs
4118 * @ingroup kernel_apis
4119 * @{
Allan Stephensc98da842016-11-11 15:45:03 -05004120 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004121
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004122/**
4123 * @brief Message Queue Structure
4124 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004125struct k_msgq {
Anas Nashife71293e2019-12-04 20:00:14 -05004126 /** Message queue wait queue */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004127 _wait_q_t wait_q;
Anas Nashife71293e2019-12-04 20:00:14 -05004128 /** Lock */
Andy Rossbe03dbd2018-07-26 10:23:02 -07004129 struct k_spinlock lock;
Anas Nashife71293e2019-12-04 20:00:14 -05004130 /** Message size */
Peter Mitsis026b4ed2016-10-13 11:41:45 -04004131 size_t msg_size;
Anas Nashife71293e2019-12-04 20:00:14 -05004132 /** Maximal number of messages */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004133 uint32_t max_msgs;
Anas Nashife71293e2019-12-04 20:00:14 -05004134 /** Start of message buffer */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004135 char *buffer_start;
Anas Nashife71293e2019-12-04 20:00:14 -05004136 /** End of message buffer */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004137 char *buffer_end;
Anas Nashife71293e2019-12-04 20:00:14 -05004138 /** Read pointer */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004139 char *read_ptr;
Anas Nashife71293e2019-12-04 20:00:14 -05004140 /** Write pointer */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004141 char *write_ptr;
Anas Nashife71293e2019-12-04 20:00:14 -05004142 /** Number of used messages */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004143 uint32_t used_msgs;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004144
Nick Gravesb445f132021-04-12 12:35:18 -07004145 _POLL_EVENT;
4146
Flavio Ceolind1ed3362018-12-07 11:39:13 -08004147 _OBJECT_TRACING_NEXT_PTR(k_msgq)
Shih-Wei Teng5ebceeb2019-10-08 14:37:47 +08004148 _OBJECT_TRACING_LINKED_FLAG
Anas Nashife71293e2019-12-04 20:00:14 -05004149
 4150 /** Message queue flags */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004151 uint8_t flags;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004152};
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004153/**
4154 * @cond INTERNAL_HIDDEN
4155 */
4156
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004157
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004158#define Z_MSGQ_INITIALIZER(obj, q_buffer, q_msg_size, q_max_msgs) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004159 { \
Patrik Flykt4344e272019-03-08 14:19:05 -07004160 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
Peter Mitsis1da807e2016-10-06 11:36:59 -04004161 .msg_size = q_msg_size, \
Charles E. Youse6d01f672019-03-18 10:27:34 -07004162 .max_msgs = q_max_msgs, \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004163 .buffer_start = q_buffer, \
Peter Mitsis1da807e2016-10-06 11:36:59 -04004164 .buffer_end = q_buffer + (q_max_msgs * q_msg_size), \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004165 .read_ptr = q_buffer, \
4166 .write_ptr = q_buffer, \
4167 .used_msgs = 0, \
Nick Gravesb445f132021-04-12 12:35:18 -07004168 _POLL_EVENT_OBJ_INIT(obj) \
Anas Nashif2f203c22016-12-18 06:57:45 -05004169 _OBJECT_TRACING_INIT \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004170 }
Kumar Galac8b94f42020-09-29 09:52:23 -05004171
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004172/**
4173 * INTERNAL_HIDDEN @endcond
4174 */
4175
Andrew Boie65a9d2a2017-06-27 10:51:23 -07004176
Andrew Boie0fe789f2018-04-12 18:35:56 -07004177#define K_MSGQ_FLAG_ALLOC BIT(0)
4178
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004179/**
4180 * @brief Message Queue Attributes
4181 */
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304182struct k_msgq_attrs {
Anas Nashife71293e2019-12-04 20:00:14 -05004183 /** Message Size */
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304184 size_t msg_size;
Anas Nashife71293e2019-12-04 20:00:14 -05004185 /** Maximal number of messages */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004186 uint32_t max_msgs;
Anas Nashife71293e2019-12-04 20:00:14 -05004187 /** Used messages */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004188 uint32_t used_msgs;
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304189};
4190
Allan Stephensc98da842016-11-11 15:45:03 -05004191
4192/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004193 * @brief Statically define and initialize a message queue.
Peter Mitsis1da807e2016-10-06 11:36:59 -04004194 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004195 * The message queue's ring buffer contains space for @a q_max_msgs messages,
4196 * each of which is @a q_msg_size bytes long. The buffer is aligned to a
Allan Stephensda827222016-11-09 14:23:58 -06004197 * @a q_align -byte boundary, which must be a power of 2. To ensure that each
4198 * message is similarly aligned to this boundary, @a q_msg_size must also be
4199 * a multiple of @a q_align.
Peter Mitsis1da807e2016-10-06 11:36:59 -04004200 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004201 * The message queue can be accessed outside the module where it is defined
4202 * using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004203 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05004204 * @code extern struct k_msgq <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004205 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004206 * @param q_name Name of the message queue.
4207 * @param q_msg_size Message size (in bytes).
4208 * @param q_max_msgs Maximum number of messages that can be queued.
Allan Stephensda827222016-11-09 14:23:58 -06004209 * @param q_align Alignment of the message queue's ring buffer.
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004210 *
Peter Mitsis1da807e2016-10-06 11:36:59 -04004211 */
Nicolas Pitreb1d37422019-06-03 10:51:32 -04004212#define K_MSGQ_DEFINE(q_name, q_msg_size, q_max_msgs, q_align) \
4213 static char __noinit __aligned(q_align) \
4214 _k_fifo_buf_##q_name[(q_max_msgs) * (q_msg_size)]; \
4215 Z_STRUCT_SECTION_ITERABLE(k_msgq, q_name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004216 Z_MSGQ_INITIALIZER(q_name, _k_fifo_buf_##q_name, \
Peter Mitsis1da807e2016-10-06 11:36:59 -04004217 q_msg_size, q_max_msgs)
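/* For example (the message type and names are illustrative), a queue of
 * ten 4-byte-aligned records and the producer/consumer calls that use it:
 *
 * @code
 * struct sensor_msg {
 *         uint32_t reading;
 * };
 *
 * K_MSGQ_DEFINE(sensor_q, sizeof(struct sensor_msg), 10, 4);
 *
 * void producer(uint32_t reading)
 * {
 *         struct sensor_msg msg = { .reading = reading };
 *
 *         (void)k_msgq_put(&sensor_q, &msg, K_NO_WAIT);
 * }
 *
 * void consumer(void)
 * {
 *         struct sensor_msg msg;
 *
 *         if (k_msgq_get(&sensor_q, &msg, K_FOREVER) == 0) {
 *                 // process msg.reading
 *         }
 * }
 * @endcode
 */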
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004218
Peter Mitsisd7a37502016-10-13 11:37:40 -04004219/**
4220 * @brief Initialize a message queue.
4221 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004222 * This routine initializes a message queue object, prior to its first use.
4223 *
Allan Stephensda827222016-11-09 14:23:58 -06004224 * The message queue's ring buffer must contain space for @a max_msgs messages,
4225 * each of which is @a msg_size bytes long. The buffer must be aligned to an
4226 * N-byte boundary, where N is a power of 2 (i.e. 1, 2, 4, ...). To ensure
 4227 * that each message is similarly aligned to this boundary, @a msg_size
4228 * must also be a multiple of N.
4229 *
Anas Nashif25c87db2021-03-29 10:54:23 -04004230 * @param msgq Address of the message queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004231 * @param buffer Pointer to ring buffer that holds queued messages.
4232 * @param msg_size Message size (in bytes).
Peter Mitsisd7a37502016-10-13 11:37:40 -04004233 * @param max_msgs Maximum number of messages that can be queued.
4234 *
4235 * @return N/A
4236 */
Anas Nashif25c87db2021-03-29 10:54:23 -04004237void k_msgq_init(struct k_msgq *msgq, char *buffer, size_t msg_size,
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004238 uint32_t max_msgs);
Andrew Boie0fe789f2018-04-12 18:35:56 -07004239
4240/**
4241 * @brief Initialize a message queue.
4242 *
4243 * This routine initializes a message queue object, prior to its first use,
4244 * allocating its internal ring buffer from the calling thread's resource
4245 * pool.
4246 *
4247 * Memory allocated for the ring buffer can be released by calling
4248 * k_msgq_cleanup(), or if userspace is enabled and the msgq object loses
4249 * all of its references.
4250 *
Anas Nashif4b386592019-11-25 09:30:47 -05004251 * @param msgq Address of the message queue.
Andrew Boie0fe789f2018-04-12 18:35:56 -07004252 * @param msg_size Message size (in bytes).
4253 * @param max_msgs Maximum number of messages that can be queued.
4254 *
4255 * @return 0 on success, -ENOMEM if there was insufficient memory in the
4256 * thread's resource pool, or -EINVAL if the size parameters cause
4257 * an integer overflow.
4258 */
Anas Nashif4b386592019-11-25 09:30:47 -05004259__syscall int k_msgq_alloc_init(struct k_msgq *msgq, size_t msg_size,
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004260 uint32_t max_msgs);
Andrew Boie0fe789f2018-04-12 18:35:56 -07004261
Anas Nashife71293e2019-12-04 20:00:14 -05004262/**
Anas Nashif4b386592019-11-25 09:30:47 -05004263 * @brief Release allocated buffer for a queue
Anas Nashife71293e2019-12-04 20:00:14 -05004264 *
4265 * Releases memory allocated for the ring buffer.
Anas Nashif4b386592019-11-25 09:30:47 -05004266 *
4267 * @param msgq message queue to cleanup
4268 *
Anas Nashif11b93652019-06-16 08:43:48 -04004269 * @retval 0 on success
4270 * @retval -EBUSY Queue not empty
Anas Nashife71293e2019-12-04 20:00:14 -05004271 */
Anas Nashif11b93652019-06-16 08:43:48 -04004272int k_msgq_cleanup(struct k_msgq *msgq);
Peter Mitsisd7a37502016-10-13 11:37:40 -04004273
4274/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004275 * @brief Send a message to a message queue.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004276 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004277 * This routine sends a message to message queue @a msgq.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004278 *
Lauren Murphyf29a2d12020-09-16 21:13:40 -05004279 * @note The message content is copied from @a data into @a msgq and the @a data
4280 * pointer is not retained, so the message content will not be modified
4281 * by this function.
Benjamin Walsh8215ce12016-11-09 19:45:19 -05004282 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004283 * @funcprops \isr_ok
4284 *
Anas Nashif4b386592019-11-25 09:30:47 -05004285 * @param msgq Address of the message queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004286 * @param data Pointer to the message.
Andy Ross78327382020-03-05 15:18:14 -08004287 * @param timeout Non-negative waiting period to add the message,
4288 * or one of the special values K_NO_WAIT and
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01004289 * K_FOREVER.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004290 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004291 * @retval 0 Message sent.
4292 * @retval -ENOMSG Returned without waiting or queue purged.
4293 * @retval -EAGAIN Waiting period timed out.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004294 */
Lauren Murphyf29a2d12020-09-16 21:13:40 -05004295__syscall int k_msgq_put(struct k_msgq *msgq, const void *data, k_timeout_t timeout);
Peter Mitsisd7a37502016-10-13 11:37:40 -04004296
4297/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004298 * @brief Receive a message from a message queue.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004299 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004300 * This routine receives a message from message queue @a msgq in a "first in,
4301 * first out" manner.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004302 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004303 * @note @a timeout must be set to K_NO_WAIT if called from ISR.
4304 *
4305 * @funcprops \isr_ok
Benjamin Walsh8215ce12016-11-09 19:45:19 -05004306 *
Anas Nashif4b386592019-11-25 09:30:47 -05004307 * @param msgq Address of the message queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004308 * @param data Address of area to hold the received message.
Andy Ross78327382020-03-05 15:18:14 -08004309 * @param timeout Waiting period to receive the message,
4310 * or one of the special values K_NO_WAIT and
Krzysztof Chruscinski94f742e2019-11-07 19:28:00 +01004311 * K_FOREVER.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004312 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004313 * @retval 0 Message received.
4314 * @retval -ENOMSG Returned without waiting.
4315 * @retval -EAGAIN Waiting period timed out.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004316 */
Andy Ross78327382020-03-05 15:18:14 -08004317__syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout);
Peter Mitsisd7a37502016-10-13 11:37:40 -04004318
4319/**
Sathish Kuttan3efd8e12018-11-09 21:03:10 -08004320 * @brief Peek/read a message from a message queue.
4321 *
 4322 * This routine reads a message from message queue @a msgq in a "first in,
4323 * first out" manner and leaves the message in the queue.
4324 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004325 * @funcprops \isr_ok
Sathish Kuttan3efd8e12018-11-09 21:03:10 -08004326 *
Anas Nashif4b386592019-11-25 09:30:47 -05004327 * @param msgq Address of the message queue.
Sathish Kuttan3efd8e12018-11-09 21:03:10 -08004328 * @param data Address of area to hold the message read from the queue.
4329 *
4330 * @retval 0 Message read.
4331 * @retval -ENOMSG Returned when the queue has no message.
Sathish Kuttan3efd8e12018-11-09 21:03:10 -08004332 */
Anas Nashif4b386592019-11-25 09:30:47 -05004333__syscall int k_msgq_peek(struct k_msgq *msgq, void *data);
Sathish Kuttan3efd8e12018-11-09 21:03:10 -08004334
4335/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004336 * @brief Purge a message queue.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004337 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004338 * This routine discards all unreceived messages in a message queue's ring
4339 * buffer. Any threads that are blocked waiting to send a message to the
4340 * message queue are unblocked and see an -ENOMSG error code.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004341 *
Anas Nashif4b386592019-11-25 09:30:47 -05004342 * @param msgq Address of the message queue.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004343 *
4344 * @return N/A
4345 */
Anas Nashif4b386592019-11-25 09:30:47 -05004346__syscall void k_msgq_purge(struct k_msgq *msgq);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004347
Peter Mitsis67be2492016-10-07 11:44:34 -04004348/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004349 * @brief Get the amount of free space in a message queue.
Peter Mitsis67be2492016-10-07 11:44:34 -04004350 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004351 * This routine returns the number of unused entries in a message queue's
4352 * ring buffer.
Peter Mitsis67be2492016-10-07 11:44:34 -04004353 *
Anas Nashif4b386592019-11-25 09:30:47 -05004354 * @param msgq Address of the message queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004355 *
4356 * @return Number of unused ring buffer entries.
Peter Mitsis67be2492016-10-07 11:44:34 -04004357 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004358__syscall uint32_t k_msgq_num_free_get(struct k_msgq *msgq);
Andrew Boie82edb6e2017-10-02 10:53:06 -07004359
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304360/**
4361 * @brief Get basic attributes of a message queue.
4362 *
4363 * This routine fetches basic attributes of message queue into attr argument.
4364 *
Anas Nashif4b386592019-11-25 09:30:47 -05004365 * @param msgq Address of the message queue.
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304366 * @param attrs pointer to message queue attribute structure.
4367 *
4368 * @return N/A
4369 */
Anas Nashif4b386592019-11-25 09:30:47 -05004370__syscall void k_msgq_get_attrs(struct k_msgq *msgq,
4371 struct k_msgq_attrs *attrs);
Youvedeep Singh188c1ab2018-03-19 20:02:40 +05304372
4373
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004374static inline uint32_t z_impl_k_msgq_num_free_get(struct k_msgq *msgq)
Peter Mitsis67be2492016-10-07 11:44:34 -04004375{
Anas Nashif4b386592019-11-25 09:30:47 -05004376 return msgq->max_msgs - msgq->used_msgs;
Peter Mitsis67be2492016-10-07 11:44:34 -04004377}
4378
Peter Mitsisd7a37502016-10-13 11:37:40 -04004379/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004380 * @brief Get the number of messages in a message queue.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004381 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004382 * This routine returns the number of messages in a message queue's ring buffer.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004383 *
Anas Nashif4b386592019-11-25 09:30:47 -05004384 * @param msgq Address of the message queue.
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004385 *
4386 * @return Number of messages.
Peter Mitsisd7a37502016-10-13 11:37:40 -04004387 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004388__syscall uint32_t k_msgq_num_used_get(struct k_msgq *msgq);
Andrew Boie82edb6e2017-10-02 10:53:06 -07004389
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004390static inline uint32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004391{
Anas Nashif4b386592019-11-25 09:30:47 -05004392 return msgq->used_msgs;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004393}
4394
Anas Nashif166f5192018-02-25 08:02:36 -06004395/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05004396
4397/**
Allan Stephensc98da842016-11-11 15:45:03 -05004398 * @defgroup mailbox_apis Mailbox APIs
4399 * @ingroup kernel_apis
4400 * @{
4401 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004402
Anas Nashife71293e2019-12-04 20:00:14 -05004403/**
4404 * @brief Mailbox Message Structure
4405 *
4406 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004407struct k_mbox_msg {
4408 /** internal use only - needed for legacy API support */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004409 uint32_t _mailbox;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004410 /** size of message (in bytes) */
Peter Mitsisd93078c2016-10-14 12:59:37 -04004411 size_t size;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004412 /** application-defined information value */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004413 uint32_t info;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004414 /** sender's message data buffer */
4415 void *tx_data;
4416 /** internal use only - needed for legacy API support */
4417 void *_rx_data;
4418 /** message data block descriptor */
4419 struct k_mem_block tx_block;
4420 /** source thread id */
4421 k_tid_t rx_source_thread;
4422 /** target thread id */
4423 k_tid_t tx_target_thread;
4424 /** internal use only - thread waiting on send (may be a dummy) */
4425 k_tid_t _syncing_thread;
4426#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
4427 /** internal use only - semaphore used during asynchronous send */
4428 struct k_sem *_async_sem;
4429#endif
4430};
Anas Nashife71293e2019-12-04 20:00:14 -05004431/**
4432 * @brief Mailbox Structure
4433 *
4434 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004435struct k_mbox {
Anas Nashife71293e2019-12-04 20:00:14 -05004436 /** Transmit messages queue */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004437 _wait_q_t tx_msg_queue;
Anas Nashife71293e2019-12-04 20:00:14 -05004438 /** Receive message queue */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004439 _wait_q_t rx_msg_queue;
Andy Ross9eeb6b82018-07-25 15:06:24 -07004440 struct k_spinlock lock;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004441
Flavio Ceolind1ed3362018-12-07 11:39:13 -08004442 _OBJECT_TRACING_NEXT_PTR(k_mbox)
Shih-Wei Teng5ebceeb2019-10-08 14:37:47 +08004443 _OBJECT_TRACING_LINKED_FLAG
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004444};
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004445/**
4446 * @cond INTERNAL_HIDDEN
4447 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004448
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004449#define Z_MBOX_INITIALIZER(obj) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004450 { \
Patrik Flykt4344e272019-03-08 14:19:05 -07004451 .tx_msg_queue = Z_WAIT_Q_INIT(&obj.tx_msg_queue), \
4452 .rx_msg_queue = Z_WAIT_Q_INIT(&obj.rx_msg_queue), \
Anas Nashif2f203c22016-12-18 06:57:45 -05004453 _OBJECT_TRACING_INIT \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004454 }
4455
Peter Mitsis12092702016-10-14 12:57:23 -04004456/**
Allan Stephensc98da842016-11-11 15:45:03 -05004457 * INTERNAL_HIDDEN @endcond
4458 */
4459
4460/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004461 * @brief Statically define and initialize a mailbox.
Peter Mitsis12092702016-10-14 12:57:23 -04004462 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004463 * The mailbox is to be accessed outside the module where it is defined using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004464 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05004465 * @code extern struct k_mbox <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004466 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004467 * @param name Name of the mailbox.
Peter Mitsis12092702016-10-14 12:57:23 -04004468 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004469#define K_MBOX_DEFINE(name) \
Nicolas Pitreb1d37422019-06-03 10:51:32 -04004470 Z_STRUCT_SECTION_ITERABLE(k_mbox, name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004471 Z_MBOX_INITIALIZER(name) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004472
Peter Mitsis12092702016-10-14 12:57:23 -04004473/**
4474 * @brief Initialize a mailbox.
4475 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004476 * This routine initializes a mailbox object, prior to its first use.
4477 *
4478 * @param mbox Address of the mailbox.
Peter Mitsis12092702016-10-14 12:57:23 -04004479 *
4480 * @return N/A
4481 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004482extern void k_mbox_init(struct k_mbox *mbox);
4483
Peter Mitsis12092702016-10-14 12:57:23 -04004484/**
4485 * @brief Send a mailbox message in a synchronous manner.
4486 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004487 * This routine sends a message to @a mbox and waits for a receiver to both
4488 * receive and process it. The message data may be in a buffer, in a memory
4489 * pool block, or non-existent (i.e. an empty message).
Peter Mitsis12092702016-10-14 12:57:23 -04004490 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004491 * @param mbox Address of the mailbox.
4492 * @param tx_msg Address of the transmit message descriptor.
Andy Ross78327382020-03-05 15:18:14 -08004493 * @param timeout Waiting period for the message to be received,
4494 * or one of the special values K_NO_WAIT
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004495 * and K_FOREVER. Once the message has been received,
4496 * this routine waits as long as necessary for the message
4497 * to be completely processed.
Peter Mitsis12092702016-10-14 12:57:23 -04004498 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004499 * @retval 0 Message sent.
4500 * @retval -ENOMSG Returned without waiting.
4501 * @retval -EAGAIN Waiting period timed out.
Peter Mitsis12092702016-10-14 12:57:23 -04004502 */
Peter Mitsis40680f62016-10-14 10:04:55 -04004503extern int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
Andy Ross78327382020-03-05 15:18:14 -08004504 k_timeout_t timeout);
Peter Mitsis12092702016-10-14 12:57:23 -04004505
Peter Mitsis12092702016-10-14 12:57:23 -04004506/**
4507 * @brief Send a mailbox message in an asynchronous manner.
4508 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004509 * This routine sends a message to @a mbox without waiting for a receiver
4510 * to process it. The message data may be in a buffer, in a memory pool block,
4511 * or non-existent (i.e. an empty message). Optionally, the semaphore @a sem
4512 * will be given when the message has been both received and completely
4513 * processed by the receiver.
Peter Mitsis12092702016-10-14 12:57:23 -04004514 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004515 * @param mbox Address of the mailbox.
4516 * @param tx_msg Address of the transmit message descriptor.
4517 * @param sem Address of a semaphore, or NULL if none is needed.
Peter Mitsis12092702016-10-14 12:57:23 -04004518 *
4519 * @return N/A
4520 */
Peter Mitsis40680f62016-10-14 10:04:55 -04004521extern void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004522 struct k_sem *sem);
4523
Peter Mitsis12092702016-10-14 12:57:23 -04004524/**
4525 * @brief Receive a mailbox message.
4526 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004527 * This routine receives a message from @a mbox, then optionally retrieves
4528 * its data and disposes of the message.
Peter Mitsis12092702016-10-14 12:57:23 -04004529 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004530 * @param mbox Address of the mailbox.
4531 * @param rx_msg Address of the receive message descriptor.
4532 * @param buffer Address of the buffer to receive data, or NULL to defer data
4533 * retrieval and message disposal until later.
Andy Ross78327382020-03-05 15:18:14 -08004534 * @param timeout Waiting period for a message to be received,
4535 * or one of the special values K_NO_WAIT and K_FOREVER.
Peter Mitsis12092702016-10-14 12:57:23 -04004536 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004537 * @retval 0 Message received.
4538 * @retval -ENOMSG Returned without waiting.
4539 * @retval -EAGAIN Waiting period timed out.
Peter Mitsis12092702016-10-14 12:57:23 -04004540 */
Peter Mitsis40680f62016-10-14 10:04:55 -04004541extern int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg,
Andy Ross78327382020-03-05 15:18:14 -08004542 void *buffer, k_timeout_t timeout);
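/* A sketch of a synchronous exchange pairing k_mbox_put() with
 * k_mbox_get(); the names and payload are illustrative:
 *
 * @code
 * K_MBOX_DEFINE(my_mbox);
 *
 * void sender(void)
 * {
 *         char data[16] = "hello";
 *         struct k_mbox_msg msg = {
 *                 .size = sizeof(data),
 *                 .tx_data = data,
 *                 .tx_target_thread = K_ANY,
 *         };
 *
 *         (void)k_mbox_put(&my_mbox, &msg, K_FOREVER);
 * }
 *
 * void receiver(void)
 * {
 *         char buffer[16];
 *         struct k_mbox_msg msg = {
 *                 .size = sizeof(buffer),
 *                 .rx_source_thread = K_ANY,
 *         };
 *
 *         (void)k_mbox_get(&my_mbox, &msg, buffer, K_FOREVER);
 * }
 * @endcode
 */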
Peter Mitsis12092702016-10-14 12:57:23 -04004543
4544/**
4545 * @brief Retrieve mailbox message data into a buffer.
4546 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004547 * This routine completes the processing of a received message by retrieving
4548 * its data into a buffer, then disposing of the message.
Peter Mitsis12092702016-10-14 12:57:23 -04004549 *
4550 * Alternatively, this routine can be used to dispose of a received message
4551 * without retrieving its data.
4552 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004553 * @param rx_msg Address of the receive message descriptor.
4554 * @param buffer Address of the buffer to receive data, or NULL to discard
4555 * the data.
Peter Mitsis12092702016-10-14 12:57:23 -04004556 *
4557 * @return N/A
4558 */
Peter Mitsis40680f62016-10-14 10:04:55 -04004559extern void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer);
Peter Mitsis12092702016-10-14 12:57:23 -04004560
Anas Nashif166f5192018-02-25 08:02:36 -06004561/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05004562
4563/**
Anas Nashifce78d162018-05-24 12:43:11 -05004564 * @defgroup pipe_apis Pipe APIs
4565 * @ingroup kernel_apis
4566 * @{
Allan Stephensc98da842016-11-11 15:45:03 -05004567 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004568
Anas Nashifce78d162018-05-24 12:43:11 -05004569/** Pipe Structure */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004570struct k_pipe {
Anas Nashifce78d162018-05-24 12:43:11 -05004571 unsigned char *buffer; /**< Pipe buffer: may be NULL */
4572 size_t size; /**< Buffer size */
4573 size_t bytes_used; /**< # bytes used in buffer */
4574 size_t read_index; /**< Where in buffer to read from */
4575 size_t write_index; /**< Where in buffer to write */
Andy Rossf582b552019-02-05 16:10:18 -08004576 struct k_spinlock lock; /**< Synchronization lock */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004577
4578 struct {
Anas Nashifce78d162018-05-24 12:43:11 -05004579 _wait_q_t readers; /**< Reader wait queue */
4580 _wait_q_t writers; /**< Writer wait queue */
Anas Nashif0ff33d12020-07-13 20:21:56 -04004581 } wait_q; /**< Wait queue */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004582
Flavio Ceolind1ed3362018-12-07 11:39:13 -08004583 _OBJECT_TRACING_NEXT_PTR(k_pipe)
Shih-Wei Teng5ebceeb2019-10-08 14:37:47 +08004584 _OBJECT_TRACING_LINKED_FLAG
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004585 uint8_t flags; /**< Flags */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004586};
4587
Anas Nashifce78d162018-05-24 12:43:11 -05004588/**
4589 * @cond INTERNAL_HIDDEN
4590 */
4591#define K_PIPE_FLAG_ALLOC BIT(0) /** Buffer was allocated */
4592
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004593#define Z_PIPE_INITIALIZER(obj, pipe_buffer, pipe_buffer_size) \
Krzysztof Chruscinskibe063272019-02-13 11:19:54 +01004594 { \
4595 .buffer = pipe_buffer, \
4596 .size = pipe_buffer_size, \
4597 .bytes_used = 0, \
4598 .read_index = 0, \
4599 .write_index = 0, \
4600 .lock = {}, \
4601 .wait_q = { \
Patrik Flykt4344e272019-03-08 14:19:05 -07004602 .readers = Z_WAIT_Q_INIT(&obj.wait_q.readers), \
4603 .writers = Z_WAIT_Q_INIT(&obj.wait_q.writers) \
Krzysztof Chruscinskibe063272019-02-13 11:19:54 +01004604 }, \
4605 _OBJECT_TRACING_INIT \
4606 .flags = 0 \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004607 }
4608
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004609/**
Allan Stephensc98da842016-11-11 15:45:03 -05004610 * INTERNAL_HIDDEN @endcond
4611 */
4612
4613/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004614 * @brief Statically define and initialize a pipe.
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004615 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004616 * The pipe can be accessed outside the module where it is defined using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004617 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05004618 * @code extern struct k_pipe <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004619 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004620 * @param name Name of the pipe.
4621 * @param pipe_buffer_size Size of the pipe's ring buffer (in bytes),
4622 * or zero if no ring buffer is used.
4623 * @param pipe_align Alignment of the pipe's ring buffer (power of 2).
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04004624 *
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004625 */
Andrew Boie44fe8122018-04-12 17:38:12 -07004626#define K_PIPE_DEFINE(name, pipe_buffer_size, pipe_align) \
Andrew Boie41f60112019-01-31 15:53:24 -08004627 static unsigned char __noinit __aligned(pipe_align) \
Andrew Boie44fe8122018-04-12 17:38:12 -07004628 _k_pipe_buf_##name[pipe_buffer_size]; \
Nicolas Pitreb1d37422019-06-03 10:51:32 -04004629 Z_STRUCT_SECTION_ITERABLE(k_pipe, name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004630 Z_PIPE_INITIALIZER(name, _k_pipe_buf_##name, pipe_buffer_size)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004631
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004632/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004633 * @brief Initialize a pipe.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004634 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004635 * This routine initializes a pipe object, prior to its first use.
4636 *
4637 * @param pipe Address of the pipe.
4638 * @param buffer Address of the pipe's ring buffer, or NULL if no ring buffer
4639 * is used.
4640 * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
4641 * buffer is used.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004642 *
4643 * @return N/A
4644 */
Andrew Boie44fe8122018-04-12 17:38:12 -07004645void k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size);
4646
4647/**
4648 * @brief Release a pipe's allocated buffer
4649 *
4650 * If a pipe object was given a dynamically allocated buffer via
4651 * k_pipe_alloc_init(), this will free it. This function does nothing
4652 * if the buffer wasn't dynamically allocated.
4653 *
4654 * @param pipe Address of the pipe.
Anas Nashif361a84d2019-06-16 08:22:08 -04004655 * @retval 0 on success
4656 * @retval -EAGAIN nothing to cleanup
Andrew Boie44fe8122018-04-12 17:38:12 -07004657 */
Anas Nashif361a84d2019-06-16 08:22:08 -04004658int k_pipe_cleanup(struct k_pipe *pipe);
Andrew Boie44fe8122018-04-12 17:38:12 -07004659
4660/**
4661 * @brief Initialize a pipe and allocate a buffer for it
4662 *
4663 * Storage for the buffer region will be allocated from the calling thread's
4664 * resource pool. This memory will be released if k_pipe_cleanup() is called,
4665 * or userspace is enabled and the pipe object loses all references to it.
4666 *
4667 * This function should only be called on uninitialized pipe objects.
4668 *
4669 * @param pipe Address of the pipe.
4670 * @param size Size of the pipe's ring buffer (in bytes), or zero if no ring
4671 * buffer is used.
4672 * @retval 0 on success
David B. Kinderfcbd8fb2018-05-23 12:06:24 -07004673 * @retval -ENOMEM if memory couldn't be allocated
Andrew Boie44fe8122018-04-12 17:38:12 -07004674 */
4675__syscall int k_pipe_alloc_init(struct k_pipe *pipe, size_t size);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004676
4677/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004678 * @brief Write data to a pipe.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004679 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004680 * This routine writes up to @a bytes_to_write bytes of data to @a pipe.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004681 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004682 * @param pipe Address of the pipe.
4683 * @param data Address of data to write.
4684 * @param bytes_to_write Size of data (in bytes).
4685 * @param bytes_written Address of area to hold the number of bytes written.
4686 * @param min_xfer Minimum number of bytes to write.
Andy Ross78327382020-03-05 15:18:14 -08004687 * @param timeout Waiting period to wait for the data to be written,
4688 * or one of the special values K_NO_WAIT and K_FOREVER.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004689 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004690 * @retval 0 At least @a min_xfer bytes of data were written.
4691 * @retval -EIO Returned without waiting; zero data bytes were written.
4692 * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004693 * minus one data bytes were written.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004694 */
Andrew Boieb9a05782017-09-29 16:05:32 -07004695__syscall int k_pipe_put(struct k_pipe *pipe, void *data,
4696 size_t bytes_to_write, size_t *bytes_written,
Andy Ross78327382020-03-05 15:18:14 -08004697 size_t min_xfer, k_timeout_t timeout);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004698
4699/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004700 * @brief Read data from a pipe.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004701 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004702 * This routine reads up to @a bytes_to_read bytes of data from @a pipe.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004703 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004704 * @param pipe Address of the pipe.
4705 * @param data Address to place the data read from pipe.
4706 * @param bytes_to_read Maximum number of data bytes to read.
4707 * @param bytes_read Address of area to hold the number of bytes read.
4708 * @param min_xfer Minimum number of data bytes to read.
Andy Ross78327382020-03-05 15:18:14 -08004709 * @param timeout Waiting period to wait for the data to be read,
4710 * or one of the special values K_NO_WAIT and K_FOREVER.
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004711 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004712 * @retval 0 At least @a min_xfer bytes of data were read.
Anas Nashif361a84d2019-06-16 08:22:08 -04004713 * @retval -EINVAL invalid parameters supplied
Allan Stephens9ef50f42016-11-16 15:33:31 -05004714 * @retval -EIO Returned without waiting; zero data bytes were read.
4715 * @retval -EAGAIN Waiting period timed out; between zero and @a min_xfer
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004716 * minus one data bytes were read.
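 *
 * A minimal usage sketch; the pipe @c rx_pipe is hypothetical. Setting
 * @a min_xfer to 1 accepts any non-empty transfer:
 *
 * @code
 * uint8_t buf[8];
 * size_t bytes_read;
 *
 * int rc = k_pipe_get(&rx_pipe, buf, sizeof(buf), &bytes_read,
 *                     1, K_MSEC(100));
 *
 * if (rc == 0) {
 *     // between 1 and sizeof(buf) bytes are now in buf
 * }
 * @endcode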
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004717 */
Andrew Boieb9a05782017-09-29 16:05:32 -07004718__syscall int k_pipe_get(struct k_pipe *pipe, void *data,
4719 size_t bytes_to_read, size_t *bytes_read,
Andy Ross78327382020-03-05 15:18:14 -08004720 size_t min_xfer, k_timeout_t timeout);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004721
4722/**
Christopher Friedt3315f8f2020-05-06 18:43:58 -04004723 * @brief Query the number of bytes that may be read from @a pipe.
4724 *
4725 * @param pipe Address of the pipe.
4726 *
4727 * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
4728 * result is zero for unbuffered pipes.
4729 */
4730__syscall size_t k_pipe_read_avail(struct k_pipe *pipe);
4731
4732/**
4733 * @brief Query the number of bytes that may be written to @a pipe.
4734 *
4735 * @param pipe Address of the pipe.
4736 *
4737 * @retval a number n such that 0 <= n <= @ref k_pipe.size; the
4738 * result is zero for unbuffered pipes.
4739 */
4740__syscall size_t k_pipe_write_avail(struct k_pipe *pipe);
4741
Anas Nashif166f5192018-02-25 08:02:36 -06004742/** @} */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004743
Allan Stephensc98da842016-11-11 15:45:03 -05004744/**
4745 * @cond INTERNAL_HIDDEN
4746 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004747
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004748struct k_mem_slab {
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004749 _wait_q_t wait_q;
Nicolas Pitre2bed37e2021-04-13 11:10:22 -04004750 struct k_spinlock lock;
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004751 uint32_t num_blocks;
Peter Mitsisfb02d572016-10-13 16:55:45 -04004752 size_t block_size;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004753 char *buffer;
4754 char *free_list;
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004755 uint32_t num_used;
Kamil Lazowski104f1002020-09-11 14:27:55 +02004756#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
4757 uint32_t max_used;
4758#endif
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004759
Flavio Ceolind1ed3362018-12-07 11:39:13 -08004760 _OBJECT_TRACING_NEXT_PTR(k_mem_slab)
Shih-Wei Teng5ebceeb2019-10-08 14:37:47 +08004761 _OBJECT_TRACING_LINKED_FLAG
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004762};
4763
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004764#define Z_MEM_SLAB_INITIALIZER(obj, slab_buffer, slab_block_size, \
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004765 slab_num_blocks) \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004766 { \
Nicolas Pitre2bed37e2021-04-13 11:10:22 -04004767 .lock = {}, \
Patrik Flykt4344e272019-03-08 14:19:05 -07004768 .wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004769 .num_blocks = slab_num_blocks, \
4770 .block_size = slab_block_size, \
4771 .buffer = slab_buffer, \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004772 .free_list = NULL, \
4773 .num_used = 0, \
Anas Nashif2f203c22016-12-18 06:57:45 -05004774 _OBJECT_TRACING_INIT \
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004775 }
4776
Andrew Boie65a9d2a2017-06-27 10:51:23 -07004777
Peter Mitsis578f9112016-10-07 13:50:31 -04004778/**
Allan Stephensc98da842016-11-11 15:45:03 -05004779 * INTERNAL_HIDDEN @endcond
4780 */
4781
4782/**
4783 * @defgroup mem_slab_apis Memory Slab APIs
4784 * @ingroup kernel_apis
4785 * @{
4786 */
4787
4788/**
Allan Stephensda827222016-11-09 14:23:58 -06004789 * @brief Statically define and initialize a memory slab.
Peter Mitsis578f9112016-10-07 13:50:31 -04004790 *
Allan Stephensda827222016-11-09 14:23:58 -06004791 * The memory slab's buffer contains @a slab_num_blocks memory blocks
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004792 * that are @a slab_block_size bytes long. The buffer is aligned to a
Allan Stephensda827222016-11-09 14:23:58 -06004793 * @a slab_align -byte boundary. To ensure that each memory block is similarly
4794 * aligned to this boundary, @a slab_block_size must also be a multiple of
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004795 * @a slab_align.
Peter Mitsis578f9112016-10-07 13:50:31 -04004796 *
Allan Stephensda827222016-11-09 14:23:58 -06004797 * The memory slab can be accessed outside the module where it is defined
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004798 * using:
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004799 *
Allan Stephens82d4c3a2016-11-17 09:23:46 -05004800 * @code extern struct k_mem_slab <name>; @endcode
Peter Mitsis348eb4c2016-10-26 11:22:14 -04004801 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004802 * @param name Name of the memory slab.
4803 * @param slab_block_size Size of each memory block (in bytes).
4804 * @param slab_num_blocks Number of memory blocks.
4805 * @param slab_align Alignment of the memory slab's buffer (power of 2).
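 *
 * For example, the following (hypothetical) slab provides eight 64-byte
 * blocks aligned to a 4-byte boundary:
 *
 * @code
 * K_MEM_SLAB_DEFINE(my_slab, 64, 8, 4);
 * @endcode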
Peter Mitsis578f9112016-10-07 13:50:31 -04004806 */
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004807#define K_MEM_SLAB_DEFINE(name, slab_block_size, slab_num_blocks, slab_align) \
Nicolas Pitre46cd5a02019-05-21 21:40:38 -04004808 char __noinit __aligned(WB_UP(slab_align)) \
4809 _k_mem_slab_buf_##name[(slab_num_blocks) * WB_UP(slab_block_size)]; \
Nicolas Pitreb1d37422019-06-03 10:51:32 -04004810 Z_STRUCT_SECTION_ITERABLE(k_mem_slab, name) = \
Anas Nashif45a1d8a2020-04-24 11:29:17 -04004811 Z_MEM_SLAB_INITIALIZER(name, _k_mem_slab_buf_##name, \
Nicolas Pitre46cd5a02019-05-21 21:40:38 -04004812 WB_UP(slab_block_size), slab_num_blocks)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004813
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004814/**
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004815 * @brief Initialize a memory slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004816 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004817 * Initializes a memory slab, prior to its first use.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004818 *
Allan Stephensda827222016-11-09 14:23:58 -06004819 * The memory slab's buffer contains @a slab_num_blocks memory blocks
4820 * that are @a slab_block_size bytes long. The buffer must be aligned to an
Nicolas Pitre46cd5a02019-05-21 21:40:38 -04004821 * N-byte boundary matching a word boundary, where N is a power of 2
4822 * (e.g. 4 on 32-bit systems, 8, 16, ...)
Allan Stephensda827222016-11-09 14:23:58 -06004823 * To ensure that each memory block is similarly aligned to this boundary,
4824 * @a slab_block_size must also be a multiple of N.
4825 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004826 * @param slab Address of the memory slab.
4827 * @param buffer Pointer to buffer used for the memory blocks.
4828 * @param block_size Size of each memory block (in bytes).
4829 * @param num_blocks Number of memory blocks.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004830 *
Anas Nashifdfc2bbc2019-06-16 09:22:21 -04004831 * @retval 0 on success
4832 * @retval -EINVAL invalid data supplied
4833 *
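 * An equivalent runtime setup, as a sketch with hypothetical names and
 * sizes (eight 64-byte blocks):
 *
 * @code
 * struct k_mem_slab my_slab;
 * char __aligned(4) my_slab_buf[8 * 64];
 *
 * int rc = k_mem_slab_init(&my_slab, my_slab_buf, 64, 8);
 * @endcode
 *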
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004834 */
Anas Nashifdfc2bbc2019-06-16 09:22:21 -04004835extern int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004836 size_t block_size, uint32_t num_blocks);
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004837
4838/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004839 * @brief Allocate memory from a memory slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004840 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004841 * This routine allocates a memory block from a memory slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004842 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004843 * @note @a timeout must be set to K_NO_WAIT if called from an ISR.
Krzysztof Chruscinskic482a572021-04-19 10:52:34 +02004844 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004845 *
4846 * @funcprops \isr_ok
Spoorthy Priya Yerabolu04d3c3c2020-09-17 02:54:50 -07004847 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004848 * @param slab Address of the memory slab.
4849 * @param mem Pointer to block address area.
Andy Ross78327382020-03-05 15:18:14 -08004850 * @param timeout Non-negative waiting period to wait for operation to complete.
4851 * Use K_NO_WAIT to return without waiting,
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004852 * or K_FOREVER to wait as long as necessary.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004853 *
Allan Stephens9ef50f42016-11-16 15:33:31 -05004854 * @retval 0 Memory allocated. The block address area pointed at by @a mem
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004855 * is set to the starting address of the memory block.
Allan Stephens9ef50f42016-11-16 15:33:31 -05004856 * @retval -ENOMEM Returned without waiting.
4857 * @retval -EAGAIN Waiting period timed out.
Anas Nashifdfc2bbc2019-06-16 09:22:21 -04004858 * @retval -EINVAL Invalid data supplied
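 *
 * A minimal sketch pairing allocation with k_mem_slab_free(); @c my_slab is
 * a hypothetical slab defined elsewhere:
 *
 * @code
 * void *block;
 *
 * if (k_mem_slab_alloc(&my_slab, &block, K_MSEC(100)) == 0) {
 *     // block now points to a free buffer from the slab
 *     k_mem_slab_free(&my_slab, &block);
 * }
 * @endcode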
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004859 */
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004860extern int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
Andy Ross78327382020-03-05 15:18:14 -08004861 k_timeout_t timeout);
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004862
4863/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004864 * @brief Free memory allocated from a memory slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004865 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004866 * This routine releases a previously allocated memory block back to its
4867 * associated memory slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004868 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004869 * @param slab Address of the memory slab.
4870 * @param mem Pointer to block address area (as set by k_mem_slab_alloc()).
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004871 *
4872 * @return N/A
4873 */
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004874extern void k_mem_slab_free(struct k_mem_slab *slab, void **mem);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004875
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004876/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004877 * @brief Get the number of used blocks in a memory slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004878 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004879 * This routine gets the number of memory blocks that are currently
4880 * allocated in @a slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004881 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004882 * @param slab Address of the memory slab.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004883 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004884 * @return Number of allocated memory blocks.
Peter Mitsis4a5d62f2016-10-13 16:53:30 -04004885 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004886static inline uint32_t k_mem_slab_num_used_get(struct k_mem_slab *slab)
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004887{
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004888 return slab->num_used;
Benjamin Walsh456c6da2016-09-02 18:55:39 -04004889}
4890
Peter Mitsisc001aa82016-10-13 13:53:37 -04004891/**
Kamil Lazowski104f1002020-09-11 14:27:55 +02004892 * @brief Get the number of maximum used blocks so far in a memory slab.
4893 *
4894 * This routine gets the maximum number of memory blocks that were
4895 * allocated in @a slab.
4896 *
4897 * @param slab Address of the memory slab.
4898 *
4899 * @return Maximum number of allocated memory blocks.
4900 */
4901static inline uint32_t k_mem_slab_max_used_get(struct k_mem_slab *slab)
4902{
4903#ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
4904 return slab->max_used;
4905#else
4906 ARG_UNUSED(slab);
4907 return 0;
4908#endif
4909}
4910
4911/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004912 * @brief Get the number of unused blocks in a memory slab.
Peter Mitsisc001aa82016-10-13 13:53:37 -04004913 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004914 * This routine gets the number of memory blocks that are currently
4915 * unallocated in @a slab.
Peter Mitsisc001aa82016-10-13 13:53:37 -04004916 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004917 * @param slab Address of the memory slab.
Peter Mitsisc001aa82016-10-13 13:53:37 -04004918 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05004919 * @return Number of unallocated memory blocks.
Peter Mitsisc001aa82016-10-13 13:53:37 -04004920 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05004921static inline uint32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
Peter Mitsisc001aa82016-10-13 13:53:37 -04004922{
Benjamin Walsh7ef0f622016-10-24 17:04:43 -04004923 return slab->num_blocks - slab->num_used;
Peter Mitsisc001aa82016-10-13 13:53:37 -04004924}
4925
Anas Nashif166f5192018-02-25 08:02:36 -06004926/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05004927
4928/**
Anas Nashifdbac76f2020-12-09 12:04:53 -05004929 * @addtogroup heap_apis
Allan Stephensc98da842016-11-11 15:45:03 -05004930 * @{
4931 */
4932
Andrew Boieb95e9602020-09-28 13:26:38 -07004933/* kernel synchronized heap struct */
4934
4935struct k_heap {
4936 struct sys_heap heap;
4937 _wait_q_t wait_q;
4938 struct k_spinlock lock;
4939};
4940
Allan Stephensc98da842016-11-11 15:45:03 -05004941/**
Andy Ross0dd83b82020-04-03 10:01:03 -07004942 * @brief Initialize a k_heap
4943 *
4944 * This constructs a synchronized k_heap object over a memory region
4945 * specified by the user. Note that while any alignment and size can
4946 * be passed as valid parameters, internal alignment restrictions
4947 * inside the inner sys_heap mean that not all bytes may be usable as
4948 * allocated memory.
4949 *
4950 * @param h Heap struct to initialize
4951 * @param mem Pointer to memory.
4952 * @param bytes Size of memory region, in bytes
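 *
 * A minimal sketch with a hypothetical static buffer:
 *
 * @code
 * struct k_heap my_heap;
 * char my_heap_mem[1024];
 *
 * k_heap_init(&my_heap, my_heap_mem, sizeof(my_heap_mem));
 * @endcode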
4953 */
4954void k_heap_init(struct k_heap *h, void *mem, size_t bytes);
4955
Maximilian Bachmann34d7c782020-11-13 15:12:31 +01004956/** @brief Allocate aligned memory from a k_heap
4957 *
4958 * Behaves in all ways like k_heap_alloc(), except that the returned
4959 * memory (if available) will have a starting address in memory which
4960 * is a multiple of the specified power-of-two alignment value in
4961 * bytes. The resulting memory can be returned to the heap using
4962 * k_heap_free().
4963 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004964 * @note @a timeout must be set to K_NO_WAIT if called from an ISR.
Krzysztof Chruscinskic482a572021-04-19 10:52:34 +02004965 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004966 *
4967 * @funcprops \isr_ok
Maximilian Bachmann34d7c782020-11-13 15:12:31 +01004968 *
4969 * @param h Heap from which to allocate
4970 * @param align Alignment in bytes, must be a power of two
4971 * @param bytes Number of bytes requested
4972 * @param timeout How long to wait, or K_NO_WAIT
4973 * @return Pointer to memory the caller can now use
4974 */
4975void *k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes,
4976 k_timeout_t timeout);
4977
Andy Ross0dd83b82020-04-03 10:01:03 -07004978/**
4979 * @brief Allocate memory from a k_heap
4980 *
4981 * Allocates and returns a memory buffer from the memory region owned
4982 * by the heap. If no memory is available immediately, the call will
4983 * block for the specified timeout (constructed via the standard
4984 * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
4985 * freed. If the allocation cannot be performed by the expiration of
4986 * the timeout, NULL will be returned.
4987 *
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004988 * @note @a timeout must be set to K_NO_WAIT if called from an ISR.
Krzysztof Chruscinskic482a572021-04-19 10:52:34 +02004989 * @note When CONFIG_MULTITHREADING=n any @a timeout is treated as K_NO_WAIT.
Gerard Marull-Paretas9de14e82021-03-04 19:50:02 +01004990 *
4991 * @funcprops \isr_ok
Spoorthy Priya Yerabolu04d3c3c2020-09-17 02:54:50 -07004992 *
Andy Ross0dd83b82020-04-03 10:01:03 -07004993 * @param h Heap from which to allocate
4994 * @param bytes Desired size of block to allocate
4995 * @param timeout How long to wait, or K_NO_WAIT
4996 * @return A pointer to valid heap memory, or NULL
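 *
 * For example (assuming @c my_heap was set up earlier with k_heap_init()
 * or K_HEAP_DEFINE(); the sizes are illustrative):
 *
 * @code
 * void *buf = k_heap_alloc(&my_heap, 128, K_MSEC(50));
 *
 * if (buf != NULL) {
 *     // ... use the 128-byte buffer ...
 *     k_heap_free(&my_heap, buf);
 * }
 * @endcode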
4997 */
Maximilian Bachmann34d7c782020-11-13 15:12:31 +01004998static inline void *k_heap_alloc(struct k_heap *h, size_t bytes,
4999 k_timeout_t timeout)
5000{
5001 return k_heap_aligned_alloc(h, sizeof(void *), bytes, timeout);
5002}
Andy Ross0dd83b82020-04-03 10:01:03 -07005003
5004/**
5005 * @brief Free memory allocated by k_heap_alloc()
5006 *
5007 * Returns the specified memory block, which must have been returned
5008 * from k_heap_alloc(), to the heap for use by other callers. Passing
5009 * a NULL block is legal, and has no effect.
5010 *
5011 * @param h Heap to which to return the memory
5012 * @param mem A valid memory block, or NULL
5013 */
5014void k_heap_free(struct k_heap *h, void *mem);
5015
5016/**
5017 * @brief Define a static k_heap
5018 *
5019 * This macro defines and initializes a static memory region and
5020 * k_heap of the requested size. After kernel start, &name can be
5021 * used as if k_heap_init() had been called.
5022 *
5023 * @param name Symbol name for the struct k_heap object
5024 * @param bytes Size of memory region, in bytes
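 *
 * For example, a hypothetical 1 KiB pool:
 *
 * @code
 * K_HEAP_DEFINE(my_pool, 1024);
 *
 * void *obj = k_heap_alloc(&my_pool, 32, K_NO_WAIT);
 * @endcode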
5025 */
5026#define K_HEAP_DEFINE(name, bytes) \
5027 char __aligned(sizeof(void *)) kheap_##name[bytes]; \
5028 Z_STRUCT_SECTION_ITERABLE(k_heap, name) = { \
5029 .heap = { \
5030 .init_mem = kheap_##name, \
5031 .init_bytes = (bytes), \
5032 }, \
5033 }
5034
Johan Hedberg7d887cb2018-01-11 20:45:27 +02005035/**
Anas Nashif166f5192018-02-25 08:02:36 -06005036 * @}
Allan Stephensc98da842016-11-11 15:45:03 -05005037 */
5038
5039/**
Anas Nashifdbac76f2020-12-09 12:04:53 -05005040 * @defgroup heap_apis Heap APIs
Allan Stephensc98da842016-11-11 15:45:03 -05005041 * @ingroup kernel_apis
5042 * @{
5043 */
5044
5045/**
Christopher Friedt135ffaf2020-11-26 08:19:10 -05005046 * @brief Allocate memory from the heap with a specified alignment.
5047 *
5048 * This routine provides semantics similar to aligned_alloc(); memory is
5049 * allocated from the heap with a specified alignment. However, one minor
5050 * difference is that k_aligned_alloc() accepts any non-zero @p size,
5051 * whereas aligned_alloc() only accepts a @p size that is an integral
5052 * multiple of @p align.
5053 *
5054 * Above, aligned_alloc() refers to:
5055 * C11 standard (ISO/IEC 9899:2011): 7.22.3.1
5056 * The aligned_alloc function (p: 347-348)
5057 *
5058 * @param align Alignment of memory requested (in bytes).
5059 * @param size Amount of memory requested (in bytes).
5060 *
5061 * @return Address of the allocated memory if successful; otherwise NULL.
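 *
 * For example, a sketch of an over-aligned allocation (the 32-byte
 * alignment and 64-byte size are illustrative):
 *
 * @code
 * void *buf = k_aligned_alloc(32, 64);
 *
 * // ... buf is 32-byte aligned; release it with k_free() ...
 * k_free(buf);
 * @endcode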
5062 */
5063extern void *k_aligned_alloc(size_t align, size_t size);
5064
5065/**
5066 * @brief Allocate memory from the heap.
Peter Mitsis937042c2016-10-13 13:18:26 -04005067 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005068 * This routine provides traditional malloc() semantics. Memory is
Allan Stephens480a1312016-10-13 15:44:48 -05005069 * allocated from the heap memory pool.
Peter Mitsis937042c2016-10-13 13:18:26 -04005070 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005071 * @param size Amount of memory requested (in bytes).
Peter Mitsis937042c2016-10-13 13:18:26 -04005072 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005073 * @return Address of the allocated memory if successful; otherwise NULL.
Peter Mitsis937042c2016-10-13 13:18:26 -04005074 */
Christopher Friedt135ffaf2020-11-26 08:19:10 -05005075static inline void *k_malloc(size_t size)
5076{
5077 return k_aligned_alloc(sizeof(void *), size);
5078}
Peter Mitsis937042c2016-10-13 13:18:26 -04005079
5080/**
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005081 * @brief Free memory allocated from heap.
Allan Stephens480a1312016-10-13 15:44:48 -05005082 *
5083 * This routine provides traditional free() semantics. The memory being
Andrew Boiea2480bd2018-04-12 16:59:02 -07005084 * returned must have been allocated from the heap memory pool or
5085 * k_mem_pool_malloc().
Peter Mitsis937042c2016-10-13 13:18:26 -04005086 *
Anas Nashif345fdd52016-12-20 08:36:04 -05005087 * If @a ptr is NULL, no operation is performed.
5088 *
Allan Stephens5a7a86c2016-11-04 13:53:19 -05005089 * @param ptr Pointer to previously allocated memory.
Peter Mitsis937042c2016-10-13 13:18:26 -04005090 *
5091 * @return N/A
5092 */
5093extern void k_free(void *ptr);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04005094
Allan Stephensc98da842016-11-11 15:45:03 -05005095/**
Andrew Boie7f95e832017-11-08 14:40:01 -08005096 * @brief Allocate memory from heap, array style
5097 *
5098 * This routine provides traditional calloc() semantics. Memory is
5099 * allocated from the heap memory pool and zeroed.
5100 *
5101 * @param nmemb Number of elements in the requested array
5102 * @param size Size of each array element (in bytes).
5103 *
5104 * @return Address of the allocated memory if successful; otherwise NULL.
5105 */
5106extern void *k_calloc(size_t nmemb, size_t size);
5107
Anas Nashif166f5192018-02-25 08:02:36 -06005108/** @} */
Allan Stephensc98da842016-11-11 15:45:03 -05005109
Benjamin Walshacc68c12017-01-29 18:57:45 -05005110/* polling API - PRIVATE */
5111
Benjamin Walshb0179862017-02-02 16:39:57 -05005112#ifdef CONFIG_POLL
Flavio Ceolin6fdc56d2018-09-18 12:32:27 -07005113#define _INIT_OBJ_POLL_EVENT(obj) do { (obj)->poll_event = NULL; } while (false)
Benjamin Walshb0179862017-02-02 16:39:57 -05005114#else
Flavio Ceolin6fdc56d2018-09-18 12:32:27 -07005115#define _INIT_OBJ_POLL_EVENT(obj) do { } while (false)
Benjamin Walshb0179862017-02-02 16:39:57 -05005116#endif
5117
Benjamin Walshacc68c12017-01-29 18:57:45 -05005118/* private - types bit positions */
5119enum _poll_types_bits {
5120 /* can be used to ignore an event */
5121 _POLL_TYPE_IGNORE,
5122
Flavio Ceolinaecd4ec2018-11-02 12:35:30 -07005123 /* to be signaled by k_poll_signal_raise() */
Benjamin Walshacc68c12017-01-29 18:57:45 -05005124 _POLL_TYPE_SIGNAL,
5125
5126 /* semaphore availability */
5127 _POLL_TYPE_SEM_AVAILABLE,
5128
Anas Nashif56821172020-07-08 14:14:25 -04005129 /* queue/FIFO/LIFO data availability */
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02005130 _POLL_TYPE_DATA_AVAILABLE,
Benjamin Walshacc68c12017-01-29 18:57:45 -05005131
Nick Gravesb445f132021-04-12 12:35:18 -07005132 /* msgq data availability */
5133 _POLL_TYPE_MSGQ_DATA_AVAILABLE,
5134
Benjamin Walshacc68c12017-01-29 18:57:45 -05005135 _POLL_NUM_TYPES
5136};
5137
Aastha Grover83b9f692020-08-20 16:47:11 -07005138#define Z_POLL_TYPE_BIT(type) (1U << ((type) - 1U))
Benjamin Walshacc68c12017-01-29 18:57:45 -05005139
5140/* private - states bit positions */
5141enum _poll_states_bits {
5142 /* default state when creating event */
5143 _POLL_STATE_NOT_READY,
5144
Flavio Ceolinaecd4ec2018-11-02 12:35:30 -07005145 /* signaled by k_poll_signal_raise() */
Benjamin Walshacc68c12017-01-29 18:57:45 -05005146 _POLL_STATE_SIGNALED,
5147
5148 /* semaphore is available */
5149 _POLL_STATE_SEM_AVAILABLE,
5150
Anas Nashif56821172020-07-08 14:14:25 -04005151 /* data is available to read on queue/FIFO/LIFO */
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02005152 _POLL_STATE_DATA_AVAILABLE,
Benjamin Walshacc68c12017-01-29 18:57:45 -05005153
Anas Nashif56821172020-07-08 14:14:25 -04005154 /* queue/FIFO/LIFO wait was cancelled */
Paul Sokolovsky45c0b202018-08-21 23:29:11 +03005155 _POLL_STATE_CANCELLED,
5156
Nick Gravesb445f132021-04-12 12:35:18 -07005157 /* data is available to read on a message queue */
5158 _POLL_STATE_MSGQ_DATA_AVAILABLE,
5159
Benjamin Walshacc68c12017-01-29 18:57:45 -05005160 _POLL_NUM_STATES
5161};
5162
Aastha Grover83b9f692020-08-20 16:47:11 -07005163#define Z_POLL_STATE_BIT(state) (1U << ((state) - 1U))
Benjamin Walshacc68c12017-01-29 18:57:45 -05005164
5165#define _POLL_EVENT_NUM_UNUSED_BITS \
Benjamin Walsh969d4a72017-02-02 11:25:11 -05005166 (32 - (0 \
5167 + 8 /* tag */ \
5168 + _POLL_NUM_TYPES \
5169 + _POLL_NUM_STATES \
5170 + 1 /* modes */ \
5171 ))
Benjamin Walshacc68c12017-01-29 18:57:45 -05005172
Benjamin Walshacc68c12017-01-29 18:57:45 -05005173/* end of polling API - PRIVATE */
5174
5175
5176/**
5177 * @defgroup poll_apis Async polling APIs
5178 * @ingroup kernel_apis
5179 * @{
5180 */
5181
5182/* Public polling API */
5183
5184/* public - values for k_poll_event.type bitfield */
5185#define K_POLL_TYPE_IGNORE 0
Patrik Flykt4344e272019-03-08 14:19:05 -07005186#define K_POLL_TYPE_SIGNAL Z_POLL_TYPE_BIT(_POLL_TYPE_SIGNAL)
5187#define K_POLL_TYPE_SEM_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_SEM_AVAILABLE)
5188#define K_POLL_TYPE_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_DATA_AVAILABLE)
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02005189#define K_POLL_TYPE_FIFO_DATA_AVAILABLE K_POLL_TYPE_DATA_AVAILABLE
Nick Gravesb445f132021-04-12 12:35:18 -07005190#define K_POLL_TYPE_MSGQ_DATA_AVAILABLE Z_POLL_TYPE_BIT(_POLL_TYPE_MSGQ_DATA_AVAILABLE)
Benjamin Walshacc68c12017-01-29 18:57:45 -05005191
5192/* public - polling modes */
5193enum k_poll_modes {
5194 /* polling thread does not take ownership of objects when available */
5195 K_POLL_MODE_NOTIFY_ONLY = 0,
5196
5197 K_POLL_NUM_MODES
5198};
5199
5200/* public - values for k_poll_event.state bitfield */
5201#define K_POLL_STATE_NOT_READY 0
Patrik Flykt4344e272019-03-08 14:19:05 -07005202#define K_POLL_STATE_SIGNALED Z_POLL_STATE_BIT(_POLL_STATE_SIGNALED)
5203#define K_POLL_STATE_SEM_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
5204#define K_POLL_STATE_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02005205#define K_POLL_STATE_FIFO_DATA_AVAILABLE K_POLL_STATE_DATA_AVAILABLE
Nick Gravesb445f132021-04-12 12:35:18 -07005206#define K_POLL_STATE_MSGQ_DATA_AVAILABLE Z_POLL_STATE_BIT(_POLL_STATE_MSGQ_DATA_AVAILABLE)
Patrik Flykt4344e272019-03-08 14:19:05 -07005207#define K_POLL_STATE_CANCELLED Z_POLL_STATE_BIT(_POLL_STATE_CANCELLED)
Benjamin Walshacc68c12017-01-29 18:57:45 -05005208
5209/* public - poll signal object */
5210struct k_poll_signal {
Anas Nashife71293e2019-12-04 20:00:14 -05005211 /** PRIVATE - DO NOT TOUCH */
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +03005212 sys_dlist_t poll_events;
Benjamin Walshacc68c12017-01-29 18:57:45 -05005213
Anas Nashife71293e2019-12-04 20:00:14 -05005214 /**
Benjamin Walshacc68c12017-01-29 18:57:45 -05005215 * 1 if the event has been signaled, 0 otherwise. Stays set to 1 until
5216 * the user resets it to 0.
5217 */
5218 unsigned int signaled;
5219
Anas Nashife71293e2019-12-04 20:00:14 -05005220 /** custom result value passed to k_poll_signal_raise() if needed */
Benjamin Walshacc68c12017-01-29 18:57:45 -05005221 int result;
5222};
5223
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +03005224#define K_POLL_SIGNAL_INITIALIZER(obj) \
Benjamin Walshacc68c12017-01-29 18:57:45 -05005225 { \
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +03005226 .poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
Benjamin Walshacc68c12017-01-29 18:57:45 -05005227 .signaled = 0, \
5228 .result = 0, \
5229 }
Anas Nashife71293e2019-12-04 20:00:14 -05005230/**
5231 * @brief Poll Event
5232 *
5233 */
Benjamin Walshacc68c12017-01-29 18:57:45 -05005234struct k_poll_event {
Anas Nashife71293e2019-12-04 20:00:14 -05005235 /** PRIVATE - DO NOT TOUCH */
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +03005236 sys_dnode_t _node;
5237
Anas Nashife71293e2019-12-04 20:00:14 -05005238 /** PRIVATE - DO NOT TOUCH */
Andy Ross202adf52020-11-10 09:54:49 -08005239 struct z_poller *poller;
Benjamin Walshacc68c12017-01-29 18:57:45 -05005240
Anas Nashife71293e2019-12-04 20:00:14 -05005241 /** optional user-specified tag, opaque, untouched by the API */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005242 uint32_t tag:8;
Benjamin Walsh969d4a72017-02-02 11:25:11 -05005243
Anas Nashife71293e2019-12-04 20:00:14 -05005244 /** bitfield of event types (bitwise-ORed K_POLL_TYPE_xxx values) */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005245 uint32_t type:_POLL_NUM_TYPES;
Benjamin Walshacc68c12017-01-29 18:57:45 -05005246
Anas Nashife71293e2019-12-04 20:00:14 -05005247 /** bitfield of event states (bitwise-ORed K_POLL_STATE_xxx values) */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005248 uint32_t state:_POLL_NUM_STATES;
Benjamin Walshacc68c12017-01-29 18:57:45 -05005249
Anas Nashife71293e2019-12-04 20:00:14 -05005250 /** mode of operation, from enum k_poll_modes */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005251 uint32_t mode:1;
Benjamin Walshacc68c12017-01-29 18:57:45 -05005252
Anas Nashife71293e2019-12-04 20:00:14 -05005253 /** unused bits in 32-bit word */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005254 uint32_t unused:_POLL_EVENT_NUM_UNUSED_BITS;
Benjamin Walshacc68c12017-01-29 18:57:45 -05005255
Anas Nashife71293e2019-12-04 20:00:14 -05005256 /** per-type data */
Benjamin Walshacc68c12017-01-29 18:57:45 -05005257 union {
5258 void *obj;
5259 struct k_poll_signal *signal;
5260 struct k_sem *sem;
5261 struct k_fifo *fifo;
Luiz Augusto von Dentze5ed88f2017-02-21 15:27:20 +02005262 struct k_queue *queue;
Nick Gravesb445f132021-04-12 12:35:18 -07005263 struct k_msgq *msgq;
Benjamin Walshacc68c12017-01-29 18:57:45 -05005264 };
5265};
5266
Spoorthy Priya Yerabolu9247e8b2020-08-25 03:11:16 -07005267#define K_POLL_EVENT_INITIALIZER(_event_type, _event_mode, _event_obj) \
Benjamin Walshacc68c12017-01-29 18:57:45 -05005268 { \
5269 .poller = NULL, \
Spoorthy Priya Yerabolu9247e8b2020-08-25 03:11:16 -07005270 .type = _event_type, \
Benjamin Walshacc68c12017-01-29 18:57:45 -05005271 .state = K_POLL_STATE_NOT_READY, \
Spoorthy Priya Yerabolu9247e8b2020-08-25 03:11:16 -07005272 .mode = _event_mode, \
Benjamin Walshacc68c12017-01-29 18:57:45 -05005273 .unused = 0, \
Daniel Leung087fb942021-03-24 12:45:01 -07005274 { \
5275 .obj = _event_obj, \
5276 }, \
Benjamin Walsh969d4a72017-02-02 11:25:11 -05005277 }
5278
Spoorthy Priya Yerabolu9247e8b2020-08-25 03:11:16 -07005279#define K_POLL_EVENT_STATIC_INITIALIZER(_event_type, _event_mode, _event_obj, \
Benjamin Walsh969d4a72017-02-02 11:25:11 -05005280 event_tag) \
5281 { \
Benjamin Walsh969d4a72017-02-02 11:25:11 -05005282 .tag = event_tag, \
Spoorthy Priya Yerabolu9247e8b2020-08-25 03:11:16 -07005283 .type = _event_type, \
Benjamin Walsh969d4a72017-02-02 11:25:11 -05005284 .state = K_POLL_STATE_NOT_READY, \
Spoorthy Priya Yerabolu9247e8b2020-08-25 03:11:16 -07005285 .mode = _event_mode, \
Benjamin Walsh969d4a72017-02-02 11:25:11 -05005286 .unused = 0, \
Daniel Leung087fb942021-03-24 12:45:01 -07005287 { \
5288 .obj = _event_obj, \
5289 }, \
Benjamin Walshacc68c12017-01-29 18:57:45 -05005290 }
5291
5292/**
5293 * @brief Initialize one struct k_poll_event instance
5294 *
5295 * After this routine is called on a poll event, the event is ready to be
5296 * placed in an event array to be passed to k_poll().
5297 *
5298 * @param event The event to initialize.
5299 * @param type A bitfield of the types of event, from the K_POLL_TYPE_xxx
5300 * values. Only values that apply to the same object being polled
5301 * can be used together. Choosing K_POLL_TYPE_IGNORE disables the
5302 * event.
Paul Sokolovskycfef9792017-07-18 11:53:06 +03005303 * @param mode Future. Use K_POLL_MODE_NOTIFY_ONLY.
Benjamin Walshacc68c12017-01-29 18:57:45 -05005304 * @param obj Kernel object or poll signal.
5305 *
5306 * @return N/A
5307 */
5308
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005309extern void k_poll_event_init(struct k_poll_event *event, uint32_t type,
Benjamin Walshacc68c12017-01-29 18:57:45 -05005310 int mode, void *obj);
5311
5312/**
5313 * @brief Wait for one or many of multiple poll events to occur
5314 *
5315 * This routine allows a thread to wait concurrently for one or many of
5316 * multiple poll events to have occurred. Such events can be a kernel object
5317 * being available, like a semaphore, or a poll signal event.
5318 *
5319 * When an event notifies that a kernel object is available, the kernel object
5320 * is not "given" to the thread calling k_poll(): it merely signals the fact
5321 * that the object was available when the k_poll() call was in effect. Also,
5322 * all threads trying to acquire an object the regular way, i.e. by pending on
5323 * the object, have precedence over the thread polling on the object. This
5324 * means that the polling thread will never get the poll event on an object
5325 * until the object becomes available and its pend queue is empty. For this
5326 * reason, the k_poll() call is more effective when the objects being polled
5327 * only have one thread, the polling thread, trying to acquire them.
5328 *
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +03005329 * When k_poll() returns 0, the caller should loop on all the events that were
5330 * passed to k_poll() and check the state field for the values that were
5331 * expected and take the associated actions.
Benjamin Walshacc68c12017-01-29 18:57:45 -05005332 *
5333 * Before being reused for another call to k_poll(), the user has to reset the
5334 * state field to K_POLL_STATE_NOT_READY.
5335 *
Andrew Boie3772f772018-05-07 16:52:57 -07005336 * When called from user mode, a temporary memory allocation is required from
5337 * the caller's resource pool.
5338 *
Christian Taedcke7a7c4202020-06-30 12:02:14 +02005339 * @param events An array of events to be polled for.
Benjamin Walshacc68c12017-01-29 18:57:45 -05005340 * @param num_events The number of events in the array.
Andy Ross78327382020-03-05 15:18:14 -08005341 * @param timeout Waiting period for an event to be ready,
5342 * or one of the special values K_NO_WAIT and K_FOREVER.
Benjamin Walshacc68c12017-01-29 18:57:45 -05005343 *
5344 * @retval 0 One or more events are ready.
Benjamin Walshacc68c12017-01-29 18:57:45 -05005345 * @retval -EAGAIN Waiting period timed out.
Paul Sokolovsky45c0b202018-08-21 23:29:11 +03005346 * @retval -EINTR Polling has been interrupted, e.g. with
5347 * k_queue_cancel_wait(). All output events are still set and valid,
5348 * cancelled event(s) will be set to K_POLL_STATE_CANCELLED. In other
5349 * words, -EINTR status means that at least one of the output events is
5350 * K_POLL_STATE_CANCELLED.
Andrew Boie3772f772018-05-07 16:52:57 -07005351 * @retval -ENOMEM Thread resource pool insufficient memory (user mode only)
5352 * @retval -EINVAL Bad parameters (user mode only)
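 *
 * A minimal sketch waiting on a semaphore and a poll signal at once;
 * @c my_sem and @c my_signal are hypothetical objects initialized
 * elsewhere:
 *
 * @code
 * struct k_poll_event events[2];
 *
 * k_poll_event_init(&events[0], K_POLL_TYPE_SEM_AVAILABLE,
 *                   K_POLL_MODE_NOTIFY_ONLY, &my_sem);
 * k_poll_event_init(&events[1], K_POLL_TYPE_SIGNAL,
 *                   K_POLL_MODE_NOTIFY_ONLY, &my_signal);
 *
 * if (k_poll(events, 2, K_FOREVER) == 0) {
 *     if (events[0].state == K_POLL_STATE_SEM_AVAILABLE) {
 *         // the object is not "given" by k_poll(); take it explicitly
 *         k_sem_take(&my_sem, K_NO_WAIT);
 *     }
 *     // reset states before reusing the events
 *     events[0].state = K_POLL_STATE_NOT_READY;
 *     events[1].state = K_POLL_STATE_NOT_READY;
 * }
 * @endcode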
Benjamin Walshacc68c12017-01-29 18:57:45 -05005353 */
5354
Andrew Boie3772f772018-05-07 16:52:57 -07005355__syscall int k_poll(struct k_poll_event *events, int num_events,
Andy Ross78327382020-03-05 15:18:14 -08005356 k_timeout_t timeout);
Benjamin Walshacc68c12017-01-29 18:57:45 -05005357
5358/**
Benjamin Walsha304f162017-02-02 16:46:09 -05005359 * @brief Initialize a poll signal object.
5360 *
Flavio Ceolinaecd4ec2018-11-02 12:35:30 -07005361 * Ready a poll signal object to be signaled via k_poll_signal_raise().
Benjamin Walsha304f162017-02-02 16:46:09 -05005362 *
Anas Nashifb503be22021-03-22 08:09:55 -04005363 * @param sig A poll signal.
Benjamin Walsha304f162017-02-02 16:46:09 -05005364 *
5365 * @return N/A
5366 */
5367
Anas Nashifb503be22021-03-22 08:09:55 -04005368__syscall void k_poll_signal_init(struct k_poll_signal *sig);
Andrew Boie3772f772018-05-07 16:52:57 -07005369
5370/**
5371 * @brief Reset a poll signal object's state to unsignaled.
5372 *
Anas Nashifb503be22021-03-22 08:09:55 -04005373 * @param sig A poll signal object
Andrew Boie3772f772018-05-07 16:52:57 -07005374 */
Anas Nashifb503be22021-03-22 08:09:55 -04005375__syscall void k_poll_signal_reset(struct k_poll_signal *sig);
Andrew Boie3772f772018-05-07 16:52:57 -07005376
Anas Nashifb503be22021-03-22 08:09:55 -04005377static inline void z_impl_k_poll_signal_reset(struct k_poll_signal *sig)
Andrew Boie3772f772018-05-07 16:52:57 -07005378{
Anas Nashifb503be22021-03-22 08:09:55 -04005379 sig->signaled = 0U;
Andrew Boie3772f772018-05-07 16:52:57 -07005380}
5381
5382/**
David B. Kinderfcbd8fb2018-05-23 12:06:24 -07005383 * @brief Fetch the signaled state and result value of a poll signal
Andrew Boie3772f772018-05-07 16:52:57 -07005384 *
Anas Nashifb503be22021-03-22 08:09:55 -04005385 * @param sig A poll signal object
Andrew Boie3772f772018-05-07 16:52:57 -07005386 * @param signaled An integer buffer which will be written nonzero if the
5387 * object was signaled
5388 * @param result An integer destination buffer which will be written with the
David B. Kinderfcbd8fb2018-05-23 12:06:24 -07005389 * result value if the object was signaled, or an undefined
Andrew Boie3772f772018-05-07 16:52:57 -07005390 * value if it was not.
5391 */
Anas Nashifb503be22021-03-22 08:09:55 -04005392__syscall void k_poll_signal_check(struct k_poll_signal *sig,
Andrew Boie3772f772018-05-07 16:52:57 -07005393 unsigned int *signaled, int *result);
Benjamin Walsha304f162017-02-02 16:46:09 -05005394
5395/**
Benjamin Walshacc68c12017-01-29 18:57:45 -05005396 * @brief Signal a poll signal object.
5397 *
5398 * This routine makes ready a poll signal, which is basically a poll event of
5399 * type K_POLL_TYPE_SIGNAL. If a thread was polling on that event, it will be
5400 * made ready to run. A @a result value can be specified.
5401 *
5402 * The poll signal contains a 'signaled' field that, when set by
Flavio Ceolinaecd4ec2018-11-02 12:35:30 -07005403 * k_poll_signal_raise(), stays set until the user sets it back to 0 with
Andrew Boie3772f772018-05-07 16:52:57 -07005404 * k_poll_signal_reset(). It thus has to be reset by the user before being
5405 * passed again to k_poll() or k_poll() will consider it to be signaled, and
5406 * will return immediately.
Benjamin Walshacc68c12017-01-29 18:57:45 -05005407 *
Peter A. Bigot773bd982019-04-30 07:06:39 -05005408 * @note The result is stored and the 'signaled' field is set even if
5409 * this function returns an error indicating that an expiring poll was
5410 * not notified. The next k_poll() will detect the missed raise.
5411 *
Anas Nashifb503be22021-03-22 08:09:55 -04005412 * @param sig A poll signal.
Benjamin Walshacc68c12017-01-29 18:57:45 -05005413 * @param result The value to store in the result field of the signal.
5414 *
5415 * @retval 0 The signal was delivered successfully.
5416 * @retval -EAGAIN The polling thread's timeout is in the process of expiring.
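 *
 * A minimal sketch; one context waits while another raises the signal
 * (the names are hypothetical):
 *
 * @code
 * // waiter
 * struct k_poll_signal sig;
 * struct k_poll_event evt;
 *
 * k_poll_signal_init(&sig);
 * k_poll_event_init(&evt, K_POLL_TYPE_SIGNAL, K_POLL_MODE_NOTIFY_ONLY, &sig);
 * k_poll(&evt, 1, K_FOREVER);
 *
 * // raiser, e.g. from an ISR
 * k_poll_signal_raise(&sig, 0x1337);
 * @endcode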
5417 */
5418
Anas Nashifb503be22021-03-22 08:09:55 -04005419__syscall int k_poll_signal_raise(struct k_poll_signal *sig, int result);
Benjamin Walshacc68c12017-01-29 18:57:45 -05005420
Anas Nashif954d5502018-02-25 08:37:28 -06005421/**
5422 * @internal
5423 */
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005424extern void z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state);
Benjamin Walshacc68c12017-01-29 18:57:45 -05005425
Anas Nashif166f5192018-02-25 08:02:36 -06005426/** @} */
Benjamin Walshacc68c12017-01-29 18:57:45 -05005427
Benjamin Walshc3a2bbb2016-12-14 13:04:36 -05005428/**
Anas Nashif30c3cff2019-01-22 08:18:13 -05005429 * @defgroup cpu_idle_apis CPU Idling APIs
5430 * @ingroup kernel_apis
5431 * @{
5432 */
Anas Nashif30c3cff2019-01-22 08:18:13 -05005433/**
Benjamin Walshc3a2bbb2016-12-14 13:04:36 -05005434 * @brief Make the CPU idle.
5435 *
5436 * This function makes the CPU idle until an event wakes it up.
5437 *
5438 * In a regular system, the idle thread should be the only thread responsible
5439 * for making the CPU idle and triggering any type of power management.
5440 * However, in some more constrained systems, such as a single-threaded system,
5441 * that single thread may be responsible for this if needed.
5442 *
Ioannis Glaropoulos91f6d982020-03-18 23:56:56 +01005443 * @note In some architectures, before returning, the function unmasks interrupts
5444 * unconditionally.
5445 *
Benjamin Walshc3a2bbb2016-12-14 13:04:36 -05005446 * @return N/A
5447 */
Andrew Boie07525a32019-09-21 16:17:23 -07005448static inline void k_cpu_idle(void)
5449{
Andrew Boie4f77c2a2019-11-07 12:43:29 -08005450 arch_cpu_idle();
Andrew Boie07525a32019-09-21 16:17:23 -07005451}
Benjamin Walshc3a2bbb2016-12-14 13:04:36 -05005452
5453/**
5454 * @brief Make the CPU idle in an atomic fashion.
5455 *
Peter Bigot88e756e2020-09-29 10:43:10 -05005456 * Similar to k_cpu_idle(), but must be called with interrupts locked.
5457 *
5458 * Enabling interrupts and entering a low-power mode will be atomic,
5459 * i.e. there will be no period of time where interrupts are enabled before
5460 * the processor enters a low-power mode.
5461 *
5462 * After waking up from the low-power mode, the interrupt lockout state will
5463 * be restored as if by irq_unlock(key).
Benjamin Walshc3a2bbb2016-12-14 13:04:36 -05005464 *
5465 * @param key Interrupt locking key obtained from irq_lock().
5466 *
5467 * @return N/A
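 *
 * A typical pattern, checking a wake condition under the interrupt lock;
 * @c work_pending is a hypothetical flag:
 *
 * @code
 * unsigned int key = irq_lock();
 *
 * if (!work_pending) {
 *     k_cpu_atomic_idle(key);  // sleeps; restores lockout state on wakeup
 * } else {
 *     irq_unlock(key);
 * }
 * @endcode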
5468 */
Andrew Boie07525a32019-09-21 16:17:23 -07005469static inline void k_cpu_atomic_idle(unsigned int key)
5470{
Andrew Boie4f77c2a2019-11-07 12:43:29 -08005471 arch_cpu_atomic_idle(key);
Andrew Boie07525a32019-09-21 16:17:23 -07005472}
Benjamin Walshc3a2bbb2016-12-14 13:04:36 -05005473
Anas Nashif30c3cff2019-01-22 08:18:13 -05005474/**
5475 * @}
5476 */
Anas Nashif954d5502018-02-25 08:37:28 -06005477
5478/**
5479 * @internal
5480 */
Andrew Boie4f77c2a2019-11-07 12:43:29 -08005481#ifdef ARCH_EXCEPT
Ioannis Glaropoulosdf029232019-10-07 11:24:36 +02005482/* This architecture has direct support for triggering a CPU exception */
Andrew Boie4f77c2a2019-11-07 12:43:29 -08005483#define z_except_reason(reason) ARCH_EXCEPT(reason)
Andrew Boiecdb94d62017-04-18 15:22:05 -07005484#else
5485
Joakim Anderssone04e4c22019-12-20 15:42:38 +01005486#if !defined(CONFIG_ASSERT_NO_FILE_INFO)
5487#define __EXCEPT_LOC() __ASSERT_PRINT("@ %s:%d\n", __FILE__, __LINE__)
5488#else
5489#define __EXCEPT_LOC()
5490#endif
5491
Andrew Boiecdb94d62017-04-18 15:22:05 -07005492/* NOTE: This is the implementation for arches that do not implement
Andrew Boie4f77c2a2019-11-07 12:43:29 -08005493 * ARCH_EXCEPT() to generate a real CPU exception.
Andrew Boiecdb94d62017-04-18 15:22:05 -07005494 *
5495 * We won't have a real exception frame to determine the PC value when
5496 * the oops occurred, so print file and line number before we jump into
5497 * the fatal error handler.
5498 */
Patrik Flykt4344e272019-03-08 14:19:05 -07005499#define z_except_reason(reason) do { \
Joakim Anderssone04e4c22019-12-20 15:42:38 +01005500 __EXCEPT_LOC(); \
Andrew Boie56236372019-07-15 15:22:29 -07005501 z_fatal_error(reason, NULL); \
Flavio Ceolin6fdc56d2018-09-18 12:32:27 -07005502 } while (false)
Andrew Boiecdb94d62017-04-18 15:22:05 -07005503
5504#endif /* ARCH_EXCEPT */
5505
5506/**
5507 * @brief Fatally terminate a thread
5508 *
5509 * This should be called when a thread has encountered an unrecoverable
5510 * runtime condition and needs to terminate. What this ultimately
5511 * means is determined by the _fatal_error_handler() implementation, which
Andrew Boie71ce8ce2019-07-11 14:18:28 -07005512 * will be called with reason code K_ERR_KERNEL_OOPS.
Andrew Boiecdb94d62017-04-18 15:22:05 -07005513 *
5514 * If this is called from ISR context, the default system fatal error handler
5515 * will treat it as an unrecoverable system error, just like k_panic().
5516 */
Andrew Boie71ce8ce2019-07-11 14:18:28 -07005517#define k_oops() z_except_reason(K_ERR_KERNEL_OOPS)
Andrew Boiecdb94d62017-04-18 15:22:05 -07005518
5519/**
5520 * @brief Fatally terminate the system
5521 *
5522 * This should be called when the Zephyr kernel has encountered an
5523 * unrecoverable runtime condition and needs to terminate. What this ultimately
5524 * means is determined by the _fatal_error_handler() implementation, which
Andrew Boie71ce8ce2019-07-11 14:18:28 -07005525 * will be called with reason code K_ERR_KERNEL_PANIC.
Andrew Boiecdb94d62017-04-18 15:22:05 -07005526 */
Andrew Boie71ce8ce2019-07-11 14:18:28 -07005527#define k_panic() z_except_reason(K_ERR_KERNEL_PANIC)
Andrew Boiecdb94d62017-04-18 15:22:05 -07005528
Benjamin Walsh456c6da2016-09-02 18:55:39 -04005529/*
5530 * private APIs that are utilized by one or more public APIs
5531 */
5532
Stephanos Ioannidis2d746042019-10-25 00:08:21 +09005533/**
5534 * @internal
5535 */
5536extern void z_init_thread_base(struct _thread_base *thread_base,
Kumar Galaa1b77fd2020-05-27 11:26:57 -05005537 int priority, uint32_t initial_state,
Stephanos Ioannidis2d746042019-10-25 00:08:21 +09005538 unsigned int options);
5539
Benjamin Walshb12a8e02016-12-14 15:24:12 -05005540#ifdef CONFIG_MULTITHREADING
Anas Nashif954d5502018-02-25 08:37:28 -06005541/**
5542 * @internal
5543 */
Patrik Flykt4344e272019-03-08 14:19:05 -07005544extern void z_init_static_threads(void);
Benjamin Walshb12a8e02016-12-14 15:24:12 -05005545#else
Anas Nashif954d5502018-02-25 08:37:28 -06005546/**
5547 * @internal
5548 */
Patrik Flykt4344e272019-03-08 14:19:05 -07005549#define z_init_static_threads() do { } while (false)
Benjamin Walshb12a8e02016-12-14 15:24:12 -05005550#endif
5551
Anas Nashif954d5502018-02-25 08:37:28 -06005552/**
5553 * @internal
5554 */
Patrik Flykt4344e272019-03-08 14:19:05 -07005555extern bool z_is_thread_essential(void);
Guennadi Liakhovetski8d07b772021-04-01 13:46:57 +02005556
5557#ifdef CONFIG_SMP
5558void z_smp_thread_init(void *arg, struct k_thread *thread);
5559void z_smp_thread_swap(void);
5560#endif
5561
Anas Nashif954d5502018-02-25 08:37:28 -06005562/**
5563 * @internal
5564 */
Patrik Flykt4344e272019-03-08 14:19:05 -07005565extern void z_timer_expiration_handler(struct _timeout *t);
Benjamin Walsh456c6da2016-09-02 18:55:39 -04005566
Andrew Boied76ae462020-01-02 11:57:43 -08005567#ifdef CONFIG_PRINTK
Andrew Boie756f9072017-10-10 16:01:49 -07005568/**
5569 * @brief Emit a character buffer to the console device
5570 *
5571 * @param c String of characters to print
5572 * @param n The length of the string
Anas Nashifc8e0d0c2018-05-21 11:09:59 -04005573 *
Andrew Boie756f9072017-10-10 16:01:49 -07005574 */
5575__syscall void k_str_out(char *c, size_t n);
Andrew Boied76ae462020-01-02 11:57:43 -08005576#endif
Andrew Boie756f9072017-10-10 16:01:49 -07005577
Ioannis Glaropoulosa6cb8b02019-05-09 21:55:10 +02005578/**
5579 * @brief Disable preservation of floating point context information.
5580 *
5581 * This routine informs the kernel that the specified thread
5582 * will no longer be using the floating point registers.
5583 *
5584 * @warning
5585 * Some architectures apply restrictions on how the disabling of floating
Andrew Boie4f77c2a2019-11-07 12:43:29 -08005586 * point preservation may be requested, see arch_float_disable.
Ioannis Glaropoulosa6cb8b02019-05-09 21:55:10 +02005587 *
5588 * @warning
5589 * This routine should only be used to disable floating point support for
5590 * a thread that currently has such support enabled.
5591 *
5592 * @param thread ID of thread.
5593 *
Katsuhiro Suzuki19db4852021-03-24 01:54:15 +09005594 * @retval 0 On success.
5595 * @retval -ENOTSUP If the floating point disabling is not implemented.
5596 * @retval -EINVAL If the floating point disabling could not be performed.
Ioannis Glaropoulosa6cb8b02019-05-09 21:55:10 +02005597 */
5598__syscall int k_float_disable(struct k_thread *thread);
5599
Katsuhiro Suzuki59903e22021-02-01 15:16:53 +09005600/**
5601 * @brief Enable preservation of floating point context information.
5602 *
5603 * This routine informs the kernel that the specified thread
5604 * will use the floating point registers.
5605 *
5606 * Invoking this routine initializes the thread's floating point context info
5607 * to that of an FPU that has been reset. The next time the thread is scheduled
5608 * by z_swap() it will either inherit an FPU that is guaranteed to be in a
5609 * "sane" state (if the most recent user of the FPU was cooperatively swapped
5610 * out) or the thread's own floating point context will be loaded (if the most
5611 * recent user of the FPU was preempted, or if this thread is the first user
5612 * of the FPU). Thereafter, the kernel will protect the thread's FP context
5613 * so that it is not altered during a preemptive context switch.
5614 *
5615 * The @a options parameter indicates which floating point register sets will
5616 * be used by the specified thread.
5617 *
5618 * For x86 options:
5619 *
5620 * - K_FP_REGS indicates x87 FPU and MMX registers only
5621 * - K_SSE_REGS indicates SSE registers (and also x87 FPU and MMX registers)
5622 *
5623 * @warning
5624 * Some architectures apply restrictions on how the enabling of floating
5625 * point preservation may be requested, see arch_float_enable.
5626 *
5627 * @warning
5628 * This routine should only be used to enable floating point support for
5629 * a thread that does not currently have such support enabled.
5630 *
5631 * @param thread ID of thread.
5632 * @param options architecture dependent options
5633 *
5634 * @retval 0 On success.
5635 * @retval -ENOTSUP If the floating point enabling is not implemented.
5636 * @retval -EINVAL If the floating point enabling could not be performed.
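 *
 * An x86-flavored sketch using the K_FP_REGS option described above; the
 * thread object is hypothetical:
 *
 * @code
 * extern struct k_thread fpu_thread;
 *
 * int rc = k_float_enable(&fpu_thread, K_FP_REGS);
 * @endcode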
5637 */
5638__syscall int k_float_enable(struct k_thread *thread, unsigned int options);
5639
Daniel Leungfc577c42020-08-27 13:54:14 -07005640#ifdef CONFIG_THREAD_RUNTIME_STATS
5641
5642/**
5643 * @brief Get the runtime statistics of a thread
5644 *
5645 * @param thread ID of thread.
5646 * @param stats Pointer to struct to copy statistics into.
5647 * @return -EINVAL if null pointers, otherwise 0
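 *
 * For example, a sketch reading the current thread's statistics (this
 * assumes an @c execution_cycles field in k_thread_runtime_stats_t):
 *
 * @code
 * k_thread_runtime_stats_t stats;
 *
 * if (k_thread_runtime_stats_get(k_current_get(), &stats) == 0) {
 *     // stats.execution_cycles holds the thread's accumulated cycles
 * }
 * @endcode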
5648 */
5649int k_thread_runtime_stats_get(k_tid_t thread,
5650 k_thread_runtime_stats_t *stats);
5651
5652/**
5653 * @brief Get the runtime statistics of all threads
5654 *
5655 * @param stats Pointer to struct to copy statistics into.
5656 * @return -EINVAL if null pointers, otherwise 0
5657 */
5658int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats);
5659
5660#endif
5661
Benjamin Walsh456c6da2016-09-02 18:55:39 -04005662#ifdef __cplusplus
5663}
5664#endif
5665
Anas Nashif73008b42020-02-06 09:14:51 -05005666#include <tracing/tracing.h>
Andrew Boiefa94ee72017-09-28 16:54:35 -07005667#include <syscalls/kernel.h>
5668
Benjamin Walshdfa7ce52017-01-22 17:06:05 -05005669#endif /* !_ASMLANGUAGE */
5670
Flavio Ceolin67ca1762018-09-14 10:43:44 -07005671#endif /* ZEPHYR_INCLUDE_KERNEL_H_ */