/*
* Copyright (c) 2016, Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
*
* @brief Public kernel APIs.
*/
#ifndef ZEPHYR_INCLUDE_KERNEL_H_
#define ZEPHYR_INCLUDE_KERNEL_H_
#if !defined(_ASMLANGUAGE)
#include <zephyr/kernel_includes.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <zephyr/toolchain.h>
#include <zephyr/tracing/tracing_macros.h>
#include <zephyr/sys/mem_stats.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Zephyr currently assumes the size of a couple of standard types to simplify
 * print string formats. Let's make sure this doesn't change without notice.
*/
BUILD_ASSERT(sizeof(int32_t) == sizeof(int));
BUILD_ASSERT(sizeof(int64_t) == sizeof(long long));
BUILD_ASSERT(sizeof(intptr_t) == sizeof(long));
/**
* @brief Kernel APIs
* @defgroup kernel_apis Kernel APIs
* @{
* @}
*/
#define K_ANY NULL
#define K_END NULL
#if CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES == 0
#error Zero available thread priorities defined!
#endif
#define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
#define K_PRIO_PREEMPT(x) (x)
#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
#define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
#define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
#define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
#define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)
#ifdef CONFIG_POLL
#define _POLL_EVENT_OBJ_INIT(obj) \
.poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
#define _POLL_EVENT sys_dlist_t poll_events
#else
#define _POLL_EVENT_OBJ_INIT(obj)
#define _POLL_EVENT
#endif
struct k_thread;
struct k_mutex;
struct k_sem;
struct k_msgq;
struct k_mbox;
struct k_pipe;
struct k_queue;
struct k_fifo;
struct k_lifo;
struct k_stack;
struct k_mem_slab;
struct k_mem_pool;
struct k_timer;
struct k_poll_event;
struct k_poll_signal;
struct k_mem_domain;
struct k_mem_partition;
struct k_futex;
struct k_event;
enum execution_context_types {
K_ISR = 0,
K_COOP_THREAD,
K_PREEMPT_THREAD,
};
/* private, used by k_poll and k_work_poll */
struct k_work_poll;
typedef int (*_poller_cb_t)(struct k_poll_event *event, uint32_t state);
/**
* @addtogroup thread_apis
* @{
*/
typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
void *user_data);
/**
* @brief Iterate over all the threads in the system.
*
* This routine iterates over all the threads in the system and
* calls the user_cb function for each thread.
*
* @param user_cb Pointer to the user callback function.
* @param user_data Pointer to user data.
*
* @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
* to be effective.
 * @note This API uses @ref k_spin_lock to protect the _kernel.threads
 * list, which means that creation of new threads and termination of
 * existing threads are blocked until this API returns.
*/
extern void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);
/**
* @brief Iterate over all the threads in the system without locking.
*
 * This routine works exactly the same as @ref k_thread_foreach,
 * but interrupts are unlocked while user_cb is executed.
*
* @param user_cb Pointer to the user callback function.
* @param user_data Pointer to user data.
*
* @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
* to be effective.
* @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
* queue elements. It unlocks it during user callback function processing.
 * If a new thread is created while this @c foreach function is in progress,
 * the new thread will not be included in the enumeration.
 * If a thread is aborted during this enumeration, there is a race and the
 * aborted thread may still be included in the enumeration.
 * @note If a thread is aborted and the memory occupied by its @c k_thread
 * structure is reused while this @c k_thread_foreach_unlocked is in progress,
 * the system may even become unstable.
 * This function may never return, as it would follow @c next thread
 * pointers while treating the given pointer as a pointer to a k_thread
 * structure when it is now something else entirely.
 * Do not reuse the memory occupied by the k_thread structure of an aborted
 * thread if the thread was aborted after this function was called, in any
 * context.
*/
extern void k_thread_foreach_unlocked(
k_thread_user_cb_t user_cb, void *user_data);
/** @} */
/**
* @defgroup thread_apis Thread APIs
* @ingroup kernel_apis
* @{
*/
#endif /* !_ASMLANGUAGE */
/*
* Thread user options. May be needed by assembly code. Common part uses low
* bits, arch-specific use high bits.
*/
/**
* @brief system thread that must not abort
* */
#define K_ESSENTIAL (BIT(0))
#if defined(CONFIG_FPU_SHARING)
/**
* @brief FPU registers are managed by context switch
*
* @details
* This option indicates that the thread uses the CPU's floating point
* registers. This instructs the kernel to take additional steps to save
* and restore the contents of these registers when scheduling the thread.
* No effect if @kconfig{CONFIG_FPU_SHARING} is not enabled.
*/
#define K_FP_REGS (BIT(1))
#endif
/**
* @brief user mode thread
*
* This thread has dropped from supervisor mode to user mode and consequently
* has additional restrictions
*/
#define K_USER (BIT(2))
/**
* @brief Inherit Permissions
*
* @details
* Indicates that the thread being created should inherit all kernel object
* permissions from the thread that created it. No effect if
* @kconfig{CONFIG_USERSPACE} is not enabled.
*/
#define K_INHERIT_PERMS (BIT(3))
/**
* @brief Callback item state
*
* @details
 * This is a single bit of state reserved for "callback manager"
 * utilities (initially p4wq) that need to track operations invoked
 * from within a user-provided callback they are currently executing.
* Effectively it serves as a tiny bit of zero-overhead TLS data.
*/
#define K_CALLBACK_STATE (BIT(4))
#ifdef CONFIG_X86
/* x86 Bitmask definitions for threads user options */
#if defined(CONFIG_FPU_SHARING) && defined(CONFIG_X86_SSE)
/**
* @brief FP and SSE registers are managed by context switch on x86
*
* @details
* This option indicates that the thread uses the x86 CPU's floating point
* and SSE registers. This instructs the kernel to take additional steps to
* save and restore the contents of these registers when scheduling
* the thread. No effect if @kconfig{CONFIG_X86_SSE} is not enabled.
*/
#define K_SSE_REGS (BIT(7))
#endif
#endif
/* end - thread options */
#if !defined(_ASMLANGUAGE)
/**
* @brief Create a thread.
*
* This routine initializes a thread, then schedules it for execution.
*
* The new thread may be scheduled for immediate execution or a delayed start.
* If the newly spawned thread does not have a delayed start the kernel
* scheduler may preempt the current thread to allow the new thread to
* execute.
*
* Thread options are architecture-specific, and can include K_ESSENTIAL,
* K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
* them using "|" (the logical OR operator).
*
* Stack objects passed to this function must be originally defined with
* either of these macros in order to be portable:
*
* - K_THREAD_STACK_DEFINE() - For stacks that may support either user or
* supervisor threads.
* - K_KERNEL_STACK_DEFINE() - For stacks that may support supervisor
* threads only. These stacks use less memory if CONFIG_USERSPACE is
* enabled.
*
* The stack_size parameter has constraints. It must either be:
*
* - The original size value passed to K_THREAD_STACK_DEFINE() or
* K_KERNEL_STACK_DEFINE()
* - The return value of K_THREAD_STACK_SIZEOF(stack) if the stack was
* defined with K_THREAD_STACK_DEFINE()
* - The return value of K_KERNEL_STACK_SIZEOF(stack) if the stack was
* defined with K_KERNEL_STACK_DEFINE().
*
 * Using other values, or sizeof(stack), may produce undefined behavior.
*
* @param new_thread Pointer to uninitialized struct k_thread
* @param stack Pointer to the stack space.
* @param stack_size Stack size in bytes.
* @param entry Thread entry function.
* @param p1 1st entry point parameter.
* @param p2 2nd entry point parameter.
* @param p3 3rd entry point parameter.
* @param prio Thread priority.
* @param options Thread options.
* @param delay Scheduling delay, or K_NO_WAIT (for no delay).
*
* @return ID of new thread.
*
*/
__syscall k_tid_t k_thread_create(struct k_thread *new_thread,
k_thread_stack_t *stack,
size_t stack_size,
k_thread_entry_t entry,
void *p1, void *p2, void *p3,
int prio, uint32_t options, k_timeout_t delay);
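/*
 * Example: creating a worker thread. This is an illustrative sketch,
 * not part of this API; my_stack_area, my_thread_data, my_entry and the
 * chosen size and priority are hypothetical application values.
 *
 *	K_THREAD_STACK_DEFINE(my_stack_area, 1024);
 *	struct k_thread my_thread_data;
 *
 *	void my_entry(void *p1, void *p2, void *p3)
 *	{
 *		while (1) {
 *			k_msleep(100);
 *		}
 *	}
 *
 *	k_tid_t tid = k_thread_create(&my_thread_data, my_stack_area,
 *				      K_THREAD_STACK_SIZEOF(my_stack_area),
 *				      my_entry, NULL, NULL, NULL,
 *				      K_PRIO_PREEMPT(5), 0, K_NO_WAIT);
 */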
/**
* @brief Drop a thread's privileges permanently to user mode
*
* This allows a supervisor thread to be re-used as a user thread.
* This function does not return, but control will transfer to the provided
* entry point as if this was a new user thread.
*
* The implementation ensures that the stack buffer contents are erased.
* Any thread-local storage will be reverted to a pristine state.
*
* Memory domain membership, resource pool assignment, kernel object
* permissions, priority, and thread options are preserved.
*
* A common use of this function is to re-use the main thread as a user thread
* once all supervisor mode-only tasks have been completed.
*
* @param entry Function to start executing from
* @param p1 1st entry point parameter
* @param p2 2nd entry point parameter
* @param p3 3rd entry point parameter
*/
extern FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
void *p1, void *p2,
void *p3);
/**
* @brief Grant a thread access to a set of kernel objects
*
* This is a convenience function. For the provided thread, grant access to
* the remaining arguments, which must be pointers to kernel objects.
*
* The thread object must be initialized (i.e. running). The objects don't
* need to be.
* Note that NULL shouldn't be passed as an argument.
*
* @param thread Thread to grant access to objects
* @param ... list of kernel object pointers
*/
#define k_thread_access_grant(thread, ...) \
FOR_EACH_FIXED_ARG(k_object_access_grant, (;), thread, __VA_ARGS__)
/**
* @brief Assign a resource memory pool to a thread
*
* By default, threads have no resource pool assigned unless their parent
* thread has a resource pool, in which case it is inherited. Multiple
* threads may be assigned to the same memory pool.
*
* Changing a thread's resource pool will not migrate allocations from the
* previous pool.
*
* @param thread Target thread to assign a memory pool for resource requests.
* @param heap Heap object to use for resources,
* or NULL if the thread should no longer have a memory pool.
*/
static inline void k_thread_heap_assign(struct k_thread *thread,
struct k_heap *heap)
{
thread->resource_pool = heap;
}
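/*
 * Example: giving a thread a private resource pool. An illustrative
 * sketch; my_heap and my_thread_data are hypothetical application
 * symbols. Kernel allocations made on behalf of the thread, e.g. by
 * k_queue_alloc_append(), will then draw from my_heap.
 *
 *	K_HEAP_DEFINE(my_heap, 2048);
 *
 *	k_thread_heap_assign(&my_thread_data, &my_heap);
 */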
#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
/**
* @brief Obtain stack usage information for the specified thread
*
* User threads will need to have permission on the target thread object.
*
* Some hardware may prevent inspection of a stack buffer currently in use.
* If this API is called from supervisor mode, on the currently running thread,
* on a platform which selects @kconfig{CONFIG_NO_UNUSED_STACK_INSPECTION}, an
* error will be generated.
*
* @param thread Thread to inspect stack information
* @param unused_ptr Output parameter, filled in with the unused stack space
* of the target thread in bytes.
* @return 0 on success
* @return -EBADF Bad thread object (user mode only)
* @return -EPERM No permissions on thread object (user mode only)
 * @return -ENOTSUP Forbidden by hardware policy
* @return -EINVAL Thread is uninitialized or exited (user mode only)
* @return -EFAULT Bad memory address for unused_ptr (user mode only)
*/
__syscall int k_thread_stack_space_get(const struct k_thread *thread,
size_t *unused_ptr);
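/*
 * Example: reporting the current thread's unused stack space. An
 * illustrative sketch; assumes printk() is available via
 * <zephyr/sys/printk.h>.
 *
 *	size_t unused;
 *
 *	if (k_thread_stack_space_get(k_current_get(), &unused) == 0) {
 *		printk("unused stack: %zu bytes\n", unused);
 *	}
 */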
#endif
#if (CONFIG_HEAP_MEM_POOL_SIZE > 0)
/**
* @brief Assign the system heap as a thread's resource pool
*
 * Similar to k_thread_heap_assign(), but the thread will use
 * the kernel heap to draw memory.
*
* Use with caution, as a malicious thread could perform DoS attacks on the
* kernel heap.
*
* @param thread Target thread to assign the system heap for resource requests
*
*/
void k_thread_system_pool_assign(struct k_thread *thread);
#endif /* (CONFIG_HEAP_MEM_POOL_SIZE > 0) */
/**
* @brief Sleep until a thread exits
*
* The caller will be put to sleep until the target thread exits, either due
* to being aborted, self-exiting, or taking a fatal error. This API returns
* immediately if the thread isn't running.
*
* This API may only be called from ISRs with a K_NO_WAIT timeout,
* where it can be useful as a predicate to detect when a thread has
* aborted.
*
* @param thread Thread to wait to exit
 * @param timeout Upper bound on the time to wait for the thread to exit.
* @retval 0 success, target thread has exited or wasn't running
* @retval -EBUSY returned without waiting
* @retval -EAGAIN waiting period timed out
* @retval -EDEADLK target thread is joining on the caller, or target thread
* is the caller
*/
__syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);
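/*
 * Example: waiting up to one second for a worker thread to exit. An
 * illustrative sketch; tid is a hypothetical thread ID. A return value
 * of -EAGAIN means the worker was still running when the timeout
 * expired.
 *
 *	int ret = k_thread_join(tid, K_SECONDS(1));
 */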
/**
* @brief Put the current thread to sleep.
*
* This routine puts the current thread to sleep for @a duration,
* specified as a k_timeout_t object.
*
 * @note If @a timeout is set to K_FOREVER then the thread is suspended.
*
* @param timeout Desired duration of sleep.
*
 * @return Zero if the requested time has elapsed, or the number of
 * milliseconds left to sleep if the thread was woken up by a @ref k_wakeup
 * call.
*/
__syscall int32_t k_sleep(k_timeout_t timeout);
/**
* @brief Put the current thread to sleep.
*
* This routine puts the current thread to sleep for @a duration milliseconds.
*
* @param ms Number of milliseconds to sleep.
*
 * @return Zero if the requested time has elapsed, or the number of
 * milliseconds left to sleep if the thread was woken up by a @ref k_wakeup
 * call.
*/
static inline int32_t k_msleep(int32_t ms)
{
return k_sleep(Z_TIMEOUT_MS(ms));
}
/**
* @brief Put the current thread to sleep with microsecond resolution.
*
* This function is unlikely to work as expected without kernel tuning.
* In particular, because the lower bound on the duration of a sleep is
* the duration of a tick, @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} must be
* adjusted to achieve the resolution desired. The implications of doing
* this must be understood before attempting to use k_usleep(). Use with
* caution.
*
* @param us Number of microseconds to sleep.
*
 * @return Zero if the requested time has elapsed, or the number of
 * microseconds left to sleep if the thread was woken up by a @ref k_wakeup
 * call.
*/
__syscall int32_t k_usleep(int32_t us);
/**
* @brief Cause the current thread to busy wait.
*
* This routine causes the current thread to execute a "do nothing" loop for
* @a usec_to_wait microseconds.
*
* @note The clock used for the microsecond-resolution delay here may
* be skewed relative to the clock used for system timeouts like
* k_sleep(). For example k_busy_wait(1000) may take slightly more or
* less time than k_sleep(K_MSEC(1)), with the offset dependent on
* clock tolerances.
*/
__syscall void k_busy_wait(uint32_t usec_to_wait);
/**
* @brief Check whether it is possible to yield in the current context.
*
* This routine checks whether the kernel is in a state where it is possible to
* yield or call blocking API's. It should be used by code that needs to yield
* to perform correctly, but can feasibly be called from contexts where that
* is not possible. For example in the PRE_KERNEL initialization step, or when
* being run from the idle thread.
*
* @return True if it is possible to yield in the current context, false otherwise.
*/
bool k_can_yield(void);
/**
* @brief Yield the current thread.
*
* This routine causes the current thread to yield execution to another
* thread of the same or higher priority. If there are no other ready threads
* of the same or higher priority, the routine returns immediately.
*/
__syscall void k_yield(void);
/**
* @brief Wake up a sleeping thread.
*
* This routine prematurely wakes up @a thread from sleeping.
*
* If @a thread is not currently sleeping, the routine has no effect.
*
* @param thread ID of thread to wake.
*/
__syscall void k_wakeup(k_tid_t thread);
/**
* @brief Get thread ID of the current thread.
*
* This unconditionally queries the kernel via a system call.
*
* @return ID of current thread.
*/
__attribute_const__
__syscall k_tid_t z_current_get(void);
#ifdef CONFIG_THREAD_LOCAL_STORAGE
/* Thread-local cache of current thread ID, set in z_thread_entry() */
extern __thread k_tid_t z_tls_current;
#endif
/**
* @brief Get thread ID of the current thread.
*
* @return ID of current thread.
*
*/
__attribute_const__
static inline k_tid_t k_current_get(void)
{
#ifdef CONFIG_THREAD_LOCAL_STORAGE
return z_tls_current;
#else
return z_current_get();
#endif
}
/**
* @brief Abort a thread.
*
* This routine permanently stops execution of @a thread. The thread is taken
* off all kernel queues it is part of (i.e. the ready queue, the timeout
* queue, or a kernel object wait queue). However, any kernel resources the
* thread might currently own (such as mutexes or memory blocks) are not
* released. It is the responsibility of the caller of this routine to ensure
* all necessary cleanup is performed.
*
* After k_thread_abort() returns, the thread is guaranteed not to be
* running or to become runnable anywhere on the system. Normally
* this is done via blocking the caller (in the same manner as
* k_thread_join()), but in interrupt context on SMP systems the
* implementation is required to spin for threads that are running on
* other CPUs. Note that as specified, this means that on SMP
* platforms it is possible for application code to create a deadlock
* condition by simultaneously aborting a cycle of threads using at
* least one termination from interrupt context. Zephyr cannot detect
* all such conditions.
*
* @param thread ID of thread to abort.
*/
__syscall void k_thread_abort(k_tid_t thread);
/**
* @brief Start an inactive thread
*
* If a thread was created with K_FOREVER in the delay parameter, it will
* not be added to the scheduling queue until this function is called
* on it.
*
* @param thread thread to start
*/
__syscall void k_thread_start(k_tid_t thread);
extern k_ticks_t z_timeout_expires(const struct _timeout *timeout);
extern k_ticks_t z_timeout_remaining(const struct _timeout *timeout);
#ifdef CONFIG_SYS_CLOCK_EXISTS
/**
* @brief Get time when a thread wakes up, in system ticks
*
* This routine computes the system uptime when a waiting thread next
* executes, in units of system ticks. If the thread is not waiting,
* it returns current system time.
*/
__syscall k_ticks_t k_thread_timeout_expires_ticks(const struct k_thread *t);
static inline k_ticks_t z_impl_k_thread_timeout_expires_ticks(
const struct k_thread *t)
{
return z_timeout_expires(&t->base.timeout);
}
/**
* @brief Get time remaining before a thread wakes up, in system ticks
*
* This routine computes the time remaining before a waiting thread
* next executes, in units of system ticks. If the thread is not
* waiting, it returns zero.
*/
__syscall k_ticks_t k_thread_timeout_remaining_ticks(const struct k_thread *t);
static inline k_ticks_t z_impl_k_thread_timeout_remaining_ticks(
const struct k_thread *t)
{
return z_timeout_remaining(&t->base.timeout);
}
#endif /* CONFIG_SYS_CLOCK_EXISTS */
/**
* @cond INTERNAL_HIDDEN
*/
/* timeout has timed out and is not on _timeout_q anymore */
#define _EXPIRED (-2)
struct _static_thread_data {
struct k_thread *init_thread;
k_thread_stack_t *init_stack;
unsigned int init_stack_size;
k_thread_entry_t init_entry;
void *init_p1;
void *init_p2;
void *init_p3;
int init_prio;
uint32_t init_options;
int32_t init_delay;
void (*init_abort)(void);
const char *init_name;
};
#define Z_THREAD_INITIALIZER(thread, stack, stack_size, \
entry, p1, p2, p3, \
prio, options, delay, abort, tname) \
{ \
.init_thread = (thread), \
.init_stack = (stack), \
.init_stack_size = (stack_size), \
.init_entry = (k_thread_entry_t)entry, \
.init_p1 = (void *)p1, \
.init_p2 = (void *)p2, \
.init_p3 = (void *)p3, \
.init_prio = (prio), \
.init_options = (options), \
.init_delay = (delay), \
.init_abort = (abort), \
.init_name = STRINGIFY(tname), \
}
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @brief Statically define and initialize a thread.
*
* The thread may be scheduled for immediate execution or a delayed start.
*
* Thread options are architecture-specific, and can include K_ESSENTIAL,
* K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
* them using "|" (the logical OR operator).
*
* The ID of the thread can be accessed using:
*
* @code extern const k_tid_t <name>; @endcode
*
* @param name Name of the thread.
* @param stack_size Stack size in bytes.
* @param entry Thread entry function.
* @param p1 1st entry point parameter.
* @param p2 2nd entry point parameter.
* @param p3 3rd entry point parameter.
* @param prio Thread priority.
* @param options Thread options.
* @param delay Scheduling delay (in milliseconds), zero for no delay.
*
*
* @internal It has been observed that the x86 compiler by default aligns
* these _static_thread_data structures to 32-byte boundaries, thereby
* wasting space. To work around this, force a 4-byte alignment.
*
*/
#define K_THREAD_DEFINE(name, stack_size, \
entry, p1, p2, p3, \
prio, options, delay) \
K_THREAD_STACK_DEFINE(_k_thread_stack_##name, stack_size); \
struct k_thread _k_thread_obj_##name; \
STRUCT_SECTION_ITERABLE(_static_thread_data, _k_thread_data_##name) = \
Z_THREAD_INITIALIZER(&_k_thread_obj_##name, \
_k_thread_stack_##name, stack_size, \
entry, p1, p2, p3, prio, options, delay, \
NULL, name); \
const k_tid_t name = (k_tid_t)&_k_thread_obj_##name
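/*
 * Example: a statically-defined thread that starts 500 ms after boot.
 * An illustrative sketch; blink_entry is a hypothetical entry function.
 *
 *	void blink_entry(void *p1, void *p2, void *p3);
 *
 *	K_THREAD_DEFINE(blinker, 512, blink_entry, NULL, NULL, NULL,
 *			K_PRIO_PREEMPT(7), 0, 500);
 *
 * Elsewhere, the thread may be referenced through its ID:
 *
 *	extern const k_tid_t blinker;
 *
 *	k_thread_join(blinker, K_FOREVER);
 */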
/**
* @brief Get a thread's priority.
*
* This routine gets the priority of @a thread.
*
* @param thread ID of thread whose priority is needed.
*
* @return Priority of @a thread.
*/
__syscall int k_thread_priority_get(k_tid_t thread);
/**
* @brief Set a thread's priority.
*
* This routine immediately changes the priority of @a thread.
*
* Rescheduling can occur immediately depending on the priority @a thread is
* set to:
*
* - If its priority is raised above the priority of the caller of this
* function, and the caller is preemptible, @a thread will be scheduled in.
*
* - If the caller operates on itself, it lowers its priority below that of
* other threads in the system, and the caller is preemptible, the thread of
* highest priority will be scheduled in.
*
* Priority can be assigned in the range of -CONFIG_NUM_COOP_PRIORITIES to
* CONFIG_NUM_PREEMPT_PRIORITIES-1, where -CONFIG_NUM_COOP_PRIORITIES is the
* highest priority.
*
* @param thread ID of thread whose priority is to be set.
* @param prio New priority.
*
* @warning Changing the priority of a thread currently involved in mutex
* priority inheritance may result in undefined behavior.
*/
__syscall void k_thread_priority_set(k_tid_t thread, int prio);
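/*
 * Example: temporarily boosting a worker for a latency-critical phase,
 * then restoring its original priority. An illustrative sketch; tid is
 * a hypothetical thread ID.
 *
 *	int old_prio = k_thread_priority_get(tid);
 *
 *	k_thread_priority_set(tid, K_PRIO_PREEMPT(0));
 *	...
 *	k_thread_priority_set(tid, old_prio);
 */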
#ifdef CONFIG_SCHED_DEADLINE
/**
* @brief Set deadline expiration time for scheduler
*
* This sets the "deadline" expiration as a time delta from the
* current time, in the same units used by k_cycle_get_32(). The
* scheduler (when deadline scheduling is enabled) will choose the
* next expiring thread when selecting between threads at the same
* static priority. Threads at different priorities will be scheduled
* according to their static priority.
*
* @note Deadlines are stored internally using 32 bit unsigned
* integers. The number of cycles between the "first" deadline in the
* scheduler queue and the "last" deadline must be less than 2^31 (i.e
* a signed non-negative quantity). Failure to adhere to this rule
* may result in scheduled threads running in an incorrect deadline
* order.
*
 * @note Despite the API naming, the scheduler makes no guarantee that
 * the thread WILL be scheduled within that deadline, nor does it take
* extra metadata (like e.g. the "runtime" and "period" parameters in
* Linux sched_setattr()) that allows the kernel to validate the
* scheduling for achievability. Such features could be implemented
* above this call, which is simply input to the priority selection
* logic.
*
* @note You should enable @kconfig{CONFIG_SCHED_DEADLINE} in your project
* configuration.
*
* @param thread A thread on which to set the deadline
* @param deadline A time delta, in cycle units
*
*/
__syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
#endif
#ifdef CONFIG_SCHED_CPU_MASK
/**
* @brief Sets all CPU enable masks to zero
*
* After this returns, the thread will no longer be schedulable on any
* CPUs. The thread must not be currently runnable.
*
* @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
* configuration.
*
* @param thread Thread to operate upon
* @return Zero on success, otherwise error code
*/
int k_thread_cpu_mask_clear(k_tid_t thread);
/**
* @brief Sets all CPU enable masks to one
*
* After this returns, the thread will be schedulable on any CPU. The
* thread must not be currently runnable.
*
* @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
* configuration.
*
* @param thread Thread to operate upon
* @return Zero on success, otherwise error code
*/
int k_thread_cpu_mask_enable_all(k_tid_t thread);
/**
 * @brief Enable a thread to run on the specified CPU
*
* The thread must not be currently runnable.
*
* @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
* configuration.
*
* @param thread Thread to operate upon
* @param cpu CPU index
* @return Zero on success, otherwise error code
*/
int k_thread_cpu_mask_enable(k_tid_t thread, int cpu);
/**
 * @brief Prevent a thread from running on the specified CPU
*
* The thread must not be currently runnable.
*
* @note You should enable @kconfig{CONFIG_SCHED_CPU_MASK} in your project
* configuration.
*
* @param thread Thread to operate upon
* @param cpu CPU index
* @return Zero on success, otherwise error code
*/
int k_thread_cpu_mask_disable(k_tid_t thread, int cpu);
/**
* @brief Pin a thread to a CPU
*
* Pin a thread to a CPU by first clearing the cpu mask and then enabling the
* thread on the selected CPU.
*
* @param thread Thread to operate upon
* @param cpu CPU index
* @return Zero on success, otherwise error code
*/
int k_thread_cpu_pin(k_tid_t thread, int cpu);
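/*
 * Example: pinning a thread to CPU 1 before it first runs. An
 * illustrative sketch; tid is assumed to reference a thread created
 * with a K_FOREVER delay, so that it is not yet runnable when the mask
 * is changed.
 *
 *	k_thread_cpu_pin(tid, 1);
 *	k_thread_start(tid);
 */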
#endif
/**
* @brief Suspend a thread.
*
* This routine prevents the kernel scheduler from making @a thread
* the current thread. All other internal operations on @a thread are
* still performed; for example, kernel objects it is waiting on are
* still handed to it. Note that any existing timeouts
* (e.g. k_sleep(), or a timeout argument to k_sem_take() et. al.)
* will be canceled. On resume, the thread will begin running
* immediately and return from the blocked call.
*
* If @a thread is already suspended, the routine has no effect.
*
* @param thread ID of thread to suspend.
*/
__syscall void k_thread_suspend(k_tid_t thread);
/**
* @brief Resume a suspended thread.
*
* This routine allows the kernel scheduler to make @a thread the current
* thread, when it is next eligible for that role.
*
* If @a thread is not currently suspended, the routine has no effect.
*
* @param thread ID of thread to resume.
*/
__syscall void k_thread_resume(k_tid_t thread);
/**
* @brief Set time-slicing period and scope.
*
* This routine specifies how the scheduler will perform time slicing of
* preemptible threads.
*
* To enable time slicing, @a slice must be non-zero. The scheduler
* ensures that no thread runs for more than the specified time limit
* before other threads of that priority are given a chance to execute.
* Any thread whose priority is higher than @a prio is exempted, and may
* execute as long as desired without being preempted due to time slicing.
*
* Time slicing only limits the maximum amount of time a thread may continuously
* execute. Once the scheduler selects a thread for execution, there is no
* minimum guaranteed time the thread will execute before threads of greater or
* equal priority are scheduled.
*
* When the current thread is the only one of that priority eligible
* for execution, this routine has no effect; the thread is immediately
* rescheduled after the slice period expires.
*
* To disable timeslicing, set both @a slice and @a prio to zero.
*
* @param slice Maximum time slice length (in milliseconds).
* @param prio Highest thread priority level eligible for time slicing.
*/
extern void k_sched_time_slice_set(int32_t slice, int prio);
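/*
 * Example: enabling 10 ms round-robin time slicing for preemptible
 * threads at priority level 0 and below (numerically greater or equal).
 * An illustrative sketch.
 *
 *	k_sched_time_slice_set(10, 0);
 */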
/**
* @brief Set thread time slice
*
* As for k_sched_time_slice_set, but (when
* CONFIG_TIMESLICE_PER_THREAD=y) sets the timeslice for a specific
* thread. When non-zero, this timeslice will take precedence over
* the global value.
*
* When such a thread's timeslice expires, the configured callback
* will be called before the thread is removed/re-added to the run
* queue. This callback will occur in interrupt context, and the
* specified thread is guaranteed to have been preempted by the
* currently-executing ISR. Such a callback is free to, for example,
* modify the thread priority or slice time for future execution,
* suspend the thread, etc...
*
* @note Unlike the older API, the time slice parameter here is
* specified in ticks, not milliseconds. Ticks have always been the
* internal unit, and not all platforms have integer conversions
* between the two.
*
* @note Threads with a non-zero slice time set will be timesliced
* always, even if they are higher priority than the maximum timeslice
* priority set via k_sched_time_slice_set().
*
* @note The callback notification for slice expiration happens, as it
* must, while the thread is still "current", and thus it happens
* before any registered timeouts at this tick. This has the somewhat
* confusing side effect that the tick time (c.f. k_uptime_get()) does
* not yet reflect the expired ticks. Applications wishing to make
* fine-grained timing decisions within this callback should use the
* cycle API, or derived facilities like k_thread_runtime_stats_get().
*
* @param th A valid, initialized thread
* @param slice_ticks Maximum timeslice, in ticks
* @param expired Callback function called on slice expiration
* @param data Parameter for the expiration handler
*/
void k_thread_time_slice_set(struct k_thread *th, int32_t slice_ticks,
k_thread_timeslice_fn_t expired, void *data);
/** @} */
/**
* @addtogroup isr_apis
* @{
*/
/**
* @brief Determine if code is running at interrupt level.
*
* This routine allows the caller to customize its actions, depending on
* whether it is a thread or an ISR.
*
* @funcprops \isr_ok
*
* @return false if invoked by a thread.
* @return true if invoked by an ISR.
*/
extern bool k_is_in_isr(void);
/**
* @brief Determine if code is running in a preemptible thread.
*
* This routine allows the caller to customize its actions, depending on
* whether it can be preempted by another thread. The routine returns a 'true'
* value if all of the following conditions are met:
*
 * - The code is running in a thread, not in an ISR.
* - The thread's priority is in the preemptible range.
* - The thread has not locked the scheduler.
*
* @funcprops \isr_ok
*
* @return 0 if invoked by an ISR or by a cooperative thread.
* @return Non-zero if invoked by a preemptible thread.
*/
__syscall int k_is_preempt_thread(void);
/**
* @brief Test whether startup is in the before-main-task phase.
*
* This routine allows the caller to customize its actions, depending on
 * whether it is being invoked before the kernel is fully active.
*
* @funcprops \isr_ok
*
* @return true if invoked before post-kernel initialization
* @return false if invoked during/after post-kernel initialization
*/
static inline bool k_is_pre_kernel(void)
{
extern bool z_sys_post_kernel; /* in init.c */
return !z_sys_post_kernel;
}
/**
* @}
*/
/**
* @addtogroup thread_apis
* @{
*/
/**
* @brief Lock the scheduler.
*
* This routine prevents the current thread from being preempted by another
* thread by instructing the scheduler to treat it as a cooperative thread.
* If the thread subsequently performs an operation that makes it unready,
* it will be context switched out in the normal manner. When the thread
* again becomes the current thread, its non-preemptible status is maintained.
*
* This routine can be called recursively.
*
* @note k_sched_lock() and k_sched_unlock() should normally be used
* when the operation being performed can be safely interrupted by ISRs.
* However, if the amount of processing involved is very small, better
* performance may be obtained by using irq_lock() and irq_unlock().
*/
extern void k_sched_lock(void);
/**
* @brief Unlock the scheduler.
*
* This routine reverses the effect of a previous call to k_sched_lock().
* A thread must call the routine once for each time it called k_sched_lock()
* before the thread becomes preemptible.
*/
extern void k_sched_unlock(void);
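/*
 * Example: a short non-preemptible section. An illustrative sketch;
 * update_shared_state is a hypothetical application function. Unlike
 * irq_lock(), ISRs still run; only preemption by other threads is held
 * off.
 *
 *	k_sched_lock();
 *	update_shared_state();
 *	k_sched_unlock();
 */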
/**
* @brief Set current thread's custom data.
*
 * This routine sets the custom data for the current thread to @a value.
*
* Custom data is not used by the kernel itself, and is freely available
* for a thread to use as it sees fit. It can be used as a framework
* upon which to build thread-local storage.
*
* @param value New custom data value.
*
*/
__syscall void k_thread_custom_data_set(void *value);
/**
* @brief Get current thread's custom data.
*
* This routine returns the custom data for the current thread.
*
* @return Current custom data value.
*/
__syscall void *k_thread_custom_data_get(void);
/**
* @brief Set current thread name
*
* Set the name of the thread to be used when @kconfig{CONFIG_THREAD_MONITOR}
* is enabled for tracing and debugging.
*
* @param thread Thread to set name, or NULL to set the current thread
* @param str Name string
* @retval 0 on success
* @retval -EFAULT Memory access error with supplied string
* @retval -ENOSYS Thread name configuration option not enabled
* @retval -EINVAL Thread name too long
*/
__syscall int k_thread_name_set(k_tid_t thread, const char *str);
/**
* @brief Get thread name
*
* Get the name of a thread
*
* @param thread Thread ID
* @retval Thread name, or NULL if configuration not enabled
*/
const char *k_thread_name_get(k_tid_t thread);
/**
* @brief Copy the thread name into a supplied buffer
*
* @param thread Thread to obtain name information
* @param buf Destination buffer
* @param size Destination buffer size
* @retval -ENOSPC Destination buffer too small
* @retval -EFAULT Memory access error
* @retval -ENOSYS Thread name feature not enabled
* @retval 0 Success
*/
__syscall int k_thread_name_copy(k_tid_t thread, char *buf,
size_t size);
/**
* @brief Get thread state string
*
* This routine generates a human friendly string containing the thread's
* state, and copies as much of it as possible into @a buf.
*
* @param thread_id Thread ID
* @param buf Buffer into which to copy state strings
* @param buf_size Size of the buffer
*
* @retval Pointer to @a buf if data was copied, else a pointer to "".
*/
const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size);
/**
* @}
*/
/**
* @addtogroup clock_apis
* @{
*/
/**
* @brief Generate null timeout delay.
*
* This macro generates a timeout delay that instructs a kernel API
* not to wait if the requested operation cannot be performed immediately.
*
* @return Timeout delay value.
*/
#define K_NO_WAIT Z_TIMEOUT_NO_WAIT
/**
* @brief Generate timeout delay from nanoseconds.
*
* This macro generates a timeout delay that instructs a kernel API to
* wait up to @a t nanoseconds to perform the requested operation.
* Note that timer precision is limited to the tick rate, not the
* requested value.
*
* @param t Duration in nanoseconds.
*
* @return Timeout delay value.
*/
#define K_NSEC(t) Z_TIMEOUT_NS(t)
/**
* @brief Generate timeout delay from microseconds.
*
* This macro generates a timeout delay that instructs a kernel API
* to wait up to @a t microseconds to perform the requested operation.
* Note that timer precision is limited to the tick rate, not the
* requested value.
*
* @param t Duration in microseconds.
*
* @return Timeout delay value.
*/
#define K_USEC(t) Z_TIMEOUT_US(t)
/**
* @brief Generate timeout delay from cycles.
*
* This macro generates a timeout delay that instructs a kernel API
* to wait up to @a t cycles to perform the requested operation.
*
* @param t Duration in cycles.
*
* @return Timeout delay value.
*/
#define K_CYC(t) Z_TIMEOUT_CYC(t)
/**
* @brief Generate timeout delay from system ticks.
*
* This macro generates a timeout delay that instructs a kernel API
* to wait up to @a t ticks to perform the requested operation.
*
* @param t Duration in system ticks.
*
* @return Timeout delay value.
*/
#define K_TICKS(t) Z_TIMEOUT_TICKS(t)
/**
* @brief Generate timeout delay from milliseconds.
*
* This macro generates a timeout delay that instructs a kernel API
* to wait up to @a ms milliseconds to perform the requested operation.
*
* @param ms Duration in milliseconds.
*
* @return Timeout delay value.
*/
#define K_MSEC(ms) Z_TIMEOUT_MS(ms)
/**
* @brief Generate timeout delay from seconds.
*
* This macro generates a timeout delay that instructs a kernel API
* to wait up to @a s seconds to perform the requested operation.
*
* @param s Duration in seconds.
*
* @return Timeout delay value.
*/
#define K_SECONDS(s) K_MSEC((s) * MSEC_PER_SEC)
/**
 * @brief Generate timeout delay from minutes.
 *
 * This macro generates a timeout delay that instructs a kernel API
* to wait up to @a m minutes to perform the requested operation.
*
* @param m Duration in minutes.
*
* @return Timeout delay value.
*/
#define K_MINUTES(m) K_SECONDS((m) * 60)
/**
* @brief Generate timeout delay from hours.
*
* This macro generates a timeout delay that instructs a kernel API
* to wait up to @a h hours to perform the requested operation.
*
* @param h Duration in hours.
*
* @return Timeout delay value.
*/
#define K_HOURS(h) K_MINUTES((h) * 60)
/**
* @brief Generate infinite timeout delay.
*
* This macro generates a timeout delay that instructs a kernel API
* to wait as long as necessary to perform the requested operation.
*
* @return Timeout delay value.
*/
#define K_FOREVER Z_FOREVER
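/*
 * Example uses of the timeout helpers above with APIs declared in this
 * file. An illustrative sketch; my_queue is a hypothetical queue
 * object.
 *
 *	k_sleep(K_MSEC(100));
 *	k_sleep(K_SECONDS(2));
 *
 *	void *item = k_queue_get(&my_queue, K_NO_WAIT);
 *	void *next = k_queue_get(&my_queue, K_FOREVER);
 */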
#ifdef CONFIG_TIMEOUT_64BIT
/**
* @brief Generates an absolute/uptime timeout value from system ticks
*
* This macro generates a timeout delay that represents an expiration
* at the absolute uptime value specified, in system ticks. That is, the
* timeout will expire immediately after the system uptime reaches the
* specified tick count.
*
* @param t Tick uptime value
* @return Timeout delay value
*/
#define K_TIMEOUT_ABS_TICKS(t) \
Z_TIMEOUT_TICKS(Z_TICK_ABS((k_ticks_t)MAX(t, 0)))
/**
* @brief Generates an absolute/uptime timeout value from milliseconds
*
* This macro generates a timeout delay that represents an expiration
* at the absolute uptime value specified, in milliseconds. That is,
 * the timeout will expire immediately after the system uptime reaches
 * the specified time.
*
* @param t Millisecond uptime value
* @return Timeout delay value
*/
#define K_TIMEOUT_ABS_MS(t) K_TIMEOUT_ABS_TICKS(k_ms_to_ticks_ceil64(t))
/**
* @brief Generates an absolute/uptime timeout value from microseconds
*
* This macro generates a timeout delay that represents an expiration
* at the absolute uptime value specified, in microseconds. That is,
* the timeout will expire immediately after the system uptime reaches
* the specified time. Note that timer precision is limited by the
* system tick rate and not the requested timeout value.
*
* @param t Microsecond uptime value
* @return Timeout delay value
*/
#define K_TIMEOUT_ABS_US(t) K_TIMEOUT_ABS_TICKS(k_us_to_ticks_ceil64(t))
/**
* @brief Generates an absolute/uptime timeout value from nanoseconds
*
* This macro generates a timeout delay that represents an expiration
* at the absolute uptime value specified, in nanoseconds. That is,
* the timeout will expire immediately after the system uptime reaches
* the specified time. Note that timer precision is limited by the
* system tick rate and not the requested timeout value.
*
* @param t Nanosecond uptime value
* @return Timeout delay value
*/
#define K_TIMEOUT_ABS_NS(t) K_TIMEOUT_ABS_TICKS(k_ns_to_ticks_ceil64(t))
/**
* @brief Generates an absolute/uptime timeout value from system cycles
*
* This macro generates a timeout delay that represents an expiration
* at the absolute uptime value specified, in cycles. That is, the
* timeout will expire immediately after the system uptime reaches the
* specified time. Note that timer precision is limited by the system
* tick rate and not the requested timeout value.
*
* @param t Cycle uptime value
* @return Timeout delay value
*/
#define K_TIMEOUT_ABS_CYC(t) K_TIMEOUT_ABS_TICKS(k_cyc_to_ticks_ceil64(t))
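/*
 * Example: a drift-free 10 ms periodic loop using absolute timeouts.
 * An illustrative sketch; do_work() is a hypothetical application
 * function. Sleeping until an absolute uptime avoids accumulating the
 * loop's own processing time, which a relative k_sleep(K_MSEC(10))
 * would.
 *
 *	int64_t next_ms = k_uptime_get() + 10;
 *
 *	while (1) {
 *		do_work();
 *		k_sleep(K_TIMEOUT_ABS_MS(next_ms));
 *		next_ms += 10;
 *	}
 */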
#endif
/**
* @}
*/
/**
* @cond INTERNAL_HIDDEN
*/
struct k_timer {
/*
* _timeout structure must be first here if we want to use
* dynamic timer allocation. timeout.node is used in the double-linked
* list of free timers
*/
struct _timeout timeout;
/* wait queue for the (single) thread waiting on this timer */
_wait_q_t wait_q;
/* runs in ISR context */
void (*expiry_fn)(struct k_timer *timer);
/* runs in the context of the thread that calls k_timer_stop() */
void (*stop_fn)(struct k_timer *timer);
/* timer period */
k_timeout_t period;
/* timer status */
uint32_t status;
/* user-specific data, also used to support legacy features */
void *user_data;
SYS_PORT_TRACING_TRACKING_FIELD(k_timer)
};
#define Z_TIMER_INITIALIZER(obj, expiry, stop) \
{ \
.timeout = { \
.node = {},\
.fn = z_timer_expiration_handler, \
.dticks = 0, \
}, \
.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
.expiry_fn = expiry, \
.stop_fn = stop, \
.status = 0, \
.user_data = 0, \
}
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @defgroup timer_apis Timer APIs
* @ingroup kernel_apis
* @{
*/
/**
* @typedef k_timer_expiry_t
* @brief Timer expiry function type.
*
* A timer's expiry function is executed by the system clock interrupt handler
* each time the timer expires. The expiry function is optional, and is only
* invoked if the timer has been initialized with one.
*
* @param timer Address of timer.
*/
typedef void (*k_timer_expiry_t)(struct k_timer *timer);
/**
* @typedef k_timer_stop_t
* @brief Timer stop function type.
*
* A timer's stop function is executed if the timer is stopped prematurely.
 * The function runs in the context of the call that stops the timer. As
* k_timer_stop() can be invoked from an ISR, the stop function must be
* callable from interrupt context (isr-ok).
*
* The stop function is optional, and is only invoked if the timer has been
* initialized with one.
*
* @param timer Address of timer.
*/
typedef void (*k_timer_stop_t)(struct k_timer *timer);
/**
* @brief Statically define and initialize a timer.
*
* The timer can be accessed outside the module where it is defined using:
*
* @code extern struct k_timer <name>; @endcode
*
* @param name Name of the timer variable.
* @param expiry_fn Function to invoke each time the timer expires.
* @param stop_fn Function to invoke if the timer is stopped while running.
*/
#define K_TIMER_DEFINE(name, expiry_fn, stop_fn) \
STRUCT_SECTION_ITERABLE(k_timer, name) = \
Z_TIMER_INITIALIZER(name, expiry_fn, stop_fn)
/**
* @brief Initialize a timer.
*
* This routine initializes a timer, prior to its first use.
*
* @param timer Address of timer.
* @param expiry_fn Function to invoke each time the timer expires.
* @param stop_fn Function to invoke if the timer is stopped while running.
*/
extern void k_timer_init(struct k_timer *timer,
k_timer_expiry_t expiry_fn,
k_timer_stop_t stop_fn);
/**
* @brief Start a timer.
*
* This routine starts a timer, and resets its status to zero. The timer
* begins counting down using the specified duration and period values.
*
* Attempting to start a timer that is already running is permitted.
* The timer's status is reset to zero and the timer begins counting down
* using the new duration and period values.
*
* @param timer Address of timer.
* @param duration Initial timer duration.
* @param period Timer period.
*/
__syscall void k_timer_start(struct k_timer *timer,
k_timeout_t duration, k_timeout_t period);
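/*
 * Example: a periodic timer that fires first after 100 ms and then once
 * per second. An illustrative sketch; my_expiry_fn is a hypothetical
 * handler and runs in ISR context.
 *
 *	void my_expiry_fn(struct k_timer *timer)
 *	{
 *		...
 *	}
 *
 *	K_TIMER_DEFINE(my_timer, my_expiry_fn, NULL);
 *
 *	k_timer_start(&my_timer, K_MSEC(100), K_SECONDS(1));
 */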
/**
* @brief Stop a timer.
*
* This routine stops a running timer prematurely. The timer's stop function,
* if one exists, is invoked by the caller.
*
* Attempting to stop a timer that is not running is permitted, but has no
* effect on the timer.
*
* @note The stop handler has to be callable from ISRs if @a k_timer_stop is to
* be called from ISRs.
*
* @funcprops \isr_ok
*
* @param timer Address of timer.
*/
__syscall void k_timer_stop(struct k_timer *timer);
/**
* @brief Read timer status.
*
* This routine reads the timer's status, which indicates the number of times
* it has expired since its status was last read.
*
* Calling this routine resets the timer's status to zero.
*
* @param timer Address of timer.
*
* @return Timer status.
*/
__syscall uint32_t k_timer_status_get(struct k_timer *timer);
/**
* @brief Synchronize thread to timer expiration.
*
* This routine blocks the calling thread until the timer's status is non-zero
* (indicating that it has expired at least once since it was last examined)
* or the timer is stopped. If the timer status is already non-zero,
* or the timer is already stopped, the caller continues without waiting.
*
* Calling this routine resets the timer's status to zero.
*
* This routine must not be used by interrupt handlers, since they are not
* allowed to block.
*
* @param timer Address of timer.
*
* @return Timer status.
*/
__syscall uint32_t k_timer_status_sync(struct k_timer *timer);
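/*
 * Example: pacing a processing loop off a 50 ms periodic timer. An
 * illustrative sketch; my_timer and process() are hypothetical.
 * k_timer_status_sync() blocks until the next expiry; a return value
 * greater than one indicates missed periods.
 *
 *	k_timer_start(&my_timer, K_MSEC(50), K_MSEC(50));
 *
 *	while (1) {
 *		uint32_t expirations = k_timer_status_sync(&my_timer);
 *
 *		process(expirations);
 *	}
 */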
#ifdef CONFIG_SYS_CLOCK_EXISTS
/**
* @brief Get next expiration time of a timer, in system ticks
*
* This routine returns the future system uptime reached at the next
* time of expiration of the timer, in units of system ticks. If the
* timer is not running, current system time is returned.
*
* @param timer The timer object
* @return Uptime of expiration, in ticks
*/
__syscall k_ticks_t k_timer_expires_ticks(const struct k_timer *timer);
static inline k_ticks_t z_impl_k_timer_expires_ticks(
const struct k_timer *timer)
{
return z_timeout_expires(&timer->timeout);
}
/**
* @brief Get time remaining before a timer next expires, in system ticks
*
* This routine computes the time remaining before a running timer
* next expires, in units of system ticks. If the timer is not
* running, it returns zero.
*/
__syscall k_ticks_t k_timer_remaining_ticks(const struct k_timer *timer);
static inline k_ticks_t z_impl_k_timer_remaining_ticks(
const struct k_timer *timer)
{
return z_timeout_remaining(&timer->timeout);
}
/**
* @brief Get time remaining before a timer next expires.
*
* This routine computes the (approximate) time remaining before a running
* timer next expires. If the timer is not running, it returns zero.
*
* @param timer Address of timer.
*
* @return Remaining time (in milliseconds).
*/
static inline uint32_t k_timer_remaining_get(struct k_timer *timer)
{
return k_ticks_to_ms_floor32(k_timer_remaining_ticks(timer));
}
#endif /* CONFIG_SYS_CLOCK_EXISTS */
/**
* @brief Associate user-specific data with a timer.
*
* This routine records the @a user_data with the @a timer, to be retrieved
* later.
*
* It can be used e.g. in a timer handler shared across multiple subsystems to
* retrieve data specific to the subsystem this timer is associated with.
*
* @param timer Address of timer.
* @param user_data User data to associate with the timer.
*/
__syscall void k_timer_user_data_set(struct k_timer *timer, void *user_data);
/**
* @internal
*/
static inline void z_impl_k_timer_user_data_set(struct k_timer *timer,
void *user_data)
{
timer->user_data = user_data;
}
/**
* @brief Retrieve the user-specific data from a timer.
*
* @param timer Address of timer.
*
* @return The user data.
*/
__syscall void *k_timer_user_data_get(const struct k_timer *timer);
static inline void *z_impl_k_timer_user_data_get(const struct k_timer *timer)
{
return timer->user_data;
}
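/*
 * Example: recovering per-instance context inside an expiry handler
 * shared by several timers. An illustrative sketch; struct my_ctx and
 * shared_expiry_fn are hypothetical.
 *
 *	struct my_ctx ctx;
 *
 *	k_timer_user_data_set(&my_timer, &ctx);
 *
 *	void shared_expiry_fn(struct k_timer *timer)
 *	{
 *		struct my_ctx *c = k_timer_user_data_get(timer);
 *		...
 *	}
 */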
/** @} */
/**
* @addtogroup clock_apis
* @ingroup kernel_apis
* @{
*/
/**
* @brief Get system uptime, in system ticks.
*
* This routine returns the elapsed time since the system booted, in
* ticks (c.f. @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC}), which is the
* fundamental unit of resolution of kernel timekeeping.
*
* @return Current uptime in ticks.
*/
__syscall int64_t k_uptime_ticks(void);
/**
* @brief Get system uptime.
*
* This routine returns the elapsed time since the system booted,
* in milliseconds.
*
* @note
* While this function returns time in milliseconds, it does
* not mean it has millisecond resolution. The actual resolution depends on
* @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option.
*
* @return Current uptime in milliseconds.
*/
static inline int64_t k_uptime_get(void)
{
return k_ticks_to_ms_floor64(k_uptime_ticks());
}
/**
* @brief Get system uptime (32-bit version).
*
* This routine returns the lower 32 bits of the system uptime in
* milliseconds.
*
* Because correct conversion requires full precision of the system
* clock there is no benefit to using this over k_uptime_get() unless
* you know the application will never run long enough for the system
* clock to approach 2^32 ticks. Calls to this function may involve
* interrupt blocking and 64-bit math.
*
* @note
* While this function returns time in milliseconds, it does
* not mean it has millisecond resolution. The actual resolution depends on
* @kconfig{CONFIG_SYS_CLOCK_TICKS_PER_SEC} config option
*
* @return The low 32 bits of the current uptime, in milliseconds.
*/
static inline uint32_t k_uptime_get_32(void)
{
return (uint32_t)k_uptime_get();
}
/**
* @brief Get elapsed time.
*
* This routine computes the elapsed time between the current system uptime
* and an earlier reference time, in milliseconds.
*
* @param reftime Pointer to a reference time, which is updated to the current
* uptime upon return.
*
* @return Elapsed time.
*/
static inline int64_t k_uptime_delta(int64_t *reftime)
{
int64_t uptime, delta;
uptime = k_uptime_get();
delta = uptime - *reftime;
*reftime = uptime;
return delta;
}
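/*
 * Example: measuring the elapsed time of an operation. An illustrative
 * sketch; do_operation() is a hypothetical application function. The
 * reference is updated on each call, so successive calls measure
 * successive intervals.
 *
 *	int64_t ref = k_uptime_get();
 *
 *	do_operation();
 *	int64_t took_ms = k_uptime_delta(&ref);
 */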
/**
* @brief Read the hardware clock.
*
* This routine returns the current time, as measured by the system's hardware
* clock.
*
* @return Current hardware clock up-counter (in cycles).
*/
static inline uint32_t k_cycle_get_32(void)
{
return arch_k_cycle_get_32();
}
/**
* @brief Read the 64-bit hardware clock.
*
* This routine returns the current time in 64-bits, as measured by the
* system's hardware clock, if available.
*
* @see CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER
*
* @return Current hardware clock up-counter (in cycles).
*/
static inline uint64_t k_cycle_get_64(void)
{
if (!IS_ENABLED(CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER)) {
__ASSERT(0, "64-bit cycle counter not enabled on this platform. "
"See CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER");
return 0;
}
return arch_k_cycle_get_64();
}
/**
* @}
*/
/**
* @cond INTERNAL_HIDDEN
*/
struct k_queue {
sys_sflist_t data_q;
struct k_spinlock lock;
_wait_q_t wait_q;
_POLL_EVENT;
SYS_PORT_TRACING_TRACKING_FIELD(k_queue)
};
#define Z_QUEUE_INITIALIZER(obj) \
{ \
.data_q = SYS_SFLIST_STATIC_INIT(&obj.data_q), \
.lock = { }, \
.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
_POLL_EVENT_OBJ_INIT(obj) \
}
extern void *z_queue_node_peek(sys_sfnode_t *node, bool needs_free);
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @defgroup queue_apis Queue APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Initialize a queue.
*
* This routine initializes a queue object, prior to its first use.
*
* @param queue Address of the queue.
*/
__syscall void k_queue_init(struct k_queue *queue);
/**
* @brief Cancel waiting on a queue.
*
 * This routine causes the first thread pending on @a queue, if any, to
 * return from its k_queue_get() call with a NULL value (as if the timeout
 * had expired). If the queue is being waited on by k_poll(), it will
 * return with -EINTR and K_POLL_STATE_CANCELLED state (and per above, a
 * subsequent k_queue_get() will return NULL).
*
* @funcprops \isr_ok
*
* @param queue Address of the queue.
*/
__syscall void k_queue_cancel_wait(struct k_queue *queue);
/**
* @brief Append an element to the end of a queue.
*
* This routine appends a data item to @a queue. A queue data item must be
* aligned on a word boundary, and the first word of the item is reserved
* for the kernel's use.
*
* @funcprops \isr_ok
*
* @param queue Address of the queue.
* @param data Address of the data item.
*/
extern void k_queue_append(struct k_queue *queue, void *data);
/**
* @brief Append an element to a queue.
*
* This routine appends a data item to @a queue. There is an implicit memory
* allocation to create an additional temporary bookkeeping data structure from
* the calling thread's resource pool, which is automatically freed when the
* item is removed. The data itself is not copied.
*
* @funcprops \isr_ok
*
* @param queue Address of the queue.
* @param data Address of the data item.
*
* @retval 0 on success
* @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
*/
__syscall int32_t k_queue_alloc_append(struct k_queue *queue, void *data);
/**
* @brief Prepend an element to a queue.
*
* This routine prepends a data item to @a queue. A queue data item must be
* aligned on a word boundary, and the first word of the item is reserved
* for the kernel's use.
*
* @funcprops \isr_ok
*
* @param queue Address of the queue.
* @param data Address of the data item.
*/
extern void k_queue_prepend(struct k_queue *queue, void *data);
/**
* @brief Prepend an element to a queue.
*
* This routine prepends a data item to @a queue. There is an implicit memory
* allocation to create an additional temporary bookkeeping data structure from
* the calling thread's resource pool, which is automatically freed when the
* item is removed. The data itself is not copied.
*
* @funcprops \isr_ok
*
* @param queue Address of the queue.
* @param data Address of the data item.
*
* @retval 0 on success
* @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
*/
__syscall int32_t k_queue_alloc_prepend(struct k_queue *queue, void *data);
/**
 * @brief Insert an element into a queue.
 *
 * This routine inserts a data item into @a queue after a previous item. A queue
* data item must be aligned on a word boundary, and the first word of
* the item is reserved for the kernel's use.
*
* @funcprops \isr_ok
*
* @param queue Address of the queue.
* @param prev Address of the previous data item.
* @param data Address of the data item.
*/
extern void k_queue_insert(struct k_queue *queue, void *prev, void *data);
/**
* @brief Atomically append a list of elements to a queue.
*
* This routine adds a list of data items to @a queue in one operation.
* The data items must be in a singly-linked list, with the first word
* in each data item pointing to the next data item; the list must be
* NULL-terminated.
*
* @funcprops \isr_ok
*
* @param queue Address of the queue.
* @param head Pointer to first node in singly-linked list.
* @param tail Pointer to last node in singly-linked list.
*
* @retval 0 on success
* @retval -EINVAL on invalid supplied data
*
*/
extern int k_queue_append_list(struct k_queue *queue, void *head, void *tail);
/**
* @brief Atomically add a list of elements to a queue.
*
* This routine adds a list of data items to @a queue in one operation.
* The data items must be in a singly-linked list implemented using a
* sys_slist_t object. Upon completion, the original list is empty.
*
* @funcprops \isr_ok
*
* @param queue Address of the queue.
* @param list Pointer to sys_slist_t object.
*
* @retval 0 on success
* @retval -EINVAL on invalid data
*/
extern int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);
/**
* @brief Get an element from a queue.
*
 * This routine removes the first data item from @a queue. The first word of the
* data item is reserved for the kernel's use.
*
* @note @a timeout must be set to K_NO_WAIT if called from ISR.
*
* @funcprops \isr_ok
*
* @param queue Address of the queue.
* @param timeout Non-negative waiting period to obtain a data item
* or one of the special values K_NO_WAIT and
* K_FOREVER.
*
* @return Address of the data item if successful; NULL if returned
* without waiting, or waiting period timed out.
*/
__syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout);
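/*
 * Example: a minimal producer/consumer pair. An illustrative sketch;
 * struct my_item and my_queue are hypothetical. The first word of each
 * item is reserved for the kernel, so the payload starts at the second
 * word.
 *
 *	struct my_item {
 *		void *reserved;
 *		uint32_t payload;
 *	};
 *
 *	K_QUEUE_DEFINE(my_queue);
 *
 * Producer side:
 *
 *	static struct my_item item = { .payload = 42 };
 *
 *	k_queue_append(&my_queue, &item);
 *
 * Consumer side:
 *
 *	struct my_item *rx = k_queue_get(&my_queue, K_FOREVER);
 */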
/**
* @brief Remove an element from a queue.
*
 * This routine removes a data item from @a queue. The first word of the
 * data item is reserved for the kernel's use. Removing elements from a k_queue
 * relies on sys_slist_find_and_remove, which is not a constant-time operation.
*
* @note @a timeout must be set to K_NO_WAIT if called from ISR.
*
* @funcprops \isr_ok
*
* @param queue Address of the queue.
* @param data Address of the data item.
*
* @return true if data item was removed
*/
bool k_queue_remove(struct k_queue *queue, void *data);
/**
* @brief Append an element to a queue only if it's not present already.
*
 * This routine appends a data item to @a queue. The first word of the data
 * item is reserved for the kernel's use. Appending elements to a k_queue
 * relies on sys_slist_is_node_in_list, which is not a constant-time operation.
*
* @funcprops \isr_ok
*
* @param queue Address of the queue.
* @param data Address of the data item.
*
* @return true if data item was added, false if not
*/
bool k_queue_unique_append(struct k_queue *queue, void *data);
/**
* @brief Query a queue to see if it has data available.
*
* Note that the data might already be gone by the time this function returns
* if other threads are also trying to read from the queue.
*
* @funcprops \isr_ok
*
* @param queue Address of the queue.
*
* @return Non-zero if the queue is empty.
* @return 0 if data is available.
*/
__syscall int k_queue_is_empty(struct k_queue *queue);
static inline int z_impl_k_queue_is_empty(struct k_queue *queue)
{
return (int)sys_sflist_is_empty(&queue->data_q);
}
/**
* @brief Peek element at the head of queue.
*
* Return element from the head of queue without removing it.
*
* @param queue Address of the queue.
*
* @return Head element, or NULL if queue is empty.
*/
__syscall void *k_queue_peek_head(struct k_queue *queue);
/**
* @brief Peek element at the tail of queue.
*
* Return element from the tail of queue without removing it.
*
* @param queue Address of the queue.
*
* @return Tail element, or NULL if queue is empty.
*/
__syscall void *k_queue_peek_tail(struct k_queue *queue);
/**
* @brief Statically define and initialize a queue.
*
* The queue can be accessed outside the module where it is defined using:
*
* @code extern struct k_queue <name>; @endcode
*
* @param name Name of the queue.
*/
#define K_QUEUE_DEFINE(name) \
STRUCT_SECTION_ITERABLE(k_queue, name) = \
Z_QUEUE_INITIALIZER(name)
/** @} */
#ifdef CONFIG_USERSPACE
/**
* @brief futex structure
*
* A k_futex is a lightweight mutual exclusion primitive designed
* to minimize kernel involvement. Uncontended operation relies
* only on atomic access to shared memory. A k_futex is tracked as a
* kernel object and can live in user memory, so any access bypasses the
* kernel object permission management mechanism.
*/
struct k_futex {
atomic_t val;
};
/**
* @brief futex kernel data structure
*
* z_futex_data is the kernel-side helper data structure that a k_futex
* uses to complete contended futex operations; the z_futex_data of every
* futex object is invisible in user mode.
*/
struct z_futex_data {
_wait_q_t wait_q;
struct k_spinlock lock;
};
#define Z_FUTEX_DATA_INITIALIZER(obj) \
{ \
.wait_q = Z_WAIT_Q_INIT(&obj.wait_q) \
}
/**
* @defgroup futex_apis FUTEX APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Pend the current thread on a futex
*
* Tests that the supplied futex contains the expected value, and if so,
* goes to sleep until some other thread calls k_futex_wake() on it.
*
* @param futex Address of the futex.
* @param expected Expected value of the futex; if the actual value differs,
* the caller will not wait on it.
* @param timeout Non-negative waiting period on the futex, or
* one of the special values K_NO_WAIT or K_FOREVER.
* @retval -EACCES Caller does not have read access to futex address.
* @retval -EAGAIN If the futex value did not match the expected parameter.
* @retval -EINVAL Futex parameter address not recognized by the kernel.
* @retval -ETIMEDOUT Thread woke up due to timeout and not a futex wakeup.
* @retval 0 if the caller went to sleep and was woken up. The caller
* should check the futex's value on wakeup to determine if it needs
* to block again.
*/
__syscall int k_futex_wait(struct k_futex *futex, int expected,
k_timeout_t timeout);
/**
* @brief Wake one/all threads pending on a futex
*
* Wake up the highest priority thread pending on the supplied futex, or
* wake up all threads pending on the supplied futex, depending on
* @a wake_all.
*
* @param futex Futex to wake up pending threads.
* @param wake_all If true, wake up all pending threads; if false,
* wake up only the highest priority thread.
* @retval -EACCES Caller does not have access to the futex address.
* @retval -EINVAL Futex parameter address not recognized by the kernel.
* @return Number of threads that were woken up.
*/
__syscall int k_futex_wake(struct k_futex *futex, bool wake_all);
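/*
 * Illustrative sketch (hypothetical helpers, not part of this API): a
 * minimal binary lock built on k_futex, assuming 0 means unlocked and 1
 * means locked.
 *
 *     static struct k_futex my_lock;
 *
 *     void my_lock_acquire(void)
 *     {
 *         while (!atomic_cas(&my_lock.val, 0, 1)) {
 *             // contended: sleep until a holder calls k_futex_wake()
 *             (void)k_futex_wait(&my_lock, 1, K_FOREVER);
 *         }
 *     }
 *
 *     void my_lock_release(void)
 *     {
 *         atomic_set(&my_lock.val, 0);
 *         (void)k_futex_wake(&my_lock, false);
 *     }
 */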
/** @} */
#endif
/**
* @defgroup event_apis Event APIs
* @ingroup kernel_apis
* @{
*/
/**
* Event Structure
* @ingroup event_apis
*/
struct k_event {
_wait_q_t wait_q;
uint32_t events;
struct k_spinlock lock;
};
#define Z_EVENT_INITIALIZER(obj) \
{ \
.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
.events = 0 \
}
/**
* @brief Initialize an event object
*
* This routine initializes an event object, prior to its first use.
*
* @param event Address of the event object.
*/
__syscall void k_event_init(struct k_event *event);
/**
* @brief Post one or more events to an event object
*
* This routine posts one or more events to an event object. All tasks waiting
* on the event object @a event whose waiting conditions become met by this
* posting immediately unpend.
*
* Posting differs from setting in that posted events are merged together with
* the current set of events tracked by the event object.
*
* @param event Address of the event object
* @param events Set of events to post to @a event
*/
__syscall void k_event_post(struct k_event *event, uint32_t events);
/**
* @brief Set the events in an event object
*
* This routine sets the events stored in event object to the specified value.
* All tasks waiting on the event object @a event whose waiting conditions
* become met by this immediately unpend.
*
* Setting differs from posting in that set events replace the current set of
* events tracked by the event object.
*
* @param event Address of the event object
* @param events Set of events to post to @a event
*/
__syscall void k_event_set(struct k_event *event, uint32_t events);
/**
* @brief Set or clear the events in an event object
*
* This routine sets the events stored in event object to the specified value.
* All tasks waiting on the event object @a event whose waiting conditions
* become met by this immediately unpend. Unlike @ref k_event_set, this routine
* allows specific event bits to be set and cleared as determined by the mask.
*
* @param event Address of the event object
* @param events Set of events to post to @a event
* @param events_mask Mask to be applied to @a events
*/
__syscall void k_event_set_masked(struct k_event *event, uint32_t events,
uint32_t events_mask);
/**
* @brief Wait for any of the specified events
*
* This routine waits on event object @a event until any of the specified
* events have been delivered to the event object, or the maximum wait time
* @a timeout has expired. A thread may wait on up to 32 distinctly numbered
* events that are expressed as bits in a single 32-bit word.
*
* @note The caller must be careful when resetting if there are multiple threads
* waiting for the event object @a event.
*
* @param event Address of the event object
* @param events Set of desired events on which to wait
* @param reset If true, clear the set of events tracked by the event object
* before waiting. If false, do not clear the events.
* @param timeout Waiting period for the desired set of events or one of the
* special values K_NO_WAIT and K_FOREVER.
*
* @retval set of matching events upon success
* @retval 0 if matching events were not received within the specified time
*/
__syscall uint32_t k_event_wait(struct k_event *event, uint32_t events,
bool reset, k_timeout_t timeout);
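/*
 * Illustrative sketch (hypothetical event bits and names): one thread waits
 * for any of two events while another posts one of them.
 *
 *     #define EV_RX BIT(0)
 *     #define EV_TX BIT(1)
 *
 *     static struct k_event my_event;   // k_event_init() called once at startup
 *
 *     void waiter(void)
 *     {
 *         uint32_t got = k_event_wait(&my_event, EV_RX | EV_TX,
 *                                     false, K_FOREVER);
 *         // got holds the matching bits, e.g. EV_RX
 *     }
 *
 *     void poster(void)
 *     {
 *         k_event_post(&my_event, EV_RX);
 *     }
 */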
/**
* @brief Wait for all of the specified events
*
* This routine waits on event object @a event until all of the specified
* events have been delivered to the event object, or the maximum wait time
* @a timeout has expired. A thread may wait on up to 32 distinctly numbered
* events that are expressed as bits in a single 32-bit word.
*
* @note The caller must be careful when resetting if there are multiple threads
* waiting for the event object @a event.
*
* @param event Address of the event object
* @param events Set of desired events on which to wait
* @param reset If true, clear the set of events tracked by the event object
* before waiting. If false, do not clear the events.
* @param timeout Waiting period for the desired set of events or one of the
* special values K_NO_WAIT and K_FOREVER.
*
* @retval set of matching events upon success
* @retval 0 if matching events were not received within the specified time
*/
__syscall uint32_t k_event_wait_all(struct k_event *event, uint32_t events,
bool reset, k_timeout_t timeout);
/**
* @brief Statically define and initialize an event object
*
* The event can be accessed outside the module where it is defined using:
*
* @code extern struct k_event <name>; @endcode
*
* @param name Name of the event object.
*/
#define K_EVENT_DEFINE(name) \
STRUCT_SECTION_ITERABLE(k_event, name) = \
Z_EVENT_INITIALIZER(name);
/** @} */
struct k_fifo {
struct k_queue _queue;
};
/**
* @cond INTERNAL_HIDDEN
*/
#define Z_FIFO_INITIALIZER(obj) \
{ \
._queue = Z_QUEUE_INITIALIZER(obj._queue) \
}
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @defgroup fifo_apis FIFO APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Initialize a FIFO queue.
*
* This routine initializes a FIFO queue, prior to its first use.
*
* @param fifo Address of the FIFO queue.
*/
#define k_fifo_init(fifo) \
({ \
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, init, fifo); \
k_queue_init(&(fifo)->_queue); \
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, init, fifo); \
})
/**
* @brief Cancel waiting on a FIFO queue.
*
* This routine causes the first thread pending on @a fifo, if any, to
* return from its k_fifo_get() call with a NULL value (as if the timeout
* had expired).
*
* @funcprops \isr_ok
*
* @param fifo Address of the FIFO queue.
*/
#define k_fifo_cancel_wait(fifo) \
({ \
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, cancel_wait, fifo); \
k_queue_cancel_wait(&(fifo)->_queue); \
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, cancel_wait, fifo); \
})
/**
* @brief Add an element to a FIFO queue.
*
* This routine adds a data item to @a fifo. A FIFO data item must be
* aligned on a word boundary, and the first word of the item is reserved
* for the kernel's use.
*
* @funcprops \isr_ok
*
* @param fifo Address of the FIFO.
* @param data Address of the data item.
*/
#define k_fifo_put(fifo, data) \
({ \
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put, fifo, data); \
k_queue_append(&(fifo)->_queue, data); \
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put, fifo, data); \
})
/**
* @brief Add an element to a FIFO queue.
*
* This routine adds a data item to @a fifo. There is an implicit memory
* allocation to create an additional temporary bookkeeping data structure from
* the calling thread's resource pool, which is automatically freed when the
* item is removed. The data itself is not copied.
*
* @funcprops \isr_ok
*
* @param fifo Address of the FIFO.
* @param data Address of the data item.
*
* @retval 0 on success
* @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
*/
#define k_fifo_alloc_put(fifo, data) \
({ \
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, alloc_put, fifo, data); \
int ret = k_queue_alloc_append(&(fifo)->_queue, data); \
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, alloc_put, fifo, data, ret); \
ret; \
})
/**
* @brief Atomically add a list of elements to a FIFO.
*
* This routine adds a list of data items to @a fifo in one operation.
* The data items must be in a singly-linked list, with the first word of
* each data item pointing to the next data item; the list must be
* NULL-terminated.
*
* @funcprops \isr_ok
*
* @param fifo Address of the FIFO queue.
* @param head Pointer to first node in singly-linked list.
* @param tail Pointer to last node in singly-linked list.
*/
#define k_fifo_put_list(fifo, head, tail) \
({ \
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_list, fifo, head, tail); \
k_queue_append_list(&(fifo)->_queue, head, tail); \
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_list, fifo, head, tail); \
})
/**
* @brief Atomically add a list of elements to a FIFO queue.
*
* This routine adds a list of data items to @a fifo in one operation.
* The data items must be in a singly-linked list implemented using a
* sys_slist_t object. Upon completion, the sys_slist_t object is invalid
* and must be re-initialized via sys_slist_init().
*
* @funcprops \isr_ok
*
* @param fifo Address of the FIFO queue.
* @param list Pointer to sys_slist_t object.
*/
#define k_fifo_put_slist(fifo, list) \
({ \
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, put_slist, fifo, list); \
k_queue_merge_slist(&(fifo)->_queue, list); \
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, put_slist, fifo, list); \
})
/**
* @brief Get an element from a FIFO queue.
*
* This routine removes a data item from @a fifo in a "first in, first out"
* manner. The first word of the data item is reserved for the kernel's use.
*
* @note @a timeout must be set to K_NO_WAIT if called from ISR.
*
* @funcprops \isr_ok
*
* @param fifo Address of the FIFO queue.
* @param timeout Waiting period to obtain a data item,
* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @return Address of the data item if successful; NULL if returned
* without waiting, or waiting period timed out.
*/
#define k_fifo_get(fifo, timeout) \
({ \
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, get, fifo, timeout); \
void *ret = k_queue_get(&(fifo)->_queue, timeout); \
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, get, fifo, timeout, ret); \
ret; \
})
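/*
 * Illustrative sketch (hypothetical names): exchanging items through a
 * statically defined FIFO; the first struct member is left for the kernel.
 *
 *     struct my_msg {
 *         void *fifo_reserved;   // first word, reserved for the kernel
 *         uint8_t data[16];
 *     };
 *
 *     K_FIFO_DEFINE(my_fifo);
 *
 *     void producer(struct my_msg *msg)
 *     {
 *         k_fifo_put(&my_fifo, msg);
 *     }
 *
 *     void consumer(void)
 *     {
 *         struct my_msg *msg = k_fifo_get(&my_fifo, K_FOREVER);
 *         // process msg->data
 *     }
 */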
/**
* @brief Query a FIFO queue to see if it has data available.
*
* Note that the data might already be gone by the time this function returns
* if other threads are also trying to read from the FIFO.
*
* @funcprops \isr_ok
*
* @param fifo Address of the FIFO queue.
*
* @return Non-zero if the FIFO queue is empty.
* @return 0 if data is available.
*/
#define k_fifo_is_empty(fifo) \
k_queue_is_empty(&(fifo)->_queue)
/**
* @brief Peek element at the head of a FIFO queue.
*
* Return an element from the head of the FIFO queue without removing it. A
* use case for this is when elements of the FIFO object are themselves
* containers. On each processing iteration the head container is peeked,
* some data is processed out of it, and only once the container is empty
* is it completely removed from the FIFO queue.
*
* @param fifo Address of the FIFO queue.
*
* @return Head element, or NULL if the FIFO queue is empty.
*/
#define k_fifo_peek_head(fifo) \
({ \
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_head, fifo); \
void *ret = k_queue_peek_head(&(fifo)->_queue); \
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_head, fifo, ret); \
ret; \
})
/**
* @brief Peek element at the tail of FIFO queue.
*
* Return an element from the tail of the FIFO queue without removing it. A
* use case for this is when elements of the FIFO queue are themselves
* containers; it may then be useful to add more data to the last container.
*
* @param fifo Address of the FIFO queue.
*
* @return Tail element, or NULL if the FIFO queue is empty.
*/
#define k_fifo_peek_tail(fifo) \
({ \
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_fifo, peek_tail, fifo); \
void *ret = k_queue_peek_tail(&(fifo)->_queue); \
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_fifo, peek_tail, fifo, ret); \
ret; \
})
/**
* @brief Statically define and initialize a FIFO queue.
*
* The FIFO queue can be accessed outside the module where it is defined using:
*
* @code extern struct k_fifo <name>; @endcode
*
* @param name Name of the FIFO queue.
*/
#define K_FIFO_DEFINE(name) \
STRUCT_SECTION_ITERABLE_ALTERNATE(k_queue, k_fifo, name) = \
Z_FIFO_INITIALIZER(name)
/** @} */
struct k_lifo {
struct k_queue _queue;
};
/**
* @cond INTERNAL_HIDDEN
*/
#define Z_LIFO_INITIALIZER(obj) \
{ \
._queue = Z_QUEUE_INITIALIZER(obj._queue) \
}
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @defgroup lifo_apis LIFO APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Initialize a LIFO queue.
*
* This routine initializes a LIFO queue object, prior to its first use.
*
* @param lifo Address of the LIFO queue.
*/
#define k_lifo_init(lifo) \
({ \
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, init, lifo); \
k_queue_init(&(lifo)->_queue); \
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, init, lifo); \
})
/**
* @brief Add an element to a LIFO queue.
*
* This routine adds a data item to @a lifo. A LIFO queue data item must be
* aligned on a word boundary, and the first word of the item is
* reserved for the kernel's use.
*
* @funcprops \isr_ok
*
* @param lifo Address of the LIFO queue.
* @param data Address of the data item.
*/
#define k_lifo_put(lifo, data) \
({ \
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, put, lifo, data); \
k_queue_prepend(&(lifo)->_queue, data); \
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, put, lifo, data); \
})
/**
* @brief Add an element to a LIFO queue.
*
* This routine adds a data item to @a lifo. There is an implicit memory
* allocation to create an additional temporary bookkeeping data structure from
* the calling thread's resource pool, which is automatically freed when the
* item is removed. The data itself is not copied.
*
* @funcprops \isr_ok
*
* @param lifo Address of the LIFO.
* @param data Address of the data item.
*
* @retval 0 on success
* @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
*/
#define k_lifo_alloc_put(lifo, data) \
({ \
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, alloc_put, lifo, data); \
int ret = k_queue_alloc_prepend(&(lifo)->_queue, data); \
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, alloc_put, lifo, data, ret); \
ret; \
})
/**
* @brief Get an element from a LIFO queue.
*
* This routine removes a data item from @a lifo in a "last in, first out"
* manner. The first word of the data item is reserved for the kernel's use.
*
* @note @a timeout must be set to K_NO_WAIT if called from ISR.
*
* @funcprops \isr_ok
*
* @param lifo Address of the LIFO queue.
* @param timeout Waiting period to obtain a data item,
* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @return Address of the data item if successful; NULL if returned
* without waiting, or waiting period timed out.
*/
#define k_lifo_get(lifo, timeout) \
({ \
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_lifo, get, lifo, timeout); \
void *ret = k_queue_get(&(lifo)->_queue, timeout); \
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_lifo, get, lifo, timeout, ret); \
ret; \
})
/**
* @brief Statically define and initialize a LIFO queue.
*
* The LIFO queue can be accessed outside the module where it is defined using:
*
* @code extern struct k_lifo <name>; @endcode
*
* @param name Name of the LIFO queue.
*/
#define K_LIFO_DEFINE(name) \
STRUCT_SECTION_ITERABLE_ALTERNATE(k_queue, k_lifo, name) = \
Z_LIFO_INITIALIZER(name)
/** @} */
/**
* @cond INTERNAL_HIDDEN
*/
#define K_STACK_FLAG_ALLOC ((uint8_t)1) /* Buffer was allocated */
typedef uintptr_t stack_data_t;
struct k_stack {
_wait_q_t wait_q;
struct k_spinlock lock;
stack_data_t *base, *next, *top;
uint8_t flags;
SYS_PORT_TRACING_TRACKING_FIELD(k_stack)
};
#define Z_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
{ \
.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
.base = stack_buffer, \
.next = stack_buffer, \
.top = stack_buffer + stack_num_entries, \
}
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @defgroup stack_apis Stack APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Initialize a stack.
*
* This routine initializes a stack object, prior to its first use.
*
* @param stack Address of the stack.
* @param buffer Address of array used to hold stacked values.
* @param num_entries Maximum number of values that can be stacked.
*/
void k_stack_init(struct k_stack *stack,
stack_data_t *buffer, uint32_t num_entries);
/**
* @brief Initialize a stack.
*
* This routine initializes a stack object, prior to its first use. Internal
* buffers will be allocated from the calling thread's resource pool.
* This memory will be released if k_stack_cleanup() is called, or
* userspace is enabled and the stack object loses all references to it.
*
* @param stack Address of the stack.
* @param num_entries Maximum number of values that can be stacked.
*
* @return -ENOMEM if memory couldn't be allocated
*/
__syscall int32_t k_stack_alloc_init(struct k_stack *stack,
uint32_t num_entries);
/**
* @brief Release a stack's allocated buffer
*
* If a stack object was given a dynamically allocated buffer via
* k_stack_alloc_init(), this will free it. This function does nothing
* if the buffer wasn't dynamically allocated.
*
* @param stack Address of the stack.
* @retval 0 on success
* @retval -EAGAIN when object is still in use
*/
int k_stack_cleanup(struct k_stack *stack);
/**
* @brief Push an element onto a stack.
*
* This routine adds a stack_data_t value @a data to @a stack.
*
* @funcprops \isr_ok
*
* @param stack Address of the stack.
* @param data Value to push onto the stack.
*
* @retval 0 on success
* @retval -ENOMEM if stack is full
*/
__syscall int k_stack_push(struct k_stack *stack, stack_data_t data);
/**
* @brief Pop an element from a stack.
*
* This routine removes a stack_data_t value from @a stack in a "last in,
* first out" manner and stores the value in @a data.
*
* @note @a timeout must be set to K_NO_WAIT if called from ISR.
*
* @funcprops \isr_ok
*
* @param stack Address of the stack.
* @param data Address of area to hold the value popped from the stack.
* @param timeout Waiting period to obtain a value,
* or one of the special values K_NO_WAIT and
* K_FOREVER.
*
* @retval 0 Element popped from stack.
* @retval -EBUSY Returned without waiting.
* @retval -EAGAIN Waiting period timed out.
*/
__syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
k_timeout_t timeout);
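/*
 * Illustrative sketch (hypothetical names): pushing and popping word-sized
 * values with a statically defined stack.
 *
 *     K_STACK_DEFINE(my_stack, 8);
 *
 *     void producer(void)
 *     {
 *         (void)k_stack_push(&my_stack, (stack_data_t)42);
 *     }
 *
 *     void consumer(void)
 *     {
 *         stack_data_t val;
 *
 *         if (k_stack_pop(&my_stack, &val, K_FOREVER) == 0) {
 *             // val now holds 42
 *         }
 *     }
 */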
/**
* @brief Statically define and initialize a stack
*
* The stack can be accessed outside the module where it is defined using:
*
* @code extern struct k_stack <name>; @endcode
*
* @param name Name of the stack.
* @param stack_num_entries Maximum number of values that can be stacked.
*/
#define K_STACK_DEFINE(name, stack_num_entries) \
stack_data_t __noinit \
_k_stack_buf_##name[stack_num_entries]; \
STRUCT_SECTION_ITERABLE(k_stack, name) = \
Z_STACK_INITIALIZER(name, _k_stack_buf_##name, \
stack_num_entries)
/** @} */
/**
* @cond INTERNAL_HIDDEN
*/
struct k_work;
struct k_work_q;
struct k_work_queue_config;
extern struct k_work_q k_sys_work_q;
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @defgroup mutex_apis Mutex APIs
* @ingroup kernel_apis
* @{
*/
/**
* Mutex Structure
* @ingroup mutex_apis
*/
struct k_mutex {
/** Mutex wait queue */
_wait_q_t wait_q;
/** Mutex owner */
struct k_thread *owner;
/** Current lock count */
uint32_t lock_count;
/** Original thread priority */
int owner_orig_prio;
SYS_PORT_TRACING_TRACKING_FIELD(k_mutex)
};
/**
* @cond INTERNAL_HIDDEN
*/
#define Z_MUTEX_INITIALIZER(obj) \
{ \
.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
.owner = NULL, \
.lock_count = 0, \
.owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
}
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @brief Statically define and initialize a mutex.
*
* The mutex can be accessed outside the module where it is defined using:
*
* @code extern struct k_mutex <name>; @endcode
*
* @param name Name of the mutex.
*/
#define K_MUTEX_DEFINE(name) \
STRUCT_SECTION_ITERABLE(k_mutex, name) = \
Z_MUTEX_INITIALIZER(name)
/**
* @brief Initialize a mutex.
*
* This routine initializes a mutex object, prior to its first use.
*
* Upon completion, the mutex is available and does not have an owner.
*
* @param mutex Address of the mutex.
*
* @retval 0 Mutex object created
*
*/
__syscall int k_mutex_init(struct k_mutex *mutex);
/**
* @brief Lock a mutex.
*
* This routine locks @a mutex. If the mutex is locked by another thread,
* the calling thread waits until the mutex becomes available or until
* a timeout occurs.
*
* A thread is permitted to lock a mutex it has already locked. The operation
* completes immediately and the lock count is increased by 1.
*
* Mutexes may not be locked in ISRs.
*
* @param mutex Address of the mutex.
* @param timeout Waiting period to lock the mutex,
* or one of the special values K_NO_WAIT and
* K_FOREVER.
*
* @retval 0 Mutex locked.
* @retval -EBUSY Returned without waiting.
* @retval -EAGAIN Waiting period timed out.
*/
__syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);
/**
* @brief Unlock a mutex.
*
* This routine unlocks @a mutex. The mutex must already be locked by the
* calling thread.
*
* The mutex cannot be claimed by another thread until it has been unlocked by
* the calling thread as many times as it was previously locked by that
* thread.
*
* Mutexes may not be unlocked in ISRs, as mutexes must only be manipulated
* in thread context due to ownership and priority inheritance semantics.
*
* @param mutex Address of the mutex.
*
* @retval 0 Mutex unlocked.
* @retval -EPERM The current thread does not own the mutex
* @retval -EINVAL The mutex is not locked
*
*/
__syscall int k_mutex_unlock(struct k_mutex *mutex);
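/*
 * Illustrative sketch (hypothetical names): guarding a shared counter with
 * a statically defined mutex. Lock and unlock must occur in the same thread.
 *
 *     K_MUTEX_DEFINE(my_mutex);
 *     static int shared_counter;
 *
 *     void increment(void)
 *     {
 *         if (k_mutex_lock(&my_mutex, K_FOREVER) == 0) {
 *             shared_counter++;
 *             (void)k_mutex_unlock(&my_mutex);
 *         }
 *     }
 */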
/**
* @}
*/
struct k_condvar {
_wait_q_t wait_q;
};
#define Z_CONDVAR_INITIALIZER(obj) \
{ \
.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
}
/**
* @defgroup condvar_apis Condition Variables APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Initialize a condition variable
*
* @param condvar pointer to a @p k_condvar structure
* @retval 0 Condition variable created successfully
*/
__syscall int k_condvar_init(struct k_condvar *condvar);
/**
* @brief Signals one thread that is pending on the condition variable
*
* @param condvar pointer to a @p k_condvar structure
* @retval 0 On success
*/
__syscall int k_condvar_signal(struct k_condvar *condvar);
/**
* @brief Unblock all threads that are pending on the condition
* variable
*
* @param condvar pointer to a @p k_condvar structure
* @return An integer with the number of woken threads on success
*/
__syscall int k_condvar_broadcast(struct k_condvar *condvar);
/**
* @brief Waits on the condition variable releasing the mutex lock
*
* Atomically releases the currently owned mutex, blocks the current thread
* waiting on the condition variable specified by @a condvar,
* and finally acquires the mutex again.
*
* The waiting thread unblocks only after another thread calls
* k_condvar_signal, or k_condvar_broadcast with the same condition variable.
*
* @param condvar pointer to a @p k_condvar structure
* @param mutex Address of the mutex.
* @param timeout Waiting period for the condition variable
* or one of the special values K_NO_WAIT and K_FOREVER.
* @retval 0 On success
* @retval -EAGAIN Waiting period timed out.
*/
__syscall int k_condvar_wait(struct k_condvar *condvar, struct k_mutex *mutex,
k_timeout_t timeout);
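/*
 * Illustrative sketch (hypothetical names): the canonical condition variable
 * wait loop. The predicate is re-checked after every wakeup, since waking
 * does not by itself guarantee the condition of interest holds.
 *
 *     K_MUTEX_DEFINE(my_mutex);
 *     K_CONDVAR_DEFINE(my_condvar);
 *     static bool data_ready;
 *
 *     void consumer(void)
 *     {
 *         k_mutex_lock(&my_mutex, K_FOREVER);
 *         while (!data_ready) {
 *             k_condvar_wait(&my_condvar, &my_mutex, K_FOREVER);
 *         }
 *         // consume the data while still holding the mutex
 *         k_mutex_unlock(&my_mutex);
 *     }
 *
 *     void producer(void)
 *     {
 *         k_mutex_lock(&my_mutex, K_FOREVER);
 *         data_ready = true;
 *         k_condvar_signal(&my_condvar);
 *         k_mutex_unlock(&my_mutex);
 *     }
 */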
/**
* @brief Statically define and initialize a condition variable.
*
* The condition variable can be accessed outside the module where it is
* defined using:
*
* @code extern struct k_condvar <name>; @endcode
*
* @param name Name of the condition variable.
*/
#define K_CONDVAR_DEFINE(name) \
STRUCT_SECTION_ITERABLE(k_condvar, name) = \
Z_CONDVAR_INITIALIZER(name)
/**
* @}
*/
/**
* @cond INTERNAL_HIDDEN
*/
struct k_sem {
_wait_q_t wait_q;
unsigned int count;
unsigned int limit;
_POLL_EVENT;
SYS_PORT_TRACING_TRACKING_FIELD(k_sem)
};
#define Z_SEM_INITIALIZER(obj, initial_count, count_limit) \
{ \
.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
.count = initial_count, \
.limit = count_limit, \
_POLL_EVENT_OBJ_INIT(obj) \
}
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @defgroup semaphore_apis Semaphore APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Maximum limit value allowed for a semaphore.
*
* This is intended for use when a semaphore does not have
* an explicit maximum limit, and instead is just used for
* counting purposes.
*
*/
#define K_SEM_MAX_LIMIT UINT_MAX
/**
* @brief Initialize a semaphore.
*
* This routine initializes a semaphore object, prior to its first use.
*
* @param sem Address of the semaphore.
* @param initial_count Initial semaphore count.
* @param limit Maximum permitted semaphore count.
*
* @see K_SEM_MAX_LIMIT
*
* @retval 0 Semaphore created successfully
* @retval -EINVAL Invalid values
*
*/
__syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
unsigned int limit);
/**
* @brief Take a semaphore.
*
* This routine takes @a sem.
*
* @note @a timeout must be set to K_NO_WAIT if called from ISR.
*
* @funcprops \isr_ok
*
* @param sem Address of the semaphore.
* @param timeout Waiting period to take the semaphore,
* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @retval 0 Semaphore taken.
* @retval -EBUSY Returned without waiting.
* @retval -EAGAIN Waiting period timed out,
* or the semaphore was reset during the waiting period.
*/
__syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
/**
* @brief Give a semaphore.
*
* This routine gives @a sem, unless the semaphore is already at its maximum
* permitted count.
*
* @funcprops \isr_ok
*
* @param sem Address of the semaphore.
*/
__syscall void k_sem_give(struct k_sem *sem);
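/*
 * Illustrative sketch (hypothetical names): an ISR signals a worker thread
 * through a semaphore; giving never blocks, so it is safe from interrupt
 * context.
 *
 *     K_SEM_DEFINE(my_sem, 0, 1);
 *
 *     void my_isr(const void *arg)
 *     {
 *         k_sem_give(&my_sem);
 *     }
 *
 *     void worker(void)
 *     {
 *         for (;;) {
 *             if (k_sem_take(&my_sem, K_FOREVER) == 0) {
 *                 // handle the event signaled by the ISR
 *             }
 *         }
 *     }
 */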
/**
* @brief Resets a semaphore's count to zero.
*
* This routine sets the count of @a sem to zero.
* Any outstanding semaphore takes will be aborted
* with -EAGAIN.
*
* @param sem Address of the semaphore.
*/
__syscall void k_sem_reset(struct k_sem *sem);
/**
* @brief Get a semaphore's count.
*
* This routine returns the current count of @a sem.
*
* @param sem Address of the semaphore.
*
* @return Current semaphore count.
*/
__syscall unsigned int k_sem_count_get(struct k_sem *sem);
/**
* @internal
*/
static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
{
return sem->count;
}
/**
* @brief Statically define and initialize a semaphore.
*
* The semaphore can be accessed outside the module where it is defined using:
*
* @code extern struct k_sem <name>; @endcode
*
* @param name Name of the semaphore.
* @param initial_count Initial semaphore count.
* @param count_limit Maximum permitted semaphore count.
*/
#define K_SEM_DEFINE(name, initial_count, count_limit) \
STRUCT_SECTION_ITERABLE(k_sem, name) = \
Z_SEM_INITIALIZER(name, initial_count, count_limit); \
BUILD_ASSERT(((count_limit) != 0) && \
((initial_count) <= (count_limit)) && \
((count_limit) <= K_SEM_MAX_LIMIT));
/** @} */
/**
* @cond INTERNAL_HIDDEN
*/
struct k_work_delayable;
struct k_work_sync;
/**
* INTERNAL_HIDDEN @endcond
*/
/**
* @defgroup workqueue_apis Work Queue APIs
* @ingroup kernel_apis
* @{
*/
/** @brief The signature for a work item handler function.
*
* The function will be invoked by the thread animating a work queue.
*
* @param work the work item that provided the handler.
*/
typedef void (*k_work_handler_t)(struct k_work *work);
/** @brief Initialize a (non-delayable) work structure.
*
* This must be invoked before submitting a work structure for the first time.
* It need not be invoked again on the same work structure. It can be
* re-invoked to change the associated handler, but this must be done when the
* work item is idle.
*
* @funcprops \isr_ok
*
* @param work the work structure to be initialized.
*
* @param handler the handler to be invoked by the work item.
*/
void k_work_init(struct k_work *work,
k_work_handler_t handler);
/** @brief Busy state flags from the work item.
*
* A zero return value indicates the work item appears to be idle.
*
* @note This is a live snapshot of state, which may change before the result
* is checked. Use locks where appropriate.
*
* @funcprops \isr_ok
*
* @param work pointer to the work item.
*
* @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED,
* K_WORK_RUNNING, and K_WORK_CANCELING.
*/
int k_work_busy_get(const struct k_work *work);
/** @brief Test whether a work item is currently pending.
*
* Wrapper to determine whether a work item is in a non-idle state.
*
* @note This is a live snapshot of state, which may change before the result
* is checked. Use locks where appropriate.
*
* @funcprops \isr_ok
*
* @param work pointer to the work item.
*
* @return true if and only if k_work_busy_get() returns a non-zero value.
*/
static inline bool k_work_is_pending(const struct k_work *work);
/** @brief Submit a work item to a queue.
*
* @param queue pointer to the work queue on which the item should run. If
* NULL the queue from the most recent submission will be used.
*
* @funcprops \isr_ok
*
* @param work pointer to the work item.
*
* @retval 0 if work was already submitted to a queue
* @retval 1 if work was not submitted and has been queued to @p queue
* @retval 2 if work was running and has been queued to the queue that was
* running it
* @retval -EBUSY
* * if work submission was rejected because the work item is cancelling; or
* * @p queue is draining; or
* * @p queue is plugged.
* @retval -EINVAL if @p queue is null and the work item has never been run.
* @retval -ENODEV if @p queue has not been started.
*/
int k_work_submit_to_queue(struct k_work_q *queue,
struct k_work *work);
/** @brief Submit a work item to the system queue.
*
* @funcprops \isr_ok
*
* @param work pointer to the work item.
*
* @return as with k_work_submit_to_queue().
*/
extern int k_work_submit(struct k_work *work);
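/*
 * Illustrative sketch (hypothetical names): a work item initialized once and
 * then submitted to the system work queue, possibly from an ISR.
 *
 *     static void my_handler(struct k_work *work)
 *     {
 *         // runs in the system work queue thread
 *     }
 *
 *     static struct k_work my_work;
 *
 *     void setup(void)
 *     {
 *         k_work_init(&my_work, my_handler);
 *     }
 *
 *     void trigger(void)
 *     {
 *         (void)k_work_submit(&my_work);
 *     }
 */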
/** @brief Wait for last-submitted instance to complete.
*
* Resubmissions may occur while waiting, including chained submissions (from
* within the handler).
*
* @note Be careful of caller and work queue thread relative priority. If
* this function sleeps it will not return until the work queue thread
* completes the tasks that allow this thread to resume.
*
* @note Behavior is undefined if this function is invoked on @p work from a
* work queue running @p work.
*
* @param work pointer to the work item.
*
* @param sync pointer to an opaque item containing state related to the
* pending cancellation. The object must persist until the call returns, and
* be accessible from both the caller thread and the work queue thread. The
* object must not be used for any other flush or cancel operation until this
* one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
* must be allocated in coherent memory.
*
* @retval true if call had to wait for completion
* @retval false if work was already idle
*/
bool k_work_flush(struct k_work *work,
struct k_work_sync *sync);
/** @brief Cancel a work item.
*
* This attempts to prevent a pending (non-delayable) work item from being
* processed by removing it from the work queue. If the item is being
* processed, the work item will continue to be processed, but resubmissions
* are rejected until cancellation completes.
*
* If this returns zero cancellation is complete, otherwise something
* (probably a work queue thread) is still referencing the item.
*
* See also k_work_cancel_sync().
*
* @funcprops \isr_ok
*
* @param work pointer to the work item.
*
* @return the k_work_busy_get() status indicating the state of the item after all
* cancellation steps performed by this call are completed.
*/
int k_work_cancel(struct k_work *work);
/** @brief Cancel a work item and wait for it to complete.
*
* Same as k_work_cancel() but does not return until cancellation is complete.
* This can be invoked by a thread after k_work_cancel() to synchronize with a
* previous cancellation.
*
* On return the work structure will be idle unless something submits it after
* the cancellation was complete.
*
* @note Be careful of caller and work queue thread relative priority. If
* this function sleeps it will not return until the work queue thread
* completes the tasks that allow this thread to resume.
*
* @note Behavior is undefined if this function is invoked on @p work from a
* work queue running @p work.
*
* @param work pointer to the work item.
*
* @param sync pointer to an opaque item containing state related to the
* pending cancellation. The object must persist until the call returns, and
* be accessible from both the caller thread and the work queue thread. The
* object must not be used for any other flush or cancel operation until this
* one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
* must be allocated in coherent memory.
*
* @retval true if work was pending (call had to wait for cancellation of a
* running handler to complete, or scheduled or submitted operations were
* cancelled);
* @retval false otherwise
*/
bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync);
/** @brief Initialize a work queue structure.
*
* This must be invoked before starting a work queue structure for the first time.
* It need not be invoked again on the same work queue structure.
*
* @funcprops \isr_ok
*
* @param queue the queue structure to be initialized.
*/
void k_work_queue_init(struct k_work_q *queue);
/** @brief Initialize a work queue.
*
* This configures the work queue thread and starts it running. The function
* should not be re-invoked on a queue.
*
* @param queue pointer to the queue structure. It must be initialized
* in zeroed/bss memory or with @ref k_work_queue_init before
* use.
*
* @param stack pointer to the work thread stack area.
*
* @param stack_size size of the work thread stack area, in bytes.
*
* @param prio initial thread priority
*
* @param cfg optional additional configuration parameters. Pass @c
* NULL if not required, to use the defaults documented in
* k_work_queue_config.
*/
void k_work_queue_start(struct k_work_q *queue,
k_thread_stack_t *stack, size_t stack_size,
int prio, const struct k_work_queue_config *cfg);
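/*
 * Illustrative sketch (hypothetical names and sizes): starting a dedicated
 * work queue with a named thread.
 *
 *     #define MY_STACK_SIZE 1024
 *     #define MY_PRIORITY 5
 *
 *     K_THREAD_STACK_DEFINE(my_stack_area, MY_STACK_SIZE);
 *     static struct k_work_q my_work_q;
 *
 *     void start_queue(void)
 *     {
 *         struct k_work_queue_config cfg = { .name = "my_wq" };
 *
 *         k_work_queue_init(&my_work_q);
 *         k_work_queue_start(&my_work_q, my_stack_area,
 *                            K_THREAD_STACK_SIZEOF(my_stack_area),
 *                            MY_PRIORITY, &cfg);
 *     }
 */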
/** @brief Access the thread that animates a work queue.
*
* This is necessary to grant a work queue thread access to things the work
* items it will process are expected to use.
*
* @param queue pointer to the queue structure.
*
* @return the thread associated with the work queue.
*/
static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue);
/** @brief Wait until the work queue has drained, optionally plugging it.
*
* This blocks submission to the work queue except when coming from the queue
* thread, and blocks the caller until no more work items are available in the
* queue.
*
* If @p plug is true then submission will continue to be blocked after the
* drain operation completes until k_work_queue_unplug() is invoked.
*
* Note that work items that are delayed are not yet associated with their
* work queue. They must be cancelled externally if a goal is to ensure the
* work queue remains empty. The @p plug feature can be used to prevent
* delayed items from being submitted after the drain completes.
*
* @param queue pointer to the queue structure.
*
* @param plug if true the work queue will continue to block new submissions
* after all items have drained.
*
* @retval 1 if call had to wait for the drain to complete
* @retval 0 if call did not have to wait
* @retval negative if wait was interrupted or failed
*/
int k_work_queue_drain(struct k_work_q *queue, bool plug);
/** @brief Release a work queue to accept new submissions.
*
* This releases the block on new submissions placed when k_work_queue_drain()
* is invoked with the @p plug option enabled. If this is invoked before the
* drain completes new items may be submitted as soon as the drain completes.
*
* @funcprops \isr_ok
*
* @param queue pointer to the queue structure.
*
* @retval 0 if successfully unplugged
* @retval -EALREADY if the work queue was not plugged.
*/
int k_work_queue_unplug(struct k_work_q *queue);
/** @brief Initialize a delayable work structure.
*
* This must be invoked before scheduling a delayable work structure for the
* first time. It need not be invoked again on the same work structure. It
* can be re-invoked to change the associated handler, but this must be done
* when the work item is idle.
*
* @funcprops \isr_ok
*
* @param dwork the delayable work structure to be initialized.
*
* @param handler the handler to be invoked by the work item.
*/
void k_work_init_delayable(struct k_work_delayable *dwork,
k_work_handler_t handler);
/**
* @brief Get the parent delayable work structure from a work pointer.
*
* This function is necessary when a @c k_work_handler_t function is passed to
* k_work_schedule_for_queue() and the handler needs to access data from the
* structure that contains the `k_work_delayable`.
*
* @param work Address passed to the work handler
*
* @return Address of the containing @c k_work_delayable structure.
*/
static inline struct k_work_delayable *
k_work_delayable_from_work(struct k_work *work);
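/*
 * Illustrative sketch (hypothetical names): recovering the containing
 * structure from inside a handler, going from the k_work pointer to the
 * k_work_delayable and then to its container.
 *
 *     struct my_data {
 *         struct k_work_delayable dwork;
 *         int state;
 *     };
 *
 *     static void my_handler(struct k_work *work)
 *     {
 *         struct k_work_delayable *dwork =
 *             k_work_delayable_from_work(work);
 *         struct my_data *data =
 *             CONTAINER_OF(dwork, struct my_data, dwork);
 *
 *         data->state++;
 *     }
 */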
/** @brief Busy state flags from the delayable work item.
*
* @funcprops \isr_ok
*
* @note This is a live snapshot of state, which may change before the result
* can be inspected. Use locks where appropriate.
*
* @param dwork pointer to the delayable work item.
*
* @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED, K_WORK_RUNNING, and
* K_WORK_CANCELING. A zero return value indicates the work item appears to
* be idle.
*/
int k_work_delayable_busy_get(const struct k_work_delayable *dwork);
/** @brief Test whether a delayed work item is currently pending.
*
* Wrapper to determine whether a delayed work item is in a non-idle state.
*
* @note This is a live snapshot of state, which may change before the result
* can be inspected. Use locks where appropriate.
*
* @funcprops \isr_ok
*
* @param dwork pointer to the delayable work item.
*
* @return true if and only if k_work_delayable_busy_get() returns a non-zero
* value.
*/
static inline bool k_work_delayable_is_pending(
const struct k_work_delayable *dwork);
/** @brief Get the absolute tick count at which a scheduled delayable work
* will be submitted.
*
* @note This is a live snapshot of state, which may change before the result
* can be inspected. Use locks where appropriate.
*
* @funcprops \isr_ok
*
* @param dwork pointer to the delayable work item.
*
* @return the tick count when the timer that will schedule the work item will
* expire, or the current tick count if the work is not scheduled.
*/
static inline k_ticks_t k_work_delayable_expires_get(
const struct k_work_delayable *dwork);
/** @brief Get the number of ticks until a scheduled delayable work will be
* submitted.
*
* @note This is a live snapshot of state, which may change before the result
* can be inspected. Use locks where appropriate.
*
* @funcprops \isr_ok
*
* @param dwork pointer to the delayable work item.
*
* @return the number of ticks until the timer that will schedule the work
* item will expire, or zero if the item is not scheduled.
*/
static inline k_ticks_t k_work_delayable_remaining_get(
const struct k_work_delayable *dwork);
/** @brief Submit an idle work item to a queue after a delay.
*
* Unlike k_work_reschedule_for_queue() this is a no-op if the work item is
* already scheduled or submitted, even if @p delay is @c K_NO_WAIT.
*
* @funcprops \isr_ok
*
* @param queue the queue on which the work item should be submitted after the
* delay.
*
* @param dwork pointer to the delayable work item.
*
* @param delay the time to wait before submitting the work item. If @c
* K_NO_WAIT and the work is not pending this is equivalent to
* k_work_submit_to_queue().
*
* @retval 0 if work was already scheduled or submitted.
* @retval 1 if work has been scheduled.
* @retval -EBUSY if @p delay is @c K_NO_WAIT and
* k_work_submit_to_queue() fails with this code.
* @retval -EINVAL if @p delay is @c K_NO_WAIT and
* k_work_submit_to_queue() fails with this code.
* @retval -ENODEV if @p delay is @c K_NO_WAIT and
* k_work_submit_to_queue() fails with this code.
*/
int k_work_schedule_for_queue(struct k_work_q *queue,
struct k_work_delayable *dwork,
k_timeout_t delay);
/** @brief Submit an idle work item to the system work queue after a
* delay.
*
* This is a thin wrapper around k_work_schedule_for_queue(), with all the API
* characteristics of that function.
*
* @param dwork pointer to the delayable work item.
*
* @param delay the time to wait before submitting the work item. If @c
* K_NO_WAIT this is equivalent to k_work_submit_to_queue().
*
* @return as with k_work_schedule_for_queue().
*/
extern int k_work_schedule(struct k_work_delayable *dwork,
k_timeout_t delay);
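/*
 * Illustrative sketch (hypothetical names): scheduling delayable work on the
 * system work queue; repeated calls while the item is pending are no-ops.
 *
 *     static void my_handler(struct k_work *work);
 *
 *     static K_WORK_DELAYABLE_DEFINE(my_dwork, my_handler);
 *
 *     void kick(void)
 *     {
 *         (void)k_work_schedule(&my_dwork, K_MSEC(100));
 *     }
 */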
/** @brief Reschedule a work item to a queue after a delay.
*
* Unlike k_work_schedule_for_queue() this function can change the deadline of
* a scheduled work item, and will schedule a work item that isn't idle
* (e.g. is submitted or running). This function does not affect ("unsubmit")
* a work item that has been submitted to a queue.
*
* @funcprops \isr_ok
*
* @param queue the queue on which the work item should be submitted after the
* delay.
*
* @param dwork pointer to the delayable work item.
*
* @param delay the time to wait before submitting the work item. If @c
* K_NO_WAIT this is equivalent to k_work_submit_to_queue() after canceling
* any previous scheduled submission.
*
* @note If delay is @c K_NO_WAIT ("no delay") the return values are as with
* k_work_submit_to_queue().
*
* @retval 0 if delay is @c K_NO_WAIT and work was already on a queue
* @retval 1 if
* * delay is @c K_NO_WAIT and work was not submitted but has now been queued
* to @p queue; or
* * delay not @c K_NO_WAIT and work has been scheduled
* @retval 2 if delay is @c K_NO_WAIT and work was running and has been queued
* to the queue that was running it
* @retval -EBUSY if @p delay is @c K_NO_WAIT and
* k_work_submit_to_queue() fails with this code.
* @retval -EINVAL if @p delay is @c K_NO_WAIT and
* k_work_submit_to_queue() fails with this code.
* @retval -ENODEV if @p delay is @c K_NO_WAIT and
* k_work_submit_to_queue() fails with this code.
*/
int k_work_reschedule_for_queue(struct k_work_q *queue,
struct k_work_delayable *dwork,
k_timeout_t delay);
/** @brief Reschedule a work item to the system work queue after a
* delay.
*
* This is a thin wrapper around k_work_reschedule_for_queue(), with all the
* API characteristics of that function.
*
* @param dwork pointer to the delayable work item.
*
* @param delay the time to wait before submitting the work item.
*
* @return as with k_work_reschedule_for_queue().
*/
extern int k_work_reschedule(struct k_work_delayable *dwork,
k_timeout_t delay);
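/*
 * Illustrative sketch (hypothetical names): a debounce pattern. Each call
 * pushes the deadline out again, so the handler runs only after the input
 * has been quiet for the full delay.
 *
 *     static void debounce_handler(struct k_work *work);
 *
 *     static K_WORK_DELAYABLE_DEFINE(debounce_work, debounce_handler);
 *
 *     void on_input_event(void)   // may be called from an ISR
 *     {
 *         (void)k_work_reschedule(&debounce_work, K_MSEC(50));
 *     }
 */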
/** @brief Flush delayable work.
*
* If the work is scheduled, it is immediately submitted. Then the caller
* blocks until the work completes, as with k_work_flush().
*
* @note Be careful of caller and work queue thread relative priority. If
* this function sleeps it will not return until the work queue thread
* completes the tasks that allow this thread to resume.
*
* @note Behavior is undefined if this function is invoked on @p dwork from a
* work queue running @p dwork.
*
* @param dwork pointer to the delayable work item.
*
* @param sync pointer to an opaque item containing state related to the
* pending cancellation. The object must persist until the call returns, and
* be accessible from both the caller thread and the work queue thread. The
* object must not be used for any other flush or cancel operation until this
* one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
* must be allocated in coherent memory.
*
* @retval true if call had to wait for completion
* @retval false if work was already idle
*/
bool k_work_flush_delayable(struct k_work_delayable *dwork,
struct k_work_sync *sync);
/** @brief Cancel delayable work.
*
* Similar to k_work_cancel() but for delayable work. If the work is
* scheduled or submitted it is canceled. This function does not wait for the
* cancellation to complete.
*
* @note The work may still be running when this returns. Use
* k_work_flush_delayable() or k_work_cancel_delayable_sync() to ensure it is
* not running.
*
* @note Canceling delayable work does not prevent rescheduling it. It does
* prevent submitting it until the cancellation completes.
*
* @funcprops \isr_ok
*
* @param dwork pointer to the delayable work item.
*
* @return the k_work_delayable_busy_get() status indicating the state of the
* item after all cancellation steps performed by this call are completed.
*/
int k_work_cancel_delayable(struct k_work_delayable *dwork);
/** @brief Cancel delayable work and wait.
*
* Like k_work_cancel_delayable() but waits until the work becomes idle.
*
* @note Canceling delayable work does not prevent rescheduling it. It does
* prevent submitting it until the cancellation completes.
*
* @note Be careful of caller and work queue thread relative priority. If
* this function sleeps it will not return until the work queue thread
* completes the tasks that allow this thread to resume.
*
* @note Behavior is undefined if this function is invoked on @p dwork from a
* work queue running @p dwork.
*
* @param dwork pointer to the delayable work item.
*
* @param sync pointer to an opaque item containing state related to the
* pending cancellation. The object must persist until the call returns, and
* be accessible from both the caller thread and the work queue thread. The
* object must not be used for any other flush or cancel operation until this
* one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
* must be allocated in coherent memory.
*
* @retval true if work was not idle (call had to wait for cancellation of a
* running handler to complete, or scheduled or submitted operations were
* cancelled);
* @retval false otherwise
*/
bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
struct k_work_sync *sync);
enum {
/**
* @cond INTERNAL_HIDDEN
*/
/* The atomic API is used for all work and queue flags fields to
* enforce sequential consistency in SMP environments.
*/
/* Bits that represent the work item states. At least nine of the
* combinations are distinct valid stable states.
*/
K_WORK_RUNNING_BIT = 0,
K_WORK_CANCELING_BIT = 1,
K_WORK_QUEUED_BIT = 2,
K_WORK_DELAYED_BIT = 3,
K_WORK_MASK = BIT(K_WORK_DELAYED_BIT) | BIT(K_WORK_QUEUED_BIT)
| BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT),
/* Static work flags */
K_WORK_DELAYABLE_BIT = 8,
K_WORK_DELAYABLE = BIT(K_WORK_DELAYABLE_BIT),
/* Dynamic work queue flags */
K_WORK_QUEUE_STARTED_BIT = 0,
K_WORK_QUEUE_STARTED = BIT(K_WORK_QUEUE_STARTED_BIT),
K_WORK_QUEUE_BUSY_BIT = 1,
K_WORK_QUEUE_BUSY = BIT(K_WORK_QUEUE_BUSY_BIT),
K_WORK_QUEUE_DRAIN_BIT = 2,
K_WORK_QUEUE_DRAIN = BIT(K_WORK_QUEUE_DRAIN_BIT),
K_WORK_QUEUE_PLUGGED_BIT = 3,
K_WORK_QUEUE_PLUGGED = BIT(K_WORK_QUEUE_PLUGGED_BIT),
/* Static work queue flags */
K_WORK_QUEUE_NO_YIELD_BIT = 8,
K_WORK_QUEUE_NO_YIELD = BIT(K_WORK_QUEUE_NO_YIELD_BIT),
/**
* INTERNAL_HIDDEN @endcond
*/
/* Transient work flags */
/** @brief Flag indicating a work item that is running under a work
* queue thread.
*
* Accessed via k_work_busy_get(). May co-occur with other flags.
*/
K_WORK_RUNNING = BIT(K_WORK_RUNNING_BIT),
/** @brief Flag indicating a work item that is being canceled.
*
* Accessed via k_work_busy_get(). May co-occur with other flags.
*/
K_WORK_CANCELING = BIT(K_WORK_CANCELING_BIT),
/** @brief Flag indicating a work item that has been submitted to a
* queue but has not started running.
*
* Accessed via k_work_busy_get(). May co-occur with other flags.
*/
K_WORK_QUEUED = BIT(K_WORK_QUEUED_BIT),
/** @brief Flag indicating a delayed work item that is scheduled for
* submission to a queue.
*
* Accessed via k_work_busy_get(). May co-occur with other flags.
*/
K_WORK_DELAYED = BIT(K_WORK_DELAYED_BIT),
};
/** @brief A structure used to submit work. */
struct k_work {
/* All fields are protected by the work module spinlock. No fields
* are to be accessed except through kernel API.
*/
/* Node to link into k_work_q pending list. */
sys_snode_t node;
/* The function to be invoked by the work queue thread. */
k_work_handler_t handler;
/* The queue on which the work item was last submitted. */
struct k_work_q *queue;
/* State of the work item.
*
* The item can be DELAYED, QUEUED, and RUNNING simultaneously.
*
* It can be RUNNING and CANCELING simultaneously.
*/
uint32_t flags;
};
#define Z_WORK_INITIALIZER(work_handler) { \
.handler = work_handler, \
}
/** @brief A structure used to submit work after a delay. */
struct k_work_delayable {
/* The work item. */
struct k_work work;
/* Timeout used to submit work after a delay. */
struct _timeout timeout;
/* The queue to which the work should be submitted. */
struct k_work_q *queue;
};
#define Z_WORK_DELAYABLE_INITIALIZER(work_handler) { \
.work = { \
.handler = work_handler, \
.flags = K_WORK_DELAYABLE, \
}, \
}
/**
* @brief Initialize a statically-defined delayable work item.
*
* This macro can be used to initialize a statically-defined delayable
* work item, prior to its first use. For example,
*
* @code static K_WORK_DELAYABLE_DEFINE(<dwork>, <work_handler>); @endcode
*
* Note that if the runtime dependencies permit initialization with
* k_work_init_delayable(), using that instead eliminates the initialized
* object in ROM that this macro produces and that is copied in at
* system startup.
*
* @param work Symbol name for delayable work item object
* @param work_handler Function to invoke each time work item is processed.
*/
#define K_WORK_DELAYABLE_DEFINE(work, work_handler) \
struct k_work_delayable work \
= Z_WORK_DELAYABLE_INITIALIZER(work_handler)
/**
* @cond INTERNAL_HIDDEN
*/
/* Record used to wait for work to flush.
*
* The work item is inserted into the queue that will process (or is
* processing) the item, and will be processed as soon as the item
* completes. When the flusher is processed the semaphore will be
* signaled, releasing the thread waiting for the flush.
*/
struct z_work_flusher {
struct k_work work;
struct k_sem sem;
};
/* Record used to wait for work to complete a cancellation.
*
* The work item is inserted into a global queue of pending cancels.
* When a cancelling work item goes idle any matching waiters are
* removed from pending_cancels and are woken.
*/
struct z_work_canceller {
sys_snode_t node;
struct k_work *work;
struct k_sem sem;
};
/**
* INTERNAL_HIDDEN @endcond
*/
/** @brief A structure holding internal state for a pending synchronous
* operation on a work item or queue.
*
* Instances of this type are provided by the caller for invocation of
* k_work_flush(), k_work_cancel_sync() and sibling flush and cancel APIs. A
* referenced object must persist until the call returns, and be accessible
* from both the caller thread and the work queue thread.
*
* @note If CONFIG_KERNEL_COHERENCE is enabled the object must be allocated in
* coherent memory; see arch_mem_coherent(). The stack on these architectures
* is generally not coherent, so the object must not be stack-allocated.
* Violations are detected by
* runtime assertion.
*/
struct k_work_sync {
union {
struct z_work_flusher flusher;
struct z_work_canceller canceller;
};
};
/** @brief A structure holding optional configuration items for a work
* queue.
*
* This structure, and values it references, are not retained by
* k_work_queue_start().
*/
struct k_work_queue_config {
/** The name to be given to the work queue thread.
*
* If left null the thread will not have a name.
*/
const char *name;
/** Control whether the work queue thread should yield between
* items.
*
* Yielding between items helps guarantee the work queue
* thread does not starve other threads, including cooperative
* ones released by a work item. This is the default behavior.
*
* Set this to @c true to prevent the work queue thread from
* yielding between items. This may be appropriate when a
* sequence of items should complete without yielding
* control.
*/
bool no_yield;
};
/** @brief A structure used to hold work until it can be processed. */
struct k_work_q {
/* The thread that animates the work. */
struct k_thread thread;
/* All the following fields must be accessed only while the
* work module spinlock is held.
*/
/* List of k_work items to be worked. */
sys_slist_t pending;
/* Wait queue for idle work thread. */
_wait_q_t notifyq;
/* Wait queue for threads waiting for the queue to drain. */
_wait_q_t drainq;
/* Flags describing queue state. */
uint32_t flags;
};
/* Provide the implementation for inline functions declared above */
static inline bool k_work_is_pending(const struct k_work *work)
{
return k_work_busy_get(work) != 0;
}
static inline struct k_work_delayable *
k_work_delayable_from_work(struct k_work *work)
{
return CONTAINER_OF(work, struct k_work_delayable, work);
}
static inline bool k_work_delayable_is_pending(
const struct k_work_delayable *dwork)
{
return k_work_delayable_busy_get(dwork) != 0;
}
static inline k_ticks_t k_work_delayable_expires_get(
const struct k_work_delayable *dwork)
{
return z_timeout_expires(&dwork->timeout);
}
static inline k_ticks_t k_work_delayable_remaining_get(
const struct k_work_delayable *dwork)
{
return z_timeout_remaining(&dwork->timeout);
}
static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
{
return &queue->thread;
}
/** @} */
struct k_work_user;
/**
* @addtogroup workqueue_apis
* @{
*/
/**
* @typedef k_work_user_handler_t
* @brief Work item handler function type for user work queues.
*
* A work item's handler function is executed by a user workqueue's thread
* when the work item is processed by the workqueue.
*
* @param work Address of the work item.
*/
typedef void (*k_work_user_handler_t)(struct k_work_user *work);
/**
* @cond INTERNAL_HIDDEN
*/
struct k_work_user_q {
struct k_queue queue;
struct k_thread thread;
};
enum {
K_WORK_USER_STATE_PENDING, /* Work item pending state */
};
struct k_work_user {
void *_reserved; /* Used by k_queue implementation. */
k_work_user_handler_t handler;
atomic_t flags;
};
/**
* INTERNAL_HIDDEN @endcond
*/
#if defined(__cplusplus) && ((__cplusplus - 0) < 202002L)
#define Z_WORK_USER_INITIALIZER(work_handler) { NULL, work_handler, 0 }
#else