kernel/timeout: Make timeout arguments an opaque type
Add a k_timeout_t type, and use it everywhere that kernel API
functions previously accepted a millisecond timeout argument.
Instead of forcing milliseconds everywhere (which are often not
integrally representable as system ticks), do the conversion to
ticks at the point where the timeout is created. This avoids an
extra unit conversion in some application code, and allows timeouts
to be expressed in units other than milliseconds for greater
precision.
The existing K_MSEC() et al. macros now return initializers for a
k_timeout_t.
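For example (my_sem here is a placeholder for any semaphore):

    k_timeout_t t = K_MSEC(100);       /* converted to ticks here */

    k_sem_take(&my_sem, K_MSEC(100));  /* or passed directly to an API */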
The K_NO_WAIT and K_FOREVER constants have now become k_timeout_t
values, which means they can no longer be operated on as integers.
Applications with their own APIs that need to compare user-provided
timeouts against these constants can now use the K_TIMEOUT_EQ()
predicate to test for equality.
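For example, a wrapper API that previously tested its timeout
argument against -1 or 0 can instead do (sketch only; my_api() is a
hypothetical user function):

    int my_api(k_timeout_t timeout)
    {
            if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
                    /* caller wants to block indefinitely */
            } else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
                    /* caller wants an immediate return */
            }
            /* ... do the work ... */
            return 0;
    }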
Timer drivers, which receive an integer tick count in their
z_clock_set_timeout() functions, now use the integer-valued
K_TICKS_FOREVER constant instead of K_FOREVER.
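The resulting idiom, visible throughout the timer driver hunks
below, looks like:

    /* "ticks" is a plain integer argument of z_clock_set_timeout() */
    ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
    ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);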
For the initial release, a CONFIG_LEGACY_TIMEOUT_API kconfig option
is provided to preserve source compatibility. When enabled,
k_timeout_t remains a 32-bit millisecond count that works with any
legacy Zephyr application.
Some subsystems expose timeout (or timeout-like) values to their own
users via APIs that reuse the kernel's constants and conventions.
These will require some minor design work to adapt to the new scheme
(in most cases just using k_timeout_t directly in their own APIs);
they have not been changed in this patch, and instead select
CONFIG_LEGACY_TIMEOUT_API via kconfig. These subsystems include: CAN
bus, the micro:bit display driver, I2S, the LoRa modem drivers, the
UART async API, video hardware drivers, the console subsystem, and
the network buffer abstraction.
k_sleep() now takes a k_timeout_t argument, with a k_msleep() variant
provided that works identically to the original API.
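That is, the two calls below now behave identically (k_msleep() is a
trivial inline wrapper that expands to k_sleep(Z_TIMEOUT_MS(ms))):

    k_sleep(K_MSEC(100));   /* new-style opaque timeout */
    k_msleep(100);          /* old-style millisecond count */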
Most of the changes here are just type/configuration management and
documentation, but there are logic changes in mempool, where a loop
that used a timeout numerically has been reworked using a new
z_timeout_end_calc() utility. Also, in queue.c (when POLL was
enabled), a similar loop was needlessly used to retry the k_poll()
call after a spurious failure. But k_poll() does not fail
spuriously, so the loop was removed.
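A condensed sketch of the reworked mempool wait loop (the full
change is in kernel/mempool.c below): z_timeout_end_calc() converts
the relative timeout into an absolute tick deadline, so the
remaining wait can be recomputed after each wakeup:

    u64_t end = z_timeout_end_calc(timeout);

    while (true) {
            /* ... attempt the allocation; return unless it failed
             * with -ENOMEM and timeout was not K_NO_WAIT ...
             */
            z_pend_curr_unlocked(&p->wait_q, timeout);

            if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
                    s64_t remaining = end - z_tick_get();

                    if (remaining <= 0) {
                            break;  /* deadline has passed */
                    }
                    timeout = Z_TIMEOUT_TICKS(remaining);
            }
    }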
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
diff --git a/boards/arm/qemu_cortex_m0/nrf_timer_timer.c b/boards/arm/qemu_cortex_m0/nrf_timer_timer.c
index d89aeb0..d702089 100644
--- a/boards/arm/qemu_cortex_m0/nrf_timer_timer.c
+++ b/boards/arm/qemu_cortex_m0/nrf_timer_timer.c
@@ -108,7 +108,7 @@
ARG_UNUSED(idle);
#ifdef CONFIG_TICKLESS_KERNEL
- ticks = (ticks == K_FOREVER) ? MAX_TICKS : ticks;
+ ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);
k_spinlock_key_t key = k_spin_lock(&lock);
diff --git a/drivers/can/Kconfig b/drivers/can/Kconfig
index e6c473f..f5b2f28 100644
--- a/drivers/can/Kconfig
+++ b/drivers/can/Kconfig
@@ -8,6 +8,7 @@
#
menuconfig CAN
bool "CAN Drivers"
+ select LEGACY_TIMEOUT_API
help
Enable CAN Driver Configuration
diff --git a/drivers/display/Kconfig.microbit b/drivers/display/Kconfig.microbit
index 6e18887..a4d4a9d 100644
--- a/drivers/display/Kconfig.microbit
+++ b/drivers/display/Kconfig.microbit
@@ -8,6 +8,7 @@
depends on BOARD_BBC_MICROBIT
depends on PRINTK
depends on GPIO
+ select LEGACY_TIMEOUT_API
help
Enable this to be able to display images and text on the 5x5
LED matrix display on the BBC micro:bit.
diff --git a/drivers/i2s/Kconfig b/drivers/i2s/Kconfig
index 2ac1441..7dd99d0 100644
--- a/drivers/i2s/Kconfig
+++ b/drivers/i2s/Kconfig
@@ -8,6 +8,7 @@
#
menuconfig I2S
bool "I2S bus drivers"
+ select LEGACY_TIMEOUT_API
help
Enable support for the I2S (Inter-IC Sound) hardware bus.
diff --git a/drivers/lora/Kconfig b/drivers/lora/Kconfig
index 2b900c7..ba50871 100644
--- a/drivers/lora/Kconfig
+++ b/drivers/lora/Kconfig
@@ -9,6 +9,7 @@
menuconfig LORA
bool "LoRa support"
depends on NEWLIB_LIBC
+ select LEGACY_TIMEOUT_API
help
Include LoRa drivers in the system configuration.
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 52ef051..bb52923 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -34,6 +34,7 @@
config UART_ASYNC_API
bool "Enable new asynchronous UART API [EXPERIMENTAL]"
depends on SERIAL_SUPPORT_ASYNC
+ select LEGACY_TIMEOUT_API
help
This option enables new asynchronous UART API.
diff --git a/drivers/timer/apic_timer.c b/drivers/timer/apic_timer.c
index 6ede81a..f1866e0 100644
--- a/drivers/timer/apic_timer.c
+++ b/drivers/timer/apic_timer.c
@@ -89,7 +89,7 @@
if (n < 1) {
full_ticks = 0;
- } else if ((n == K_FOREVER) || (n > MAX_TICKS)) {
+ } else if ((n == K_TICKS_FOREVER) || (n > MAX_TICKS)) {
full_ticks = MAX_TICKS - 1;
} else {
full_ticks = n - 1;
diff --git a/drivers/timer/arcv2_timer0.c b/drivers/timer/arcv2_timer0.c
index 8d300c7..72a46f4 100644
--- a/drivers/timer/arcv2_timer0.c
+++ b/drivers/timer/arcv2_timer0.c
@@ -242,7 +242,7 @@
* However for single core using 32-bits arc timer, idle cannot
* be ignored, as 32-bits timer will overflow in a not-long time.
*/
- if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && ticks == K_FOREVER) {
+ if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && ticks == K_TICKS_FOREVER) {
timer0_control_register_set(0);
timer0_count_register_set(0);
timer0_limit_register_set(0);
@@ -268,7 +268,8 @@
arch_irq_unlock(key);
#endif
#else
- if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && idle && ticks == K_FOREVER) {
+ if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && idle
+ && ticks == K_TICKS_FOREVER) {
timer0_control_register_set(0);
timer0_count_register_set(0);
timer0_limit_register_set(0);
diff --git a/drivers/timer/arm_arch_timer.c b/drivers/timer/arm_arch_timer.c
index ed9a7f0..d9d52a2 100644
--- a/drivers/timer/arm_arch_timer.c
+++ b/drivers/timer/arm_arch_timer.c
@@ -68,7 +68,7 @@
return;
}
- ticks = (ticks == K_FOREVER) ? MAX_TICKS : ticks;
+ ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);
k_spinlock_key_t key = k_spin_lock(&lock);
diff --git a/drivers/timer/cavs_timer.c b/drivers/timer/cavs_timer.c
index 3052a61..607e8f7 100644
--- a/drivers/timer/cavs_timer.c
+++ b/drivers/timer/cavs_timer.c
@@ -120,7 +120,7 @@
ARG_UNUSED(idle);
#ifdef CONFIG_TICKLESS_KERNEL
- ticks = ticks == K_FOREVER ? MAX_TICKS : ticks;
+ ticks = ticks == K_TICKS_FOREVER ? MAX_TICKS : ticks;
ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);
k_spinlock_key_t key = k_spin_lock(&lock);
diff --git a/drivers/timer/cc13x2_cc26x2_rtc_timer.c b/drivers/timer/cc13x2_cc26x2_rtc_timer.c
index 65197c6..e542a05 100644
--- a/drivers/timer/cc13x2_cc26x2_rtc_timer.c
+++ b/drivers/timer/cc13x2_cc26x2_rtc_timer.c
@@ -207,7 +207,7 @@
#ifdef CONFIG_TICKLESS_KERNEL
- ticks = (ticks == K_FOREVER) ? MAX_TICKS : ticks;
+ ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
ticks = MAX(MIN(ticks - 1, (s32_t) MAX_TICKS), 0);
k_spinlock_key_t key = k_spin_lock(&lock);
diff --git a/drivers/timer/cortex_m_systick.c b/drivers/timer/cortex_m_systick.c
index 7a42832..fbcef22 100644
--- a/drivers/timer/cortex_m_systick.c
+++ b/drivers/timer/cortex_m_systick.c
@@ -172,7 +172,8 @@
* the counter. (Note: we can assume if idle==true that
* interrupts are already disabled)
*/
- if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && idle && ticks == K_FOREVER) {
+ if (IS_ENABLED(CONFIG_TICKLESS_IDLE) && idle
+ && ticks == K_TICKS_FOREVER) {
SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk;
last_load = TIMER_STOPPED;
return;
@@ -181,7 +182,7 @@
#if defined(CONFIG_TICKLESS_KERNEL)
u32_t delay;
- ticks = (ticks == K_FOREVER) ? MAX_TICKS : ticks;
+ ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);
k_spinlock_key_t key = k_spin_lock(&lock);
diff --git a/drivers/timer/hpet.c b/drivers/timer/hpet.c
index 8cccfd6..05aba21 100644
--- a/drivers/timer/hpet.c
+++ b/drivers/timer/hpet.c
@@ -129,12 +129,12 @@
ARG_UNUSED(idle);
#if defined(CONFIG_TICKLESS_KERNEL) && !defined(CONFIG_QEMU_TICKLESS_WORKAROUND)
- if (ticks == K_FOREVER && idle) {
+ if (ticks == K_TICKS_FOREVER && idle) {
GENERAL_CONF_REG &= ~GCONF_ENABLE;
return;
}
- ticks = ticks == K_FOREVER ? max_ticks : ticks;
+ ticks = ticks == K_TICKS_FOREVER ? max_ticks : ticks;
ticks = MAX(MIN(ticks - 1, (s32_t)max_ticks), 0);
k_spinlock_key_t key = k_spin_lock(&lock);
diff --git a/drivers/timer/legacy_api.h b/drivers/timer/legacy_api.h
index 41d7ddc..8f0c42a 100644
--- a/drivers/timer/legacy_api.h
+++ b/drivers/timer/legacy_api.h
@@ -34,7 +34,7 @@
if (idle) {
z_timer_idle_enter(ticks);
} else {
- z_set_time(ticks == K_FOREVER ? 0 : ticks);
+ z_set_time(ticks == K_TICKS_FOREVER ? 0 : ticks);
}
#endif
}
diff --git a/drivers/timer/loapic_timer.c b/drivers/timer/loapic_timer.c
index fe91503..bd3a11d 100644
--- a/drivers/timer/loapic_timer.c
+++ b/drivers/timer/loapic_timer.c
@@ -390,7 +390,7 @@
)
{
#ifdef CONFIG_TICKLESS_KERNEL
- if (ticks != K_FOREVER) {
+ if (ticks != K_TICKS_FOREVER) {
/* Need to reprogram only if current program is smaller */
if (ticks > programmed_full_ticks) {
z_set_time(ticks);
@@ -417,7 +417,7 @@
cycles = current_count_register_get();
- if ((ticks == K_FOREVER) || (ticks > max_system_ticks)) {
+ if ((ticks == K_TICKS_FOREVER) || (ticks > max_system_ticks)) {
/*
* The number of cycles until the timer must fire next might not fit
* in the 32-bit counter register. To work around this, program
diff --git a/drivers/timer/mchp_xec_rtos_timer.c b/drivers/timer/mchp_xec_rtos_timer.c
index 8ed2adf..db2a1fb 100644
--- a/drivers/timer/mchp_xec_rtos_timer.c
+++ b/drivers/timer/mchp_xec_rtos_timer.c
@@ -135,7 +135,7 @@
u32_t full_cycles; /* full_ticks represented as cycles */
u32_t partial_cycles; /* number of cycles to first tick boundary */
- if (idle && (n == K_FOREVER)) {
+ if (idle && (n == K_TICKS_FOREVER)) {
/*
* We are not in a locked section. Are writes to two
* global objects safe from pre-emption?
@@ -147,7 +147,7 @@
if (n < 1) {
full_ticks = 0;
- } else if ((n == K_FOREVER) || (n > MAX_TICKS)) {
+ } else if ((n == K_TICKS_FOREVER) || (n > MAX_TICKS)) {
full_ticks = MAX_TICKS - 1;
} else {
full_ticks = n - 1;
diff --git a/drivers/timer/native_posix_timer.c b/drivers/timer/native_posix_timer.c
index 874dbd3..7a96696 100644
--- a/drivers/timer/native_posix_timer.c
+++ b/drivers/timer/native_posix_timer.c
@@ -90,7 +90,7 @@
/* Note that we treat INT_MAX literally as anyhow the maximum amount of
* ticks we can report with z_clock_announce() is INT_MAX
*/
- if (ticks == K_FOREVER) {
+ if (ticks == K_TICKS_FOREVER) {
silent_ticks = INT64_MAX;
} else if (ticks > 0) {
silent_ticks = ticks - 1;
diff --git a/drivers/timer/nrf_rtc_timer.c b/drivers/timer/nrf_rtc_timer.c
index a3ed40d..46ee3cc 100644
--- a/drivers/timer/nrf_rtc_timer.c
+++ b/drivers/timer/nrf_rtc_timer.c
@@ -117,7 +117,7 @@
ARG_UNUSED(idle);
#ifdef CONFIG_TICKLESS_KERNEL
- ticks = (ticks == K_FOREVER) ? MAX_TICKS : ticks;
+ ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);
k_spinlock_key_t key = k_spin_lock(&lock);
diff --git a/drivers/timer/riscv_machine_timer.c b/drivers/timer/riscv_machine_timer.c
index 06c735c..2d0e574 100644
--- a/drivers/timer/riscv_machine_timer.c
+++ b/drivers/timer/riscv_machine_timer.c
@@ -104,7 +104,7 @@
return;
}
- ticks = ticks == K_FOREVER ? MAX_TICKS : ticks;
+ ticks = ticks == K_TICKS_FOREVER ? MAX_TICKS : ticks;
ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);
k_spinlock_key_t key = k_spin_lock(&lock);
diff --git a/drivers/timer/xlnx_psttc_timer.c b/drivers/timer/xlnx_psttc_timer.c
index 6bd8ad6..c802457 100644
--- a/drivers/timer/xlnx_psttc_timer.c
+++ b/drivers/timer/xlnx_psttc_timer.c
@@ -161,7 +161,7 @@
cycles = read_count();
/* Calculate timeout counter value */
- if (ticks == K_FOREVER) {
+ if (ticks == K_TICKS_FOREVER) {
next_cycles = cycles + CYCLES_NEXT_MAX;
} else {
next_cycles = cycles + ((u32_t)ticks * CYCLES_PER_TICK);
diff --git a/drivers/timer/xtensa_sys_timer.c b/drivers/timer/xtensa_sys_timer.c
index ddc609e..783d11a 100644
--- a/drivers/timer/xtensa_sys_timer.c
+++ b/drivers/timer/xtensa_sys_timer.c
@@ -71,7 +71,7 @@
ARG_UNUSED(idle);
#if defined(CONFIG_TICKLESS_KERNEL) && !defined(CONFIG_QEMU_TICKLESS_WORKAROUND)
- ticks = ticks == K_FOREVER ? MAX_TICKS : ticks;
+ ticks = ticks == K_TICKS_FOREVER ? MAX_TICKS : ticks;
ticks = MAX(MIN(ticks - 1, (s32_t)MAX_TICKS), 0);
k_spinlock_key_t key = k_spin_lock(&lock);
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 0f35edb..3848766 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -8,6 +8,7 @@
#
menuconfig VIDEO
bool "VIDEO hardware support"
+ select LEGACY_TIMEOUT_API
help
Enable support for the VIDEO.
diff --git a/include/drivers/timer/system_timer.h b/include/drivers/timer/system_timer.h
index 9c3c416..f3c4dc9 100644
--- a/include/drivers/timer/system_timer.h
+++ b/include/drivers/timer/system_timer.h
@@ -59,7 +59,7 @@
* treated identically: it simply indicates the kernel would like the
* next tick announcement as soon as possible.
*
- * Note that ticks can also be passed the special value K_FOREVER,
+ * Note that ticks can also be passed the special value K_TICKS_FOREVER,
* indicating that no future timer interrupts are expected or required
* and that the system is permitted to enter an indefinite sleep even
* if this could cause rollover of the internal counter (i.e. the
diff --git a/include/kernel.h b/include/kernel.h
index d5a266d..94bf673 100644
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -810,7 +810,7 @@
* @param p3 3rd entry point parameter.
* @param prio Thread priority.
* @param options Thread options.
- * @param delay Scheduling delay (in milliseconds), or K_NO_WAIT (for no delay).
+ * @param delay Scheduling delay, or K_NO_WAIT (for no delay).
*
* @return ID of new thread.
*
@@ -821,7 +821,7 @@
size_t stack_size,
k_thread_entry_t entry,
void *p1, void *p2, void *p3,
- int prio, u32_t options, s32_t delay);
+ int prio, u32_t options, k_timeout_t delay);
/**
* @brief Drop a thread's privileges permanently to user mode
@@ -926,15 +926,27 @@
* This API may only be called from ISRs with a K_NO_WAIT timeout.
*
* @param thread Thread to wait to exit
- * @param timeout non-negative upper bound time in ms to wait for the thread
- * to exit.
+ * @param timeout Upper bound time to wait for the thread to exit.
* @retval 0 success, target thread has exited or wasn't running
* @retval -EBUSY returned without waiting
* @retval -EAGAIN waiting period timed out
* @retval -EDEADLK target thread is joining on the caller, or target thread
* is the caller
*/
-__syscall int k_thread_join(struct k_thread *thread, s32_t timeout);
+__syscall int k_thread_join(struct k_thread *thread, k_timeout_t timeout);
+
+/**
+ * @brief Put the current thread to sleep.
+ *
+ * This routine puts the current thread to sleep for the duration
+ * specified by @a timeout.
+ *
+ * @param timeout Desired duration of sleep.
+ *
+ * @return Zero if the requested time has elapsed or the number of milliseconds
+ * left to sleep, if the thread was woken up by a \ref k_wakeup call.
+ */
+__syscall s32_t k_sleep(k_timeout_t timeout);
/**
* @brief Put the current thread to sleep.
@@ -946,7 +958,10 @@
* @return Zero if the requested time has elapsed or the number of milliseconds
* left to sleep, if thread was woken up by \ref k_wakeup call.
*/
-__syscall s32_t k_sleep(s32_t ms);
+static inline s32_t k_msleep(s32_t ms)
+{
+ return k_sleep(Z_TIMEOUT_MS(ms));
+}
/**
* @brief Put the current thread to sleep with microsecond resolution.
@@ -1531,7 +1546,7 @@
*
* @return Timeout delay value.
*/
-#define K_NO_WAIT 0
+#define K_NO_WAIT Z_TIMEOUT_NO_WAIT
/**
* @brief Generate timeout delay from milliseconds.
@@ -1543,7 +1558,7 @@
*
* @return Timeout delay value.
*/
-#define K_MSEC(ms) (ms)
+#define K_MSEC(ms) Z_TIMEOUT_MS(ms)
/**
* @brief Generate timeout delay from seconds.
@@ -1589,7 +1604,7 @@
*
* @return Timeout delay value.
*/
-#define K_FOREVER (-1)
+#define K_FOREVER Z_FOREVER
/**
* @}
@@ -1617,7 +1632,7 @@
void (*stop_fn)(struct k_timer *timer);
/* timer period */
- s32_t period;
+ k_timeout_t period;
/* timer status */
u32_t status;
@@ -1639,7 +1654,6 @@
.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
.expiry_fn = expiry, \
.stop_fn = stop, \
- .period = 0, \
.status = 0, \
.user_data = 0, \
_OBJECT_TRACING_INIT \
@@ -1727,13 +1741,13 @@
* using the new duration and period values.
*
* @param timer Address of timer.
- * @param duration Initial timer duration (in milliseconds).
- * @param period Timer period (in milliseconds).
+ * @param duration Initial timer duration.
+ * @param period Timer period.
*
* @return N/A
*/
__syscall void k_timer_start(struct k_timer *timer,
- s32_t duration, s32_t period);
+ k_timeout_t duration, k_timeout_t period);
/**
* @brief Stop a timer.
@@ -2189,14 +2203,14 @@
* @note Can be called by ISRs, but @a timeout must be set to K_NO_WAIT.
*
* @param queue Address of the queue.
- * @param timeout Non-negative waiting period to obtain a data item (in
- * milliseconds), or one of the special values K_NO_WAIT and
+ * @param timeout Waiting period to obtain a data item,
+ * or one of the special values K_NO_WAIT and
* K_FOREVER.
*
* @return Address of the data item if successful; NULL if returned
* without waiting, or waiting period timed out.
*/
-__syscall void *k_queue_get(struct k_queue *queue, s32_t timeout);
+__syscall void *k_queue_get(struct k_queue *queue, k_timeout_t timeout);
/**
* @brief Remove an element from a queue.
@@ -2358,7 +2372,7 @@
* @param futex Address of the futex.
* @param expected Expected value of the futex, if it is different the caller
* will not wait on it.
- * @param timeout Non-negative waiting period on the futex, in milliseconds, or
+ * @param timeout Non-negative waiting period on the futex, or
* one of the special values K_NO_WAIT or K_FOREVER.
* @retval -EACCES Caller does not have read access to futex address.
* @retval -EAGAIN If the futex value did not match the expected parameter.
@@ -2368,7 +2382,8 @@
* should check the futex's value on wakeup to determine if it needs
* to block again.
*/
-__syscall int k_futex_wait(struct k_futex *futex, int expected, s32_t timeout);
+__syscall int k_futex_wait(struct k_futex *futex, int expected,
+ k_timeout_t timeout);
/**
* @brief Wake one/all threads pending on a futex
@@ -2529,7 +2544,7 @@
* @note Can be called by ISRs, but @a timeout must be set to K_NO_WAIT.
*
* @param fifo Address of the FIFO queue.
- * @param timeout Waiting period to obtain a data item (in milliseconds),
+ * @param timeout Waiting period to obtain a data item,
* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @return Address of the data item if successful; NULL if returned
@@ -2689,7 +2704,7 @@
* @note Can be called by ISRs, but @a timeout must be set to K_NO_WAIT.
*
* @param lifo Address of the LIFO queue.
- * @param timeout Waiting period to obtain a data item (in milliseconds),
+ * @param timeout Waiting period to obtain a data item,
* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @return Address of the data item if successful; NULL if returned
@@ -2827,8 +2842,8 @@
*
* @param stack Address of the stack.
* @param data Address of area to hold the value popped from the stack.
- * @param timeout Non-negative waiting period to obtain a value (in
- * milliseconds), or one of the special values K_NO_WAIT and
+ * @param timeout Waiting period to obtain a value,
+ * or one of the special values K_NO_WAIT and
* K_FOREVER.
*
* @retval 0 Element popped from stack.
@@ -2837,7 +2852,7 @@
* @req K-STACK-001
*/
__syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data,
- s32_t timeout);
+ k_timeout_t timeout);
/**
* @brief Statically define and initialize a stack
@@ -3142,8 +3157,7 @@
*
* @param work_q Address of workqueue.
* @param work Address of delayed work item.
- * @param delay Non-negative delay before submitting the work item (in
- * milliseconds).
+ * @param delay Delay before submitting the work item.
*
* @retval 0 Work item countdown started.
* @retval -EINVAL Work item is being processed or has completed its work.
@@ -3152,7 +3166,7 @@
*/
extern int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
struct k_delayed_work *work,
- s32_t delay);
+ k_timeout_t delay);
/**
* @brief Cancel a delayed work item.
@@ -3228,8 +3242,7 @@
* @note Can be called by ISRs.
*
* @param work Address of delayed work item.
- * @param delay Non-negative delay before submitting the work item (in
- * milliseconds).
+ * @param delay Delay before submitting the work item.
*
* @retval 0 Work item countdown started.
* @retval -EINVAL Work item is being processed or has completed its work.
@@ -3237,7 +3250,7 @@
* @req K-DWORK-001
*/
static inline int k_delayed_work_submit(struct k_delayed_work *work,
- s32_t delay)
+ k_timeout_t delay)
{
return k_delayed_work_submit_to_queue(&k_sys_work_q, work, delay);
}
@@ -3299,7 +3312,7 @@
* @param work Address of delayed work item.
* @param events An array of pointers to events which trigger the work.
* @param num_events The number of events in the array.
- * @param timeout Non-negative timeout after which the work will be scheduled
+ * @param timeout Timeout after which the work will be scheduled
* for execution even if not triggered.
*
*
@@ -3311,7 +3324,7 @@
struct k_work_poll *work,
struct k_poll_event *events,
int num_events,
- s32_t timeout);
+ k_timeout_t timeout);
/**
* @brief Submit a triggered work item to the system workqueue.
@@ -3337,7 +3350,7 @@
* @param work Address of delayed work item.
* @param events An array of pointers to events which trigger the work.
* @param num_events The number of events in the array.
- * @param timeout Non-negative timeout after which the work will be scheduled
+ * @param timeout Timeout after which the work will be scheduled
* for execution even if not triggered.
*
* @retval 0 Work item started watching for events.
@@ -3347,7 +3360,7 @@
static inline int k_work_poll_submit(struct k_work_poll *work,
struct k_poll_event *events,
int num_events,
- s32_t timeout)
+ k_timeout_t timeout)
{
return k_work_poll_submit_to_queue(&k_sys_work_q, work,
events, num_events, timeout);
@@ -3455,8 +3468,8 @@
* completes immediately and the lock count is increased by 1.
*
* @param mutex Address of the mutex.
- * @param timeout Non-negative waiting period to lock the mutex (in
- * milliseconds), or one of the special values K_NO_WAIT and
+ * @param timeout Waiting period to lock the mutex,
+ * or one of the special values K_NO_WAIT and
* K_FOREVER.
*
* @retval 0 Mutex locked.
@@ -3464,7 +3477,7 @@
* @retval -EAGAIN Waiting period timed out.
* @req K-MUTEX-002
*/
-__syscall int k_mutex_lock(struct k_mutex *mutex, s32_t timeout);
+__syscall int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout);
/**
* @brief Unlock a mutex.
@@ -3550,16 +3563,15 @@
* @note Can be called by ISRs, but @a timeout must be set to K_NO_WAIT.
*
* @param sem Address of the semaphore.
- * @param timeout Non-negative waiting period to take the semaphore (in
- * milliseconds), or one of the special values K_NO_WAIT and
- * K_FOREVER.
+ * @param timeout Waiting period to take the semaphore,
+ * or one of the special values K_NO_WAIT and K_FOREVER.
*
* @retval 0 Semaphore taken.
* @retval -EBUSY Returned without waiting.
* @retval -EAGAIN Waiting period timed out.
* @req K-SEM-001
*/
-__syscall int k_sem_take(struct k_sem *sem, s32_t timeout);
+__syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
/**
* @brief Give a semaphore.
@@ -3803,8 +3815,8 @@
*
* @param msgq Address of the message queue.
* @param data Pointer to the message.
- * @param timeout Non-negative waiting period to add the message (in
- * milliseconds), or one of the special values K_NO_WAIT and
+ * @param timeout Waiting period to add the message,
+ * or one of the special values K_NO_WAIT and
* K_FOREVER.
*
* @retval 0 Message sent.
@@ -3812,7 +3824,7 @@
* @retval -EAGAIN Waiting period timed out.
* @req K-MSGQ-002
*/
-__syscall int k_msgq_put(struct k_msgq *msgq, void *data, s32_t timeout);
+__syscall int k_msgq_put(struct k_msgq *msgq, void *data, k_timeout_t timeout);
/**
* @brief Receive a message from a message queue.
@@ -3824,8 +3836,8 @@
*
* @param msgq Address of the message queue.
* @param data Address of area to hold the received message.
- * @param timeout Non-negative waiting period to receive the message (in
- * milliseconds), or one of the special values K_NO_WAIT and
+ * @param timeout Waiting period to receive the message,
+ * or one of the special values K_NO_WAIT and
* K_FOREVER.
*
* @retval 0 Message received.
@@ -3833,7 +3845,7 @@
* @retval -EAGAIN Waiting period timed out.
* @req K-MSGQ-002
*/
-__syscall int k_msgq_get(struct k_msgq *msgq, void *data, s32_t timeout);
+__syscall int k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout);
/**
* @brief Peek/read a message from a message queue.
@@ -4042,8 +4054,8 @@
*
* @param mbox Address of the mailbox.
* @param tx_msg Address of the transmit message descriptor.
- * @param timeout Non-negative waiting period for the message to be received (in
- * milliseconds), or one of the special values K_NO_WAIT
+ * @param timeout Waiting period for the message to be received,
+ * or one of the special values K_NO_WAIT
* and K_FOREVER. Once the message has been received,
* this routine waits as long as necessary for the message
* to be completely processed.
@@ -4054,7 +4066,7 @@
* @req K-MBOX-002
*/
extern int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
- s32_t timeout);
+ k_timeout_t timeout);
/**
* @brief Send a mailbox message in an asynchronous manner.
@@ -4085,9 +4097,8 @@
* @param rx_msg Address of the receive message descriptor.
* @param buffer Address of the buffer to receive data, or NULL to defer data
* retrieval and message disposal until later.
- * @param timeout Non-negative waiting period for a message to be received (in
- * milliseconds), or one of the special values K_NO_WAIT
- * and K_FOREVER.
+ * @param timeout Waiting period for a message to be received,
+ * or one of the special values K_NO_WAIT and K_FOREVER.
*
* @retval 0 Message received.
* @retval -ENOMSG Returned without waiting.
@@ -4095,7 +4106,7 @@
* @req K-MBOX-002
*/
extern int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg,
- void *buffer, s32_t timeout);
+ void *buffer, k_timeout_t timeout);
/**
* @brief Retrieve mailbox message data into a buffer.
@@ -4137,8 +4148,8 @@
* @param rx_msg Address of a receive message descriptor.
* @param pool Address of memory pool, or NULL to discard data.
* @param block Address of the area to hold memory pool block info.
- * @param timeout Non-negative waiting period to wait for a memory pool block
- * (in milliseconds), or one of the special values K_NO_WAIT
+ * @param timeout Time to wait for a memory pool block,
+ * or one of the special values K_NO_WAIT
* and K_FOREVER.
*
* @retval 0 Data retrieved.
@@ -4148,7 +4159,8 @@
*/
extern int k_mbox_data_block_get(struct k_mbox_msg *rx_msg,
struct k_mem_pool *pool,
- struct k_mem_block *block, s32_t timeout);
+ struct k_mem_block *block,
+ k_timeout_t timeout);
/** @} */
@@ -4282,9 +4294,8 @@
* @param bytes_to_write Size of data (in bytes).
* @param bytes_written Address of area to hold the number of bytes written.
* @param min_xfer Minimum number of bytes to write.
- * @param timeout Non-negative waiting period to wait for the data to be written
- * (in milliseconds), or one of the special values K_NO_WAIT
- * and K_FOREVER.
+ * @param timeout Waiting period to wait for the data to be written,
+ * or one of the special values K_NO_WAIT and K_FOREVER.
*
* @retval 0 At least @a min_xfer bytes of data were written.
* @retval -EIO Returned without waiting; zero data bytes were written.
@@ -4294,7 +4305,7 @@
*/
__syscall int k_pipe_put(struct k_pipe *pipe, void *data,
size_t bytes_to_write, size_t *bytes_written,
- size_t min_xfer, s32_t timeout);
+ size_t min_xfer, k_timeout_t timeout);
/**
* @brief Read data from a pipe.
@@ -4306,9 +4317,8 @@
* @param bytes_to_read Maximum number of data bytes to read.
* @param bytes_read Address of area to hold the number of bytes read.
* @param min_xfer Minimum number of data bytes to read.
- * @param timeout Non-negative waiting period to wait for the data to be read
- * (in milliseconds), or one of the special values K_NO_WAIT
- * and K_FOREVER.
+ * @param timeout Waiting period to wait for the data to be read,
+ * or one of the special values K_NO_WAIT and K_FOREVER.
*
* @retval 0 At least @a min_xfer bytes of data were read.
* @retval -EINVAL invalid parameters supplied
@@ -4319,7 +4329,7 @@
*/
__syscall int k_pipe_get(struct k_pipe *pipe, void *data,
size_t bytes_to_read, size_t *bytes_read,
- size_t min_xfer, s32_t timeout);
+ size_t min_xfer, k_timeout_t timeout);
/**
* @brief Write memory block to a pipe.
@@ -4441,8 +4451,8 @@
*
* @param slab Address of the memory slab.
* @param mem Pointer to block address area.
- * @param timeout Non-negative waiting period to wait for operation to complete
- * (in milliseconds). Use K_NO_WAIT to return without waiting,
+ * @param timeout Waiting period to wait for operation to complete.
+ * Use K_NO_WAIT to return without waiting,
* or K_FOREVER to wait as long as necessary.
*
* @retval 0 Memory allocated. The block address area pointed at by @a mem
@@ -4453,7 +4463,7 @@
* @req K-MSLAB-002
*/
extern int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem,
- s32_t timeout);
+ k_timeout_t timeout);
/**
* @brief Free memory allocated from a memory slab.
@@ -4565,8 +4575,8 @@
* @param pool Address of the memory pool.
* @param block Pointer to block descriptor for the allocated memory.
* @param size Amount of memory to allocate (in bytes).
- * @param timeout Non-negative waiting period to wait for operation to complete
- * (in milliseconds). Use K_NO_WAIT to return without waiting,
+ * @param timeout Waiting period to wait for operation to complete.
+ * Use K_NO_WAIT to return without waiting,
* or K_FOREVER to wait as long as necessary.
*
* @retval 0 Memory allocated. The @a data field of the block descriptor
@@ -4576,7 +4586,7 @@
* @req K-MPOOL-002
*/
extern int k_mem_pool_alloc(struct k_mem_pool *pool, struct k_mem_block *block,
- size_t size, s32_t timeout);
+ size_t size, k_timeout_t timeout);
/**
* @brief Allocate memory from a memory pool with malloc() semantics
@@ -4890,9 +4900,8 @@
*
* @param events An array of pointers to events to be polled for.
* @param num_events The number of events in the array.
- * @param timeout Non-negative waiting period for an event to be ready (in
- * milliseconds), or one of the special values K_NO_WAIT and
- * K_FOREVER.
+ * @param timeout Waiting period for an event to be ready,
+ * or one of the special values K_NO_WAIT and K_FOREVER.
*
* @retval 0 One or more events are ready.
* @retval -EAGAIN Waiting period timed out.
@@ -4907,7 +4916,7 @@
*/
__syscall int k_poll(struct k_poll_event *events, int num_events,
- s32_t timeout);
+ k_timeout_t timeout);
/**
* @brief Initialize a poll signal object.
diff --git a/include/sys/mutex.h b/include/sys/mutex.h
index 9d97a41..d5d6913 100644
--- a/include/sys/mutex.h
+++ b/include/sys/mutex.h
@@ -19,6 +19,7 @@
#ifdef CONFIG_USERSPACE
#include <sys/atomic.h>
#include <zephyr/types.h>
+#include <sys_clock.h>
struct sys_mutex {
/* Currently unused, but will be used to store state for fast mutexes
@@ -54,7 +55,8 @@
*/
}
-__syscall int z_sys_mutex_kernel_lock(struct sys_mutex *mutex, s32_t timeout);
+__syscall int z_sys_mutex_kernel_lock(struct sys_mutex *mutex,
+ k_timeout_t timeout);
__syscall int z_sys_mutex_kernel_unlock(struct sys_mutex *mutex);
@@ -69,7 +71,7 @@
* completes immediately and the lock count is increased by 1.
*
* @param mutex Address of the mutex, which may reside in user memory
- * @param timeout Waiting period to lock the mutex (in milliseconds),
+ * @param timeout Waiting period to lock the mutex,
* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @retval 0 Mutex locked.
@@ -78,7 +80,7 @@
* @retval -EACCESS Caller has no access to provided mutex address
* @retval -EINVAL Provided mutex not recognized by the kernel
*/
-static inline int sys_mutex_lock(struct sys_mutex *mutex, s32_t timeout)
+static inline int sys_mutex_lock(struct sys_mutex *mutex, k_timeout_t timeout)
{
/* For now, make the syscall unconditionally */
return z_sys_mutex_kernel_lock(mutex, timeout);
@@ -126,7 +128,7 @@
k_mutex_init(&mutex->kernel_mutex);
}
-static inline int sys_mutex_lock(struct sys_mutex *mutex, s32_t timeout)
+static inline int sys_mutex_lock(struct sys_mutex *mutex, k_timeout_t timeout)
{
return k_mutex_lock(&mutex->kernel_mutex, timeout);
}
diff --git a/include/sys/sem.h b/include/sys/sem.h
index 1e02a1f..7aea494 100644
--- a/include/sys/sem.h
+++ b/include/sys/sem.h
@@ -110,7 +110,7 @@
* This routine takes @a sem.
*
* @param sem Address of the sys_sem.
- * @param timeout Waiting period to take the sys_sem (in milliseconds),
+ * @param timeout Waiting period to take the sys_sem,
* or one of the special values K_NO_WAIT and K_FOREVER.
*
* @retval 0 sys_sem taken.
@@ -118,7 +118,7 @@
* @retval -ETIMEDOUT Waiting period timed out.
* @retval -EACCES Caller does not have enough access.
*/
-int sys_sem_take(struct sys_sem *sem, s32_t timeout);
+int sys_sem_take(struct sys_sem *sem, k_timeout_t timeout);
/**
* @brief Get sys_sem's value
diff --git a/include/sys_clock.h b/include/sys_clock.h
index 39f749e..123ad82 100644
--- a/include/sys_clock.h
+++ b/include/sys_clock.h
@@ -28,6 +28,59 @@
extern "C" {
#endif
+/**
+ * @addtogroup clock_apis
+ * @{
+ */
+
+typedef u32_t k_ticks_t;
+
+#define K_TICKS_FOREVER ((k_ticks_t) -1)
+
+#ifndef CONFIG_LEGACY_TIMEOUT_API
+
+typedef struct {
+ k_ticks_t ticks;
+} k_timeout_t;
+
+/**
+ * @brief Compare timeouts for equality
+ *
+ * The k_timeout_t object is an opaque struct that should not be
+ * inspected by application code. This macro exists so that users can
+ * test timeout objects for equality with known constants
+ * (e.g. K_NO_WAIT and K_FOREVER) when implementing their own APIs in
+ * terms of Zephyr timeout constants.
+ *
+ * @return True if the timeout objects are identical
+ */
+#define K_TIMEOUT_EQ(a, b) ((a).ticks == (b).ticks)
+
+#define Z_TIMEOUT_NO_WAIT ((k_timeout_t) {})
+#define Z_TIMEOUT_TICKS(t) ((k_timeout_t) { .ticks = (t) })
+#define Z_FOREVER Z_TIMEOUT_TICKS(K_TICKS_FOREVER)
+#define Z_TIMEOUT_MS(t) Z_TIMEOUT_TICKS(k_ms_to_ticks_ceil32(MAX(t, 0)))
+#define Z_TIMEOUT_US(t) Z_TIMEOUT_TICKS(k_us_to_ticks_ceil32(MAX(t, 0)))
+#define Z_TIMEOUT_NS(t) Z_TIMEOUT_TICKS(k_ns_to_ticks_ceil32(MAX(t, 0)))
+#define Z_TIMEOUT_CYC(t) Z_TIMEOUT_TICKS(k_cyc_to_ticks_ceil32(MAX(t, 0)))
+
+#else
+
+/* Legacy timeout API */
+typedef s32_t k_timeout_t;
+#define K_TIMEOUT_EQ(a, b) ((a) == (b))
+#define Z_TIMEOUT_NO_WAIT 0
+#define Z_TIMEOUT_TICKS(t) k_ticks_to_ms_ceil32(t)
+#define Z_FOREVER K_TICKS_FOREVER
+#define Z_TIMEOUT_MS(t) (t)
+#define Z_TIMEOUT_US(t) ((t) * 1000)
+#define Z_TIMEOUT_NS(t) ((t) * 1000000)
+#define Z_TIMEOUT_CYC(t) k_cyc_to_ms_ceil32(MAX((t), 0))
+
+#endif
+
+/** @} */
+
#ifdef CONFIG_TICKLESS_KERNEL
extern int _sys_clock_always_on;
extern void z_enable_sys_clock(void);
@@ -53,8 +106,6 @@
/* number of nanoseconds per second */
#define NSEC_PER_SEC ((NSEC_PER_USEC) * (USEC_PER_MSEC) * (MSEC_PER_SEC))
-#define k_msleep(ms) k_sleep(ms)
-#define K_TIMEOUT_EQ(a, b) ((a) == (b))
/* kernel clocks */
@@ -132,6 +183,8 @@
#define z_tick_get_32() (0)
#endif
+u64_t z_timeout_end_calc(k_timeout_t timeout);
+
/* timeouts */
struct _timeout;
diff --git a/include/timeout_q.h b/include/timeout_q.h
index 0615bf1..061be6d 100644
--- a/include/timeout_q.h
+++ b/include/timeout_q.h
@@ -27,7 +27,8 @@
sys_dnode_init(&t->node);
}
-void z_add_timeout(struct _timeout *to, _timeout_func_t fn, s32_t ticks);
+void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
+ k_timeout_t timeout);
int z_abort_timeout(struct _timeout *to);
@@ -43,7 +44,7 @@
extern void z_thread_timeout(struct _timeout *to);
-static inline void z_add_thread_timeout(struct k_thread *th, s32_t ticks)
+static inline void z_add_thread_timeout(struct k_thread *th, k_timeout_t ticks)
{
z_add_timeout(&th->base.timeout, z_thread_timeout, ticks);
}
@@ -63,12 +64,17 @@
/* Stubs when !CONFIG_SYS_CLOCK_EXISTS */
#define z_init_thread_timeout(t) do {} while (false)
-#define z_add_thread_timeout(th, to) do {} while (false && to && (void *)th)
#define z_abort_thread_timeout(t) (0)
#define z_is_inactive_timeout(t) 0
-#define z_get_next_timeout_expiry() (K_FOREVER)
+#define z_get_next_timeout_expiry() (K_TICKS_FOREVER)
#define z_set_timeout_expiry(t, i) do {} while (false)
+static inline void z_add_thread_timeout(struct k_thread *th, k_timeout_t ticks)
+{
+ ARG_UNUSED(th);
+ ARG_UNUSED(ticks);
+}
+
#endif
#ifdef __cplusplus
diff --git a/kernel/Kconfig b/kernel/Kconfig
index 4e9e749..90125a6 100644
--- a/kernel/Kconfig
+++ b/kernel/Kconfig
@@ -570,6 +570,15 @@
this is disabled. Obviously timeout-related APIs will not
work.
+config LEGACY_TIMEOUT_API
+ bool "Support legacy k_timeout_t API"
+ help
+ The k_timeout_t API has changed to become an opaque type
+ that must be initialized with macros. Older applications
+ can select this option to continue using the old style of timeouts
+ (which were s32_t counts of milliseconds), at the cost of
+ not being able to use new features.
+
config XIP
bool "Execute in place"
help
diff --git a/kernel/futex.c b/kernel/futex.c
index 3ec3b05..c52f90d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -62,7 +62,8 @@
}
#include <syscalls/k_futex_wake_mrsh.c>
-int z_impl_k_futex_wait(struct k_futex *futex, int expected, s32_t timeout)
+int z_impl_k_futex_wait(struct k_futex *futex, int expected,
+ k_timeout_t timeout)
{
int ret;
k_spinlock_key_t key;
@@ -90,7 +91,7 @@
}
static inline int z_vrfy_k_futex_wait(struct k_futex *futex, int expected,
- s32_t timeout)
+ k_timeout_t timeout)
{
if (Z_SYSCALL_MEMORY_WRITE(futex, sizeof(struct k_futex)) != 0) {
return -EACCES;
diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index 62a4c0c..4a88bef 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -42,9 +42,10 @@
int z_is_thread_time_slicing(struct k_thread *thread);
void z_unpend_thread_no_timeout(struct k_thread *thread);
int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
- _wait_q_t *wait_q, s32_t timeout);
-int z_pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, s32_t timeout);
-void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout);
+ _wait_q_t *wait_q, k_timeout_t timeout);
+int z_pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, k_timeout_t timeout);
+void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
+ k_timeout_t timeout);
void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key);
void z_reschedule_irqlock(u32_t key);
struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q);
@@ -63,7 +64,7 @@
void z_sched_start(struct k_thread *thread);
void z_ready_thread(struct k_thread *thread);
-static inline void z_pend_curr_unlocked(_wait_q_t *wait_q, s32_t timeout)
+static inline void z_pend_curr_unlocked(_wait_q_t *wait_q, k_timeout_t timeout)
{
(void) z_pend_curr_irqlock(arch_irq_lock(), wait_q, timeout);
}
diff --git a/kernel/mailbox.c b/kernel/mailbox.c
index d727904..cd9016f 100644
--- a/kernel/mailbox.c
+++ b/kernel/mailbox.c
@@ -233,7 +233,7 @@
* @return 0 if successful, -ENOMSG if failed immediately, -EAGAIN if timed out
*/
static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
- s32_t timeout)
+ k_timeout_t timeout)
{
struct k_thread *sending_thread;
struct k_thread *receiving_thread;
@@ -286,7 +286,7 @@
}
/* didn't find a matching receiver: don't wait for one */
- if (timeout == K_NO_WAIT) {
+ if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
k_spin_unlock(&mbox->lock, key);
return -ENOMSG;
}
@@ -304,7 +304,8 @@
return z_pend_curr(&mbox->lock, key, &mbox->tx_msg_queue, timeout);
}
-int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, s32_t timeout)
+int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
+ k_timeout_t timeout)
{
/* configure things for a synchronous send, then send the message */
tx_msg->_syncing_thread = _current;
@@ -351,7 +352,7 @@
}
int k_mbox_data_block_get(struct k_mbox_msg *rx_msg, struct k_mem_pool *pool,
- struct k_mem_block *block, s32_t timeout)
+ struct k_mem_block *block, k_timeout_t timeout)
{
int result;
@@ -416,7 +417,7 @@
}
int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
- s32_t timeout)
+ k_timeout_t timeout)
{
struct k_thread *sending_thread;
struct k_mbox_msg *tx_msg;
@@ -445,7 +446,7 @@
/* didn't find a matching sender */
- if (timeout == K_NO_WAIT) {
+ if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
/* don't wait for a matching sender to appear */
k_spin_unlock(&mbox->lock, key);
return -ENOMSG;
diff --git a/kernel/mem_slab.c b/kernel/mem_slab.c
index df83d64..6c4dc80 100644
--- a/kernel/mem_slab.c
+++ b/kernel/mem_slab.c
@@ -101,7 +101,7 @@
return rc;
}
-int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, s32_t timeout)
+int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
{
k_spinlock_key_t key = k_spin_lock(&lock);
int result;
@@ -112,7 +112,7 @@
slab->free_list = *(char **)(slab->free_list);
slab->num_used++;
result = 0;
- } else if (timeout == K_NO_WAIT) {
+ } else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
/* don't wait for a free block to become available */
*mem = NULL;
result = -ENOMEM;
diff --git a/kernel/mempool.c b/kernel/mempool.c
index 85f0fed..49df124 100644
--- a/kernel/mempool.c
+++ b/kernel/mempool.c
@@ -47,16 +47,14 @@
SYS_INIT(init_static_pools, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
- size_t size, s32_t timeout)
+ size_t size, k_timeout_t timeout)
{
int ret;
- s64_t end = 0;
+ u64_t end = 0;
- __ASSERT(!(arch_is_in_isr() && timeout != K_NO_WAIT), "");
+ __ASSERT(!(arch_is_in_isr() && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");
- if (timeout > 0) {
- end = k_uptime_get() + timeout;
- }
+ end = z_timeout_end_calc(timeout);
while (true) {
u32_t level_num, block_num;
@@ -68,18 +66,20 @@
block->id.level = level_num;
block->id.block = block_num;
- if (ret == 0 || timeout == K_NO_WAIT ||
+ if (ret == 0 || K_TIMEOUT_EQ(timeout, K_NO_WAIT) ||
ret != -ENOMEM) {
return ret;
}
z_pend_curr_unlocked(&p->wait_q, timeout);
- if (timeout != K_FOREVER) {
- timeout = end - k_uptime_get();
- if (timeout <= 0) {
+ if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
+ s64_t remaining = end - z_tick_get();
+
+ if (remaining <= 0) {
break;
}
+ timeout = Z_TIMEOUT_TICKS(remaining);
}
}
diff --git a/kernel/msg_q.c b/kernel/msg_q.c
index 1d172f5..f7351f2 100644
--- a/kernel/msg_q.c
+++ b/kernel/msg_q.c
@@ -113,9 +113,9 @@
}
-int z_impl_k_msgq_put(struct k_msgq *msgq, void *data, s32_t timeout)
+int z_impl_k_msgq_put(struct k_msgq *msgq, void *data, k_timeout_t timeout)
{
- __ASSERT(!arch_is_in_isr() || timeout == K_NO_WAIT, "");
+ __ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), "");
struct k_thread *pending_thread;
k_spinlock_key_t key;
@@ -145,7 +145,7 @@
msgq->used_msgs++;
}
result = 0;
- } else if (timeout == K_NO_WAIT) {
+ } else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
/* don't wait for message space to become available */
result = -ENOMSG;
} else {
@@ -160,7 +160,8 @@
}
#ifdef CONFIG_USERSPACE
-static inline int z_vrfy_k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
+static inline int z_vrfy_k_msgq_put(struct k_msgq *q, void *data,
+ k_timeout_t timeout)
{
Z_OOPS(Z_SYSCALL_OBJ(q, K_OBJ_MSGQ));
Z_OOPS(Z_SYSCALL_MEMORY_READ(data, q->msg_size));
@@ -188,9 +189,9 @@
#include <syscalls/k_msgq_get_attrs_mrsh.c>
#endif
-int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, s32_t timeout)
+int z_impl_k_msgq_get(struct k_msgq *msgq, void *data, k_timeout_t timeout)
{
- __ASSERT(!arch_is_in_isr() || timeout == K_NO_WAIT, "");
+ __ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), "");
k_spinlock_key_t key;
struct k_thread *pending_thread;
@@ -226,7 +227,7 @@
return 0;
}
result = 0;
- } else if (timeout == K_NO_WAIT) {
+ } else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
/* don't wait for a message to become available */
result = -ENOMSG;
} else {
@@ -241,7 +242,8 @@
}
#ifdef CONFIG_USERSPACE
-static inline int z_vrfy_k_msgq_get(struct k_msgq *q, void *data, s32_t timeout)
+static inline int z_vrfy_k_msgq_get(struct k_msgq *q, void *data,
+ k_timeout_t timeout)
{
Z_OOPS(Z_SYSCALL_OBJ(q, K_OBJ_MSGQ));
Z_OOPS(Z_SYSCALL_MEMORY_WRITE(data, q->msg_size));
diff --git a/kernel/mutex.c b/kernel/mutex.c
index fb40b87..b22c9ae 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -116,7 +116,7 @@
return false;
}
-int z_impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
+int z_impl_k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)
{
int new_prio;
k_spinlock_key_t key;
@@ -144,7 +144,7 @@
return 0;
}
- if (unlikely(timeout == (s32_t)K_NO_WAIT)) {
+ if (unlikely(K_TIMEOUT_EQ(timeout, K_NO_WAIT))) {
k_spin_unlock(&lock, key);
sys_trace_end_call(SYS_TRACE_ID_MUTEX_LOCK);
return -EBUSY;
@@ -198,7 +198,8 @@
}
#ifdef CONFIG_USERSPACE
-static inline int z_vrfy_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
+static inline int z_vrfy_k_mutex_lock(struct k_mutex *mutex,
+ k_timeout_t timeout)
{
Z_OOPS(Z_SYSCALL_OBJ(mutex, K_OBJ_MUTEX));
return z_impl_k_mutex_lock(mutex, timeout);
diff --git a/kernel/pipes.c b/kernel/pipes.c
index f315b43..efd2855 100644
--- a/kernel/pipes.c
+++ b/kernel/pipes.c
@@ -318,13 +318,13 @@
size_t pipe_space,
size_t bytes_to_xfer,
size_t min_xfer,
- s32_t timeout)
+ k_timeout_t timeout)
{
struct k_thread *thread;
struct k_pipe_desc *desc;
size_t num_bytes = 0;
- if (timeout == K_NO_WAIT) {
+ if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
_WAIT_Q_FOR_EACH(wait_q, thread) {
desc = (struct k_pipe_desc *)thread->base.swap_data;
@@ -429,7 +429,7 @@
int z_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
unsigned char *data, size_t bytes_to_write,
size_t *bytes_written, size_t min_xfer,
- s32_t timeout)
+ k_timeout_t timeout)
{
struct k_thread *reader;
struct k_pipe_desc *desc;
@@ -555,7 +555,7 @@
pipe_desc.buffer = data + num_bytes_written;
pipe_desc.bytes_to_xfer = bytes_to_write - num_bytes_written;
- if (timeout != K_NO_WAIT) {
+ if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
_current->base.swap_data = &pipe_desc;
/*
* Lock interrupts and unlock the scheduler before
@@ -576,7 +576,7 @@
}
int z_impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
- size_t *bytes_read, size_t min_xfer, s32_t timeout)
+ size_t *bytes_read, size_t min_xfer, k_timeout_t timeout)
{
struct k_thread *writer;
struct k_pipe_desc *desc;
@@ -701,7 +701,7 @@
pipe_desc.buffer = (u8_t *)data + num_bytes_read;
pipe_desc.bytes_to_xfer = bytes_to_read - num_bytes_read;
- if (timeout != K_NO_WAIT) {
+ if (!K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
_current->base.swap_data = &pipe_desc;
k_spinlock_key_t key = k_spin_lock(&pipe->lock);
@@ -720,7 +720,7 @@
#ifdef CONFIG_USERSPACE
int z_vrfy_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
- size_t *bytes_read, size_t min_xfer, s32_t timeout)
+ size_t *bytes_read, size_t min_xfer, k_timeout_t timeout)
{
Z_OOPS(Z_SYSCALL_OBJ(pipe, K_OBJ_PIPE));
Z_OOPS(Z_SYSCALL_MEMORY_WRITE(bytes_read, sizeof(*bytes_read)));
@@ -734,7 +734,8 @@
#endif
int z_impl_k_pipe_put(struct k_pipe *pipe, void *data, size_t bytes_to_write,
- size_t *bytes_written, size_t min_xfer, s32_t timeout)
+ size_t *bytes_written, size_t min_xfer,
+ k_timeout_t timeout)
{
return z_pipe_put_internal(pipe, NULL, data,
bytes_to_write, bytes_written,
@@ -743,7 +744,8 @@
#ifdef CONFIG_USERSPACE
int z_vrfy_k_pipe_put(struct k_pipe *pipe, void *data, size_t bytes_to_write,
- size_t *bytes_written, size_t min_xfer, s32_t timeout)
+ size_t *bytes_written, size_t min_xfer,
+ k_timeout_t timeout)
{
Z_OOPS(Z_SYSCALL_OBJ(pipe, K_OBJ_PIPE));
Z_OOPS(Z_SYSCALL_MEMORY_WRITE(bytes_written, sizeof(*bytes_written)));
diff --git a/kernel/poll.c b/kernel/poll.c
index 699a33d..4fe88ff 100644
--- a/kernel/poll.c
+++ b/kernel/poll.c
@@ -244,7 +244,8 @@
return 0;
}
-int z_impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
+int z_impl_k_poll(struct k_poll_event *events, int num_events,
+ k_timeout_t timeout)
{
int events_registered;
k_spinlock_key_t key;
@@ -257,7 +258,7 @@
__ASSERT(num_events >= 0, "<0 events\n");
events_registered = register_events(events, num_events, &poller,
- (timeout == K_NO_WAIT));
+ K_TIMEOUT_EQ(timeout, K_NO_WAIT));
key = k_spin_lock(&lock);
@@ -274,7 +275,7 @@
poller.is_polling = false;
- if (timeout == K_NO_WAIT) {
+ if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
k_spin_unlock(&lock, key);
return -EAGAIN;
}
@@ -301,7 +302,7 @@
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_poll(struct k_poll_event *events,
- int num_events, s32_t timeout)
+ int num_events, k_timeout_t timeout)
{
int ret;
k_spinlock_key_t key;
@@ -582,7 +583,7 @@
struct k_work_poll *work,
struct k_poll_event *events,
int num_events,
- s32_t timeout)
+ k_timeout_t timeout)
{
int events_registered;
k_spinlock_key_t key;
@@ -626,7 +627,7 @@
&work->poller, false);
key = k_spin_lock(&lock);
- if (work->poller.is_polling && timeout != K_NO_WAIT) {
+ if (work->poller.is_polling && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
/*
* Poller is still polling.
* No event is ready and all are watched.
@@ -634,11 +635,15 @@
__ASSERT(num_events == events_registered,
"Some events were not registered!\n");
+#ifdef CONFIG_LEGACY_TIMEOUT_API
+ timeout = k_ms_to_ticks_ceil32(timeout);
+#endif
+
/* Setup timeout if such action is requested */
- if (timeout != K_FOREVER) {
+ if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
z_add_timeout(&work->timeout,
triggered_work_expiration_handler,
- k_ms_to_ticks_ceil32(timeout));
+ timeout);
}
/* From now, any event will result in submitted work. */
diff --git a/kernel/queue.c b/kernel/queue.c
index 329a281..c60c9f7 100644
--- a/kernel/queue.c
+++ b/kernel/queue.c
@@ -293,45 +293,32 @@
}
#if defined(CONFIG_POLL)
-static void *k_queue_poll(struct k_queue *queue, s32_t timeout)
+static void *k_queue_poll(struct k_queue *queue, k_timeout_t timeout)
{
struct k_poll_event event;
- int err, elapsed = 0, done = 0;
+ int err;
k_spinlock_key_t key;
void *val;
- u32_t start;
k_poll_event_init(&event, K_POLL_TYPE_FIFO_DATA_AVAILABLE,
K_POLL_MODE_NOTIFY_ONLY, queue);
- if (timeout != K_FOREVER) {
- start = k_uptime_get_32();
+ event.state = K_POLL_STATE_NOT_READY;
+ err = k_poll(&event, 1, timeout);
+
+ if (err && err != -EAGAIN) {
+ return NULL;
}
- do {
- event.state = K_POLL_STATE_NOT_READY;
-
- err = k_poll(&event, 1, timeout - elapsed);
-
- if (err && err != -EAGAIN) {
- return NULL;
- }
-
- key = k_spin_lock(&queue->lock);
- val = z_queue_node_peek(sys_sflist_get(&queue->data_q), true);
- k_spin_unlock(&queue->lock, key);
-
- if ((val == NULL) && (timeout != K_FOREVER)) {
- elapsed = k_uptime_get_32() - start;
- done = elapsed > timeout;
- }
- } while (!val && !done);
+ key = k_spin_lock(&queue->lock);
+ val = z_queue_node_peek(sys_sflist_get(&queue->data_q), true);
+ k_spin_unlock(&queue->lock, key);
return val;
}
#endif /* CONFIG_POLL */
-void *z_impl_k_queue_get(struct k_queue *queue, s32_t timeout)
+void *z_impl_k_queue_get(struct k_queue *queue, k_timeout_t timeout)
{
k_spinlock_key_t key = k_spin_lock(&queue->lock);
void *data;
@@ -345,7 +332,7 @@
return data;
}
- if (timeout == K_NO_WAIT) {
+ if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
k_spin_unlock(&queue->lock, key);
return NULL;
}
@@ -363,7 +350,8 @@
}
#ifdef CONFIG_USERSPACE
-static inline void *z_vrfy_k_queue_get(struct k_queue *queue, s32_t timeout)
+static inline void *z_vrfy_k_queue_get(struct k_queue *queue,
+ k_timeout_t timeout)
{
Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
return z_impl_k_queue_get(queue, timeout);
diff --git a/kernel/sched.c b/kernel/sched.c
index 4bdfe6b..d4a5594 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -582,31 +582,28 @@
}
}
-static void add_thread_timeout_ms(struct k_thread *thread, s32_t timeout)
+static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
{
- if (timeout != K_FOREVER) {
- s32_t ticks;
-
- if (timeout < 0) {
- timeout = 0;
- }
-
- ticks = _TICK_ALIGN + k_ms_to_ticks_ceil32(timeout);
-
- z_add_thread_timeout(thread, ticks);
+ if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
+#ifdef CONFIG_LEGACY_TIMEOUT_API
+ timeout = _TICK_ALIGN + k_ms_to_ticks_ceil32(timeout);
+#endif
+ z_add_thread_timeout(thread, timeout);
}
}
-static void pend(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
+static void pend(struct k_thread *thread, _wait_q_t *wait_q,
+ k_timeout_t timeout)
{
LOCKED(&sched_spinlock) {
add_to_waitq_locked(thread, wait_q);
}
- add_thread_timeout_ms(thread, timeout);
+ add_thread_timeout(thread, timeout);
}
-void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
+void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
+ k_timeout_t timeout)
{
__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
pend(thread, wait_q, timeout);
@@ -651,7 +648,7 @@
}
#endif
-int z_pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, s32_t timeout)
+int z_pend_curr_irqlock(u32_t key, _wait_q_t *wait_q, k_timeout_t timeout)
{
pend(_current, wait_q, timeout);
@@ -671,7 +668,7 @@
}
int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
- _wait_q_t *wait_q, s32_t timeout)
+ _wait_q_t *wait_q, k_timeout_t timeout)
{
#if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
pending_current = _current;
@@ -1159,7 +1156,15 @@
return 0;
}
+ k_timeout_t timeout;
+
+#ifndef CONFIG_LEGACY_TIMEOUT_API
+ timeout = Z_TIMEOUT_TICKS(ticks);
+#else
ticks += _TICK_ALIGN;
+ timeout = (k_ticks_t) ticks;
+#endif
+
expected_wakeup_time = ticks + z_tick_get_32();
/* Spinlock purely for local interrupt locking to prevent us
@@ -1173,7 +1178,7 @@
pending_current = _current;
#endif
z_remove_thread_from_ready_q(_current);
- z_add_thread_timeout(_current, ticks);
+ z_add_thread_timeout(_current, timeout);
z_mark_thread_as_suspended(_current);
(void)z_swap(&local_lock, key);
@@ -1189,26 +1194,31 @@
return 0;
}
-s32_t z_impl_k_sleep(int ms)
+s32_t z_impl_k_sleep(k_timeout_t timeout)
{
- s32_t ticks;
+ k_ticks_t ticks;
__ASSERT(!arch_is_in_isr(), "");
- if (ms == K_FOREVER) {
+ if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
k_thread_suspend(_current);
- return K_FOREVER;
+ return K_TICKS_FOREVER;
}
- ticks = k_ms_to_ticks_ceil32(ms);
+#ifdef CONFIG_LEGACY_TIMEOUT_API
+ ticks = k_ms_to_ticks_ceil32(timeout);
+#else
+ ticks = timeout.ticks;
+#endif
+
ticks = z_tick_sleep(ticks);
return k_ticks_to_ms_floor64(ticks);
}
#ifdef CONFIG_USERSPACE
-static inline s32_t z_vrfy_k_sleep(int ms)
+static inline s32_t z_vrfy_k_sleep(k_timeout_t timeout)
{
- return z_impl_k_sleep(ms);
+ return z_impl_k_sleep(timeout);
}
#include <syscalls/k_sleep_mrsh.c>
#endif
@@ -1407,12 +1417,13 @@
#endif /* CONFIG_SCHED_CPU_MASK */
-int z_impl_k_thread_join(struct k_thread *thread, s32_t timeout)
+int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
{
k_spinlock_key_t key;
int ret;
- __ASSERT(((arch_is_in_isr() == false) || (timeout == K_NO_WAIT)), "");
+ __ASSERT(((arch_is_in_isr() == false) ||
+ K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");
key = k_spin_lock(&sched_spinlock);
@@ -1427,7 +1438,7 @@
goto out;
}
- if (timeout == K_NO_WAIT) {
+ if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
ret = -EBUSY;
goto out;
}
@@ -1436,7 +1447,7 @@
pending_current = _current;
#endif
add_to_waitq_locked(_current, &thread->base.join_waiters);
- add_thread_timeout_ms(_current, timeout);
+ add_thread_timeout(_current, timeout);
return z_swap(&sched_spinlock, key);
out:
@@ -1472,7 +1483,8 @@
CODE_UNREACHABLE;
}
-static inline int z_vrfy_k_thread_join(struct k_thread *thread, s32_t timeout)
+static inline int z_vrfy_k_thread_join(struct k_thread *thread,
+ k_timeout_t timeout)
{
if (thread_obj_validate(thread)) {
return 0;
diff --git a/kernel/sem.c b/kernel/sem.c
index db317c8..acf175e 100644
--- a/kernel/sem.c
+++ b/kernel/sem.c
@@ -133,11 +133,12 @@
#include <syscalls/k_sem_give_mrsh.c>
#endif
-int z_impl_k_sem_take(struct k_sem *sem, s32_t timeout)
+int z_impl_k_sem_take(struct k_sem *sem, k_timeout_t timeout)
{
int ret = 0;
- __ASSERT(((arch_is_in_isr() == false) || (timeout == K_NO_WAIT)), "");
+ __ASSERT(((arch_is_in_isr() == false) ||
+ K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");
sys_trace_void(SYS_TRACE_ID_SEMA_TAKE);
k_spinlock_key_t key = k_spin_lock(&lock);
@@ -149,7 +150,7 @@
goto out;
}
- if (timeout == K_NO_WAIT) {
+ if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
k_spin_unlock(&lock, key);
ret = -EBUSY;
goto out;
@@ -163,7 +164,7 @@
}
#ifdef CONFIG_USERSPACE
-static inline int z_vrfy_k_sem_take(struct k_sem *sem, s32_t timeout)
+static inline int z_vrfy_k_sem_take(struct k_sem *sem, k_timeout_t timeout)
{
Z_OOPS(Z_SYSCALL_OBJ(sem, K_OBJ_SEM));
return z_impl_k_sem_take((struct k_sem *)sem, timeout);
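With the legacy option off, k_timeout_t is a struct, so integer comparisons like
`timeout == K_NO_WAIT` no longer compile; the K_TIMEOUT_EQ() predicate seen in
these asserts is the replacement. A hedged sketch of the pattern for out-of-tree
code that forwards timeouts (my_take() is a hypothetical helper, not kernel API):

#include <zephyr.h>

/* Hypothetical wrapper: shows the K_TIMEOUT_EQ() pattern for code
 * that needs to special-case the kernel's timeout constants.
 */
int my_take(struct k_sem *sem, k_timeout_t timeout)
{
	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		/* fail fast rather than pend */
		return k_sem_take(sem, K_NO_WAIT);
	}

	/* K_FOREVER and finite timeouts forward unchanged */
	return k_sem_take(sem, timeout);
}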
diff --git a/kernel/stack.c b/kernel/stack.c
index fc737a5..be00a1f 100644
--- a/kernel/stack.c
+++ b/kernel/stack.c
@@ -133,7 +133,8 @@
#include <syscalls/k_stack_push_mrsh.c>
#endif
-int z_impl_k_stack_pop(struct k_stack *stack, stack_data_t *data, s32_t timeout)
+int z_impl_k_stack_pop(struct k_stack *stack, stack_data_t *data,
+ k_timeout_t timeout)
{
k_spinlock_key_t key;
int result;
@@ -147,7 +148,7 @@
return 0;
}
- if (timeout == K_NO_WAIT) {
+ if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
k_spin_unlock(&stack->lock, key);
return -EBUSY;
}
@@ -163,7 +164,7 @@
#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_stack_pop(struct k_stack *stack,
- stack_data_t *data, s32_t timeout)
+ stack_data_t *data, k_timeout_t timeout)
{
Z_OOPS(Z_SYSCALL_OBJ(stack, K_OBJ_STACK));
Z_OOPS(Z_SYSCALL_MEMORY_WRITE(data, sizeof(stack_data_t)));
diff --git a/kernel/thread.c b/kernel/thread.c
index fc8af576..e219de1 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -404,15 +404,17 @@
#endif
#ifdef CONFIG_MULTITHREADING
-static void schedule_new_thread(struct k_thread *thread, s32_t delay)
+static void schedule_new_thread(struct k_thread *thread, k_timeout_t delay)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
- if (delay == 0) {
+ if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
k_thread_start(thread);
} else {
- s32_t ticks = _TICK_ALIGN + k_ms_to_ticks_ceil32(delay);
+#ifdef CONFIG_LEGACY_TIMEOUT_API
+ delay = _TICK_ALIGN + k_ms_to_ticks_ceil32(delay);
+#endif
- z_add_thread_timeout(thread, ticks);
+ z_add_thread_timeout(thread, delay);
}
#else
ARG_UNUSED(delay);
@@ -612,7 +614,7 @@
k_thread_stack_t *stack,
size_t stack_size, k_thread_entry_t entry,
void *p1, void *p2, void *p3,
- int prio, u32_t options, s32_t delay)
+ int prio, u32_t options, k_timeout_t delay)
{
__ASSERT(!arch_is_in_isr(), "Threads may not be created in ISRs");
@@ -626,7 +628,7 @@
z_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
prio, options, NULL);
- if (delay != K_FOREVER) {
+ if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
schedule_new_thread(new_thread, delay);
}
@@ -639,7 +641,7 @@
k_thread_stack_t *stack,
size_t stack_size, k_thread_entry_t entry,
void *p1, void *p2, void *p3,
- int prio, u32_t options, s32_t delay)
+ int prio, u32_t options, k_timeout_t delay)
{
size_t total_size, stack_obj_size;
struct z_object *stack_object;
@@ -689,7 +691,7 @@
z_setup_new_thread(new_thread, stack, stack_size,
entry, p1, p2, p3, prio, options, NULL);
- if (delay != K_FOREVER) {
+ if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
schedule_new_thread(new_thread, delay);
}
@@ -747,9 +749,9 @@
*/
k_sched_lock();
_FOREACH_STATIC_THREAD(thread_data) {
- if (thread_data->init_delay != K_FOREVER) {
+ if (thread_data->init_delay != K_TICKS_FOREVER) {
schedule_new_thread(thread_data->init_thread,
- thread_data->init_delay);
+ K_MSEC(thread_data->init_delay));
}
}
k_sched_unlock();
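Thread creation follows the same convention: the delay argument is now a
k_timeout_t, where K_NO_WAIT schedules the thread immediately and K_FOREVER
leaves it dormant until k_thread_start(). A sketch with illustrative names:

#include <zephyr.h>

K_THREAD_STACK_DEFINE(demo_stack, 1024);
static struct k_thread demo_thread;

static void demo_entry(void *p1, void *p2, void *p3)
{
}

void spawn_demo(void)
{
	/* K_NO_WAIT replaces the old literal 0 delay */
	k_thread_create(&demo_thread, demo_stack,
			K_THREAD_STACK_SIZEOF(demo_stack),
			demo_entry, NULL, NULL, NULL,
			K_PRIO_PREEMPT(0), 0, K_NO_WAIT);
}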
diff --git a/kernel/timeout.c b/kernel/timeout.c
index 2623667..11aeed7 100644
--- a/kernel/timeout.c
+++ b/kernel/timeout.c
@@ -24,7 +24,7 @@
static struct k_spinlock timeout_lock;
#define MAX_WAIT (IS_ENABLED(CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE) \
- ? K_FOREVER : INT_MAX)
+ ? K_TICKS_FOREVER : INT_MAX)
/* Cycles left to process in the currently-executing z_clock_announce() */
static int announce_remaining;
@@ -83,8 +83,15 @@
return ret;
}
-void z_add_timeout(struct _timeout *to, _timeout_func_t fn, s32_t ticks)
+void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
+ k_timeout_t timeout)
{
+#ifdef CONFIG_LEGACY_TIMEOUT_API
+ k_ticks_t ticks = timeout;
+#else
+ k_ticks_t ticks = timeout.ticks + 1;
+#endif
+
__ASSERT(!sys_dnode_is_linked(&to->node), "");
to->fn = fn;
ticks = MAX(1, ticks);
@@ -150,7 +157,7 @@
s32_t z_get_next_timeout_expiry(void)
{
- s32_t ret = K_FOREVER;
+ s32_t ret = K_TICKS_FOREVER;
LOCKED(&timeout_lock) {
ret = next_timeout();
@@ -162,7 +169,7 @@
{
LOCKED(&timeout_lock) {
int next = next_timeout();
- bool sooner = (next == K_FOREVER) || (ticks < next);
+ bool sooner = (next == K_TICKS_FOREVER) || (ticks < next);
bool imminent = next <= 1;
/* Only set new timeouts when they are sooner than
@@ -248,3 +255,24 @@
}
#include <syscalls/k_uptime_get_mrsh.c>
#endif
+
+/* Return the system uptime, in ticks, at which a timeout object will
+ * expire. Note that the result is computed relative to an unlocked
+ * (and therefore potentially already stale) "now".
+ */
+u64_t z_timeout_end_calc(k_timeout_t timeout)
+{
+ k_ticks_t dt;
+
+ if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
+ return UINT64_MAX;
+ } else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
+ return z_tick_get();
+ }
+
+#ifdef CONFIG_LEGACY_TIMEOUT_API
+ dt = k_ms_to_ticks_ceil32(timeout);
+#else
+ dt = timeout.ticks;
+#endif
+ return z_tick_get() + MAX(1, dt);
+}
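A sketch of the deadline-based wait pattern this helper enables, replacing loops
that decremented a timeout numerically. It assumes access to the internal
z_tick_get()/z_timeout_end_calc() helpers; try_operation() is a hypothetical
stand-in for whatever the caller retries:

#include <zephyr.h>

extern bool try_operation(void);	/* hypothetical */

void wait_until_done(k_timeout_t timeout)
{
	/* Convert the timeout to an absolute uptime deadline once,
	 * then recheck against "now" on every retry.
	 */
	u64_t end = z_timeout_end_calc(timeout);

	for (;;) {
		if (try_operation()) {
			return;
		}

		/* K_FOREVER mapped to UINT64_MAX above, so this
		 * comparison never fires for an infinite wait.
		 */
		if ((u64_t)z_tick_get() >= end) {
			return;	/* deadline passed */
		}

		/* ...block for the remaining ticks and retry... */
	}
}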
diff --git a/kernel/timer.c b/kernel/timer.c
index 47c5639..bcaec8a 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -52,7 +52,8 @@
* if the timer is periodic, start it again; don't add _TICK_ALIGN
* since we're already aligned to a tick boundary
*/
- if (timer->period > 0) {
+ if (!K_TIMEOUT_EQ(timer->period, K_NO_WAIT) &&
+ !K_TIMEOUT_EQ(timer->period, K_FOREVER)) {
z_add_timeout(&timer->timeout, z_timer_expiration_handler,
timer->period);
}
@@ -105,29 +106,43 @@
}
-void z_impl_k_timer_start(struct k_timer *timer, s32_t duration, s32_t period)
+void z_impl_k_timer_start(struct k_timer *timer, k_timeout_t duration,
+ k_timeout_t period)
{
- __ASSERT(duration >= 0 && period >= 0 &&
- (duration != 0 || period != 0), "invalid parameters\n");
-
- volatile s32_t period_in_ticks, duration_in_ticks;
-
- period_in_ticks = k_ms_to_ticks_ceil32(period);
- duration_in_ticks = k_ms_to_ticks_ceil32(duration);
+#ifdef CONFIG_LEGACY_TIMEOUT_API
+ duration = k_ms_to_ticks_ceil32(duration);
+ period = k_ms_to_ticks_ceil32(period);
+#else
+ /* z_add_timeout() always adds one to the incoming tick count
+ * to round up to the next tick (by convention it waits for
+ * "at least as long as the specified timeout"), but the
+ * period interval is always guaranteed to be reset from
+ * within the timer ISR, so no round-up is desired; subtract
+ * one.
+ *
+ * Note that the duration (!) value gets the same treatment
+ * for backwards compatibility. This is unfortunate
+ * (i.e. k_timer_start() doesn't treat its initial sleep
+ * argument the same way k_sleep() does), but historical. The
+ * timer_api test relies on this behavior.
+ */
+ period.ticks = MAX(period.ticks - 1, 0);
+ duration.ticks = MAX(duration.ticks - 1, 0);
+#endif
(void)z_abort_timeout(&timer->timeout);
- timer->period = period_in_ticks;
+ timer->period = period;
timer->status = 0U;
+
z_add_timeout(&timer->timeout, z_timer_expiration_handler,
- duration_in_ticks);
+ duration);
}
#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_timer_start(struct k_timer *timer,
- s32_t duration, s32_t period)
+ k_timeout_t duration,
+ k_timeout_t period)
{
- Z_OOPS(Z_SYSCALL_VERIFY(duration >= 0 && period >= 0 &&
- (duration != 0 || period != 0)));
Z_OOPS(Z_SYSCALL_OBJ(timer, K_OBJ_TIMER));
z_impl_k_timer_start(timer, duration, period);
}
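For illustration, a sketch of timer usage against the new signature; with the
old non-zero asserts dropped, a K_NO_WAIT (or K_FOREVER) period now expresses a
one-shot timer. Names are illustrative:

#include <zephyr.h>

static void demo_expiry(struct k_timer *t)
{
}

K_TIMER_DEFINE(demo_timer, demo_expiry, NULL);

void start_demo_timers(void)
{
	/* one-shot: fire once, roughly 500ms from now */
	k_timer_start(&demo_timer, K_MSEC(500), K_NO_WAIT);

	/* periodic: first expiry after 10ms, then every 10ms,
	 * rearmed from the expiry ISR without re-rounding
	 */
	k_timer_start(&demo_timer, K_MSEC(10), K_MSEC(10));
}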
diff --git a/kernel/work_q.c b/kernel/work_q.c
index 96cd2f6..a0706d6 100644
--- a/kernel/work_q.c
+++ b/kernel/work_q.c
@@ -82,7 +82,7 @@
int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
struct k_delayed_work *work,
- s32_t delay)
+ k_timeout_t delay)
{
k_spinlock_key_t key = k_spin_lock(&lock);
int err = 0;
@@ -112,15 +112,18 @@
/* Submit work directly if no delay. Note that this is a
* blocking operation, so release the lock first.
*/
- if (delay == 0) {
+ if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
k_spin_unlock(&lock, key);
k_work_submit_to_queue(work_q, &work->work);
return 0;
}
+#ifdef CONFIG_LEGACY_TIMEOUT_API
+ delay = _TICK_ALIGN + k_ms_to_ticks_ceil32(delay);
+#endif
+
/* Add timeout */
- z_add_timeout(&work->timeout, work_timeout,
- _TICK_ALIGN + k_ms_to_ticks_ceil32(delay));
+ z_add_timeout(&work->timeout, work_timeout, delay);
done:
k_spin_unlock(&lock, key);
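And a sketch of delayed-work submission under the new type (names illustrative):

#include <zephyr.h>

static void dwork_handler(struct k_work *work)
{
}

static struct k_delayed_work dwork;

void submit_demo(void)
{
	k_delayed_work_init(&dwork, dwork_handler);

	/* run on the system workqueue after at least 250ms;
	 * K_NO_WAIT now expresses "submit immediately" where a
	 * literal 0 delay was used before.
	 */
	k_delayed_work_submit(&dwork, K_MSEC(250));
}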
diff --git a/lib/cmsis_rtos_v1/Kconfig b/lib/cmsis_rtos_v1/Kconfig
index fdfbc19..37c631c 100644
--- a/lib/cmsis_rtos_v1/Kconfig
+++ b/lib/cmsis_rtos_v1/Kconfig
@@ -5,6 +5,7 @@
bool "CMSIS RTOS v1 API"
depends on THREAD_CUSTOM_DATA
depends on POLL
+ select LEGACY_TIMEOUT_API
help
This enables CMSIS RTOS v1 API support. This is an OS-integration
layer which allows applications using CMSIS RTOS APIs to build on
diff --git a/lib/cmsis_rtos_v2/Kconfig b/lib/cmsis_rtos_v2/Kconfig
index d79fb04..cb54769 100644
--- a/lib/cmsis_rtos_v2/Kconfig
+++ b/lib/cmsis_rtos_v2/Kconfig
@@ -9,6 +9,7 @@
depends on THREAD_MONITOR
depends on INIT_STACKS
depends on NUM_PREEMPT_PRIORITIES >= 56
+ select LEGACY_TIMEOUT_API
help
This enables CMSIS RTOS v2 API support. This is an OS-integration
layer which allows applications using CMSIS RTOS V2 APIs to build
diff --git a/lib/os/mutex.c b/lib/os/mutex.c
index 43fc254..953ee57 100644
--- a/lib/os/mutex.c
+++ b/lib/os/mutex.c
@@ -30,7 +30,7 @@
return Z_SYSCALL_MEMORY_WRITE(addr, sizeof(struct sys_mutex));
}
-int z_impl_z_sys_mutex_kernel_lock(struct sys_mutex *mutex, s32_t timeout)
+int z_impl_z_sys_mutex_kernel_lock(struct sys_mutex *mutex, k_timeout_t timeout)
{
struct k_mutex *kernel_mutex = get_k_mutex(mutex);
@@ -42,7 +42,7 @@
}
static inline int z_vrfy_z_sys_mutex_kernel_lock(struct sys_mutex *mutex,
- s32_t timeout)
+ k_timeout_t timeout)
{
if (check_sys_mutex_addr(mutex)) {
return -EACCES;
diff --git a/lib/os/sem.c b/lib/os/sem.c
index 02d3d1f..cb81d68 100644
--- a/lib/os/sem.c
+++ b/lib/os/sem.c
@@ -79,7 +79,7 @@
return ret;
}
-int sys_sem_take(struct sys_sem *sem, s32_t timeout)
+int sys_sem_take(struct sys_sem *sem, k_timeout_t timeout)
{
int ret = 0;
atomic_t old_value;
@@ -120,7 +120,7 @@
return 0;
}
-int sys_sem_take(struct sys_sem *sem, s32_t timeout)
+int sys_sem_take(struct sys_sem *sem, k_timeout_t timeout)
{
int ret_value = 0;
diff --git a/lib/posix/Kconfig b/lib/posix/Kconfig
index 3e9e85c..0fadc77 100644
--- a/lib/posix/Kconfig
+++ b/lib/posix/Kconfig
@@ -12,6 +12,7 @@
config POSIX_API
depends on !ARCH_POSIX
bool "POSIX APIs"
+ select LEGACY_TIMEOUT_API
help
Enable mostly-standards-compliant implementations of
various POSIX (IEEE 1003.1) APIs.
diff --git a/lib/posix/pthread_common.c b/lib/posix/pthread_common.c
index 3043a19..a2dbfa7 100644
--- a/lib/posix/pthread_common.c
+++ b/lib/posix/pthread_common.c
@@ -24,7 +24,7 @@
nsecs = abstime->tv_nsec - curtime.tv_nsec;
if (secs < 0 || (secs == 0 && nsecs < NSEC_PER_MSEC)) {
- milli_secs = K_NO_WAIT;
+ milli_secs = 0;
} else {
milli_secs = secs * MSEC_PER_SEC + nsecs / NSEC_PER_MSEC;
}
diff --git a/samples/cpp_synchronization/src/main.cpp b/samples/cpp_synchronization/src/main.cpp
index 3959012..7a20469 100644
--- a/samples/cpp_synchronization/src/main.cpp
+++ b/samples/cpp_synchronization/src/main.cpp
@@ -94,7 +94,7 @@
*/
int cpp_semaphore::wait(int timeout)
{
- return k_sem_take(&_sema_internal, timeout);
+ return k_sem_take(&_sema_internal, K_MSEC(timeout));
}
/**
@@ -127,7 +127,7 @@
printk("%s: Hello World!\n", __FUNCTION__);
/* wait a while, then let main thread have a turn */
- k_timer_start(&timer, SLEEPTIME, 0);
+ k_timer_start(&timer, K_MSEC(SLEEPTIME), K_NO_WAIT);
k_timer_status_sync(&timer);
sem_main.give();
}
@@ -139,7 +139,7 @@
k_thread_create(&coop_thread, coop_stack, STACKSIZE,
(k_thread_entry_t) coop_thread_entry,
- NULL, NULL, NULL, K_PRIO_COOP(7), 0, 0);
+ NULL, NULL, NULL, K_PRIO_COOP(7), 0, K_NO_WAIT);
k_timer_init(&timer, NULL, NULL);
while (1) {
@@ -147,7 +147,7 @@
printk("%s: Hello World!\n", __FUNCTION__);
/* wait a while, then let coop thread have a turn */
- k_timer_start(&timer, SLEEPTIME, 0);
+ k_timer_start(&timer, K_MSEC(SLEEPTIME), K_NO_WAIT);
k_timer_status_sync(&timer);
sem_coop.give();
diff --git a/samples/scheduler/metairq_dispatch/src/msgdev.c b/samples/scheduler/metairq_dispatch/src/msgdev.c
index 6236686..ea5b11f 100644
--- a/samples/scheduler/metairq_dispatch/src/msgdev.c
+++ b/samples/scheduler/metairq_dispatch/src/msgdev.c
@@ -77,7 +77,11 @@
{
u32_t ticks = rand32() % MAX_EVENT_DELAY_TICKS;
+#ifdef CONFIG_LEGACY_TIMEOUT_API
z_add_timeout(&timeout, dev_timer_expired, ticks);
+#else
+ z_add_timeout(&timeout, dev_timer_expired, Z_TIMEOUT_TICKS(ticks));
+#endif
}
void message_dev_init(void)
diff --git a/soc/arm/ti_simplelink/Kconfig b/soc/arm/ti_simplelink/Kconfig
index e35796e..06a3fa6 100644
--- a/soc/arm/ti_simplelink/Kconfig
+++ b/soc/arm/ti_simplelink/Kconfig
@@ -4,6 +4,7 @@
config SOC_FAMILY_TISIMPLELINK
bool
+ select LEGACY_TIMEOUT_API
if SOC_FAMILY_TISIMPLELINK
diff --git a/subsys/console/Kconfig b/subsys/console/Kconfig
index 8693baa..c52d503 100644
--- a/subsys/console/Kconfig
+++ b/subsys/console/Kconfig
@@ -3,6 +3,7 @@
menuconfig CONSOLE_SUBSYS
bool "Console subsystem/support routines [EXPERIMENTAL]"
+ select LEGACY_TIMEOUT_API
help
Console subsystem and helper functions
diff --git a/subsys/net/Kconfig b/subsys/net/Kconfig
index e0fde62..a4da7f0 100644
--- a/subsys/net/Kconfig
+++ b/subsys/net/Kconfig
@@ -7,6 +7,7 @@
config NET_BUF
bool "Network buffer support"
+ select LEGACY_TIMEOUT_API
help
This option enables support for generic network protocol
buffers.
diff --git a/subsys/power/policy/policy_residency.c b/subsys/power/policy/policy_residency.c
index 849f30c..ebe228b 100644
--- a/subsys/power/policy/policy_residency.c
+++ b/subsys/power/policy/policy_residency.c
@@ -49,7 +49,7 @@
{
int i;
- if ((ticks != K_FOREVER) && (ticks < pm_min_residency[0])) {
+ if ((ticks != K_TICKS_FOREVER) && (ticks < pm_min_residency[0])) {
LOG_DBG("Not enough time for PM operations: %d", ticks);
return SYS_POWER_STATE_ACTIVE;
}
@@ -60,7 +60,7 @@
continue;
}
#endif
- if ((ticks == K_FOREVER) ||
+ if ((ticks == K_TICKS_FOREVER) ||
(ticks >= pm_min_residency[i])) {
LOG_DBG("Selected power state %d "
"(ticks: %d, min_residency: %u)",
diff --git a/tests/kernel/lifo/lifo_usage/src/main.c b/tests/kernel/lifo/lifo_usage/src/main.c
index 3787b3d..d5ea87a 100644
--- a/tests/kernel/lifo/lifo_usage/src/main.c
+++ b/tests/kernel/lifo/lifo_usage/src/main.c
@@ -35,7 +35,7 @@
struct timeout_order_data {
void *link_in_lifo;
struct k_lifo *klifo;
- s32_t timeout;
+ k_ticks_t timeout;
s32_t timeout_order;
s32_t q_order;
};
diff --git a/tests/kernel/mbox/mbox_usage/src/main.c b/tests/kernel/mbox/mbox_usage/src/main.c
index 34586cd..d97ae50 100644
--- a/tests/kernel/mbox/mbox_usage/src/main.c
+++ b/tests/kernel/mbox/mbox_usage/src/main.c
@@ -28,7 +28,7 @@
TARGET_SOURCE
} info_type;
-static void msg_sender(struct k_mbox *pmbox, s32_t timeout)
+static void msg_sender(struct k_mbox *pmbox, k_timeout_t timeout)
{
struct k_mbox_msg mmsg;
@@ -53,7 +53,8 @@
}
}
-static void msg_receiver(struct k_mbox *pmbox, k_tid_t thd_id, s32_t timeout)
+static void msg_receiver(struct k_mbox *pmbox, k_tid_t thd_id,
+ k_timeout_t timeout)
{
struct k_mbox_msg mmsg;
char rxdata[MAIL_LEN];
diff --git a/tests/kernel/mem_protect/futex/prj.conf b/tests/kernel/mem_protect/futex/prj.conf
index af4c2c1..251902f 100644
--- a/tests/kernel/mem_protect/futex/prj.conf
+++ b/tests/kernel/mem_protect/futex/prj.conf
@@ -2,3 +2,4 @@
CONFIG_IRQ_OFFLOAD=y
CONFIG_TEST_USERSPACE=y
CONFIG_MP_NUM_CPUS=1
+CONFIG_LEGACY_TIMEOUT_API=y
diff --git a/tests/kernel/mem_protect/futex/src/main.c b/tests/kernel/mem_protect/futex/src/main.c
index 9a4884d..7fa59a8 100644
--- a/tests/kernel/mem_protect/futex/src/main.c
+++ b/tests/kernel/mem_protect/futex/src/main.c
@@ -64,7 +64,8 @@
s32_t ret_value;
int time_val = *(int *)p1;
- zassert_true(time_val >= (int)K_FOREVER, "invalid timeout parameter");
+ zassert_true(time_val >= (int)K_TICKS_FOREVER,
+ "invalid timeout parameter");
ret_value = k_futex_wait(&simple_futex,
atomic_get(&simple_futex.val), time_val);
diff --git a/tests/kernel/pending/src/main.c b/tests/kernel/pending/src/main.c
index b1b38cc..f5d05bd 100644
--- a/tests/kernel/pending/src/main.c
+++ b/tests/kernel/pending/src/main.c
@@ -116,7 +116,7 @@
static void fifo_tests(s32_t timeout, volatile int *state,
void *(*get)(struct k_fifo *, s32_t),
- int (*sem_take)(struct k_sem *, s32_t))
+ int (*sem_take)(struct k_sem *, k_timeout_t))
{
struct fifo_data *data;
@@ -154,7 +154,7 @@
static void lifo_tests(s32_t timeout, volatile int *state,
void *(*get)(struct k_lifo *, s32_t),
- int (*sem_take)(struct k_sem *, s32_t))
+ int (*sem_take)(struct k_sem *, k_timeout_t))
{
struct lifo_data *data;
diff --git a/tests/kernel/pipe/pipe_api/src/test_pipe_contexts.c b/tests/kernel/pipe/pipe_api/src/test_pipe_contexts.c
index 2a8c4de..fbc1fff 100644
--- a/tests/kernel/pipe/pipe_api/src/test_pipe_contexts.c
+++ b/tests/kernel/pipe/pipe_api/src/test_pipe_contexts.c
@@ -42,7 +42,7 @@
#endif
K_MEM_POOL_DEFINE(test_pool, SZ, SZ, 4, 4);
-static void tpipe_put(struct k_pipe *ppipe, s32_t timeout)
+static void tpipe_put(struct k_pipe *ppipe, k_timeout_t timeout)
{
size_t to_wt, wt_byte = 0;
@@ -57,7 +57,7 @@
}
static void tpipe_block_put(struct k_pipe *ppipe, struct k_sem *sema,
- s32_t timeout)
+ k_timeout_t timeout)
{
struct k_mem_block block;
@@ -73,7 +73,7 @@
}
}
-static void tpipe_get(struct k_pipe *ppipe, s32_t timeout)
+static void tpipe_get(struct k_pipe *ppipe, k_timeout_t timeout)
{
unsigned char rx_data[PIPE_LEN];
size_t to_rd, rd_byte = 0;
diff --git a/tests/kernel/sleep/src/main.c b/tests/kernel/sleep/src/main.c
index 224c3f1..574355d 100644
--- a/tests/kernel/sleep/src/main.c
+++ b/tests/kernel/sleep/src/main.c
@@ -251,7 +251,7 @@
s32_t ret;
ret = k_sleep(K_FOREVER);
- zassert_equal(ret, K_FOREVER, "unexpected return value");
+ zassert_equal(ret, K_TICKS_FOREVER, "unexpected return value");
k_sem_give(&test_thread_sem);
}
diff --git a/tests/kernel/workq/work_queue/src/main.c b/tests/kernel/workq/work_queue/src/main.c
index 74705cc..7a8998d 100644
--- a/tests/kernel/workq/work_queue/src/main.c
+++ b/tests/kernel/workq/work_queue/src/main.c
@@ -480,7 +480,7 @@
*
* @see k_work_poll_init(), k_work_poll_submit()
*/
-static void test_triggered_submit(s32_t timeout)
+static void test_triggered_submit(k_timeout_t timeout)
{
int i;