sys_clock: Fix unsafe tick count usage
The system tick count is a 64-bit quantity that gets updated from
interrupt context, meaning that it's dangerously non-atomic and has to
be locked. The core kernel clock code did this right.
But the value was also exposed to the rest of the universe as a global
variable, and virtually nothing else was doing this correctly. Even
the timer ISRs themselves may be preempted (most of our architectures
support nested interrupts) by code that wants to set timeouts and
inspect system uptime.
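To make the hazard concrete, here is a hypothetical sketch
(illustration only, not code from this tree) of how an unlocked
reader tears on a 32-bit core:

    extern volatile u64_t _sys_clock_tick_count;

    u64_t bad_read(void)
    {
            /* The 64-bit load is two instructions here, roughly:
             *   load r0 <- low word    (old value)
             *     ... nested timer interrupt advances the count
             *         across a 32-bit rollover ...
             *   load r1 <- high word   (new value)
             * so the caller can see a value ~2^32 ticks off.
             */
            return _sys_clock_tick_count;
    }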
Define a z_tick_{get,set}() API, eliminate the old variable, and make
sure everyone uses the right mechanism.
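The accessors reduce to the classic irq-locked read/write pattern
(this mirrors the non-tickless path in the kernel/sys_clock.c hunk
below; with CONFIG_TICKLESS_KERNEL the getter defers to
_get_elapsed_clock_time() instead):

    static volatile u64_t tick_count;

    s64_t z_tick_get(void)
    {
            /* A 64-bit read is not atomic; mask interrupts around it */
            unsigned int key = irq_lock();
            s64_t ret = tick_count;

            irq_unlock(key);
            return ret;
    }

    void z_tick_set(s64_t val)
    {
            unsigned int key = irq_lock();

            tick_count = val;
            irq_unlock(key);
    }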
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
diff --git a/arch/arc/core/timestamp.c b/arch/arc/core/timestamp.c
index 74d588b..d3d0a76 100644
--- a/arch/arc/core/timestamp.c
+++ b/arch/arc/core/timestamp.c
@@ -15,9 +15,6 @@
#include <toolchain.h>
#include <kernel_structs.h>
-extern volatile u64_t _sys_clock_tick_count;
-extern int sys_clock_hw_cycles_per_tick();
-
/*
* @brief Read 64-bit timestamp value
*
@@ -33,7 +30,7 @@
u32_t count;
key = irq_lock();
- t = (u64_t)_sys_clock_tick_count;
+ t = (u64_t)z_tick_get();
count = _arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT);
irq_unlock(key);
t *= (u64_t)sys_clock_hw_cycles_per_tick();
diff --git a/drivers/timer/arcv2_timer0.c b/drivers/timer/arcv2_timer0.c
index d58edd0..05a9279 100644
--- a/drivers/timer/arcv2_timer0.c
+++ b/drivers/timer/arcv2_timer0.c
@@ -196,7 +196,7 @@
#ifdef CONFIG_TICKLESS_KERNEL
if (!programmed_ticks) {
if (_sys_clock_always_on) {
- _sys_clock_tick_count = _get_elapsed_clock_time();
+ z_tick_set(_get_elapsed_clock_time());
program_max_cycles();
}
return;
@@ -216,7 +216,7 @@
/* _sys_clock_tick_announce() could cause new programming */
if (!programmed_ticks && _sys_clock_always_on) {
- _sys_clock_tick_count = _get_elapsed_clock_time();
+ z_tick_set(_get_elapsed_clock_time());
program_max_cycles();
}
#else
@@ -280,7 +280,7 @@
programmed_ticks = time > max_system_ticks ? max_system_ticks : time;
- _sys_clock_tick_count = _get_elapsed_clock_time();
+ z_tick_set(_get_elapsed_clock_time());
timer0_limit_register_set(programmed_ticks * cycles_per_tick);
timer0_count_register_set(0);
@@ -306,7 +306,7 @@
elapsed = timer0_count_register_get();
}
- elapsed += _sys_clock_tick_count * cycles_per_tick;
+ elapsed += z_tick_get() * cycles_per_tick;
return elapsed;
}
diff --git a/drivers/timer/cortex_m_systick.c b/drivers/timer/cortex_m_systick.c
index f68a6a3..bebcb71 100644
--- a/drivers/timer/cortex_m_systick.c
+++ b/drivers/timer/cortex_m_systick.c
@@ -242,7 +242,7 @@
#if defined(CONFIG_TICKLESS_KERNEL)
if (!idle_original_ticks) {
if (_sys_clock_always_on) {
- _sys_clock_tick_count = _get_elapsed_clock_time();
+ z_tick_set(_get_elapsed_clock_time());
/* clear overflow tracking flag as it is accounted */
timer_overflow = 0;
sysTickStop();
@@ -272,7 +272,7 @@
/* _sys_clock_tick_announce() could cause new programming */
if (!idle_original_ticks && _sys_clock_always_on) {
- _sys_clock_tick_count = _get_elapsed_clock_time();
+ z_tick_set(_get_elapsed_clock_time());
/* clear overflow tracking flag as it is accounted */
timer_overflow = 0;
sysTickStop();
@@ -389,7 +389,7 @@
idle_original_ticks = time > max_system_ticks ? max_system_ticks : time;
- _sys_clock_tick_count = _get_elapsed_clock_time();
+ z_tick_set(_get_elapsed_clock_time());
/* clear overflow tracking flag as it is accounted */
timer_overflow = 0;
@@ -415,14 +415,14 @@
if ((SysTick->CTRL & SysTick_CTRL_COUNTFLAG_Msk) || (timer_overflow)) {
elapsed = SysTick->LOAD;
/* Keep track of overflow till it is accounted in
- * _sys_clock_tick_count as COUNTFLAG bit is clear on read
+ * z_tick_get() as COUNTFLAG bit is cleared on read
*/
timer_overflow = 1;
} else {
elapsed = (SysTick->LOAD - SysTick->VAL);
}
- elapsed += (_sys_clock_tick_count * default_load_value);
+ elapsed += (z_tick_get() * default_load_value);
return elapsed;
}
@@ -604,7 +604,7 @@
if (idle_mode == IDLE_TICKLESS) {
idle_mode = IDLE_NOT_TICKLESS;
if (!idle_original_ticks && _sys_clock_always_on) {
- _sys_clock_tick_count = _get_elapsed_clock_time();
+ z_tick_set(_get_elapsed_clock_time());
timer_overflow = 0;
sysTickReloadSet(max_load_value);
sysTickStart();
diff --git a/drivers/timer/hpet.c b/drivers/timer/hpet.c
index b8f1f89..02156db 100644
--- a/drivers/timer/hpet.c
+++ b/drivers/timer/hpet.c
@@ -272,7 +272,7 @@
/* If timer not programmed or already consumed exit */
if (!programmed_ticks) {
if (_sys_clock_always_on) {
- _sys_clock_tick_count = _get_elapsed_clock_time();
+ z_tick_set(_get_elapsed_clock_time());
program_max_cycles();
}
return;
@@ -301,7 +301,7 @@
/* _sys_clock_tick_announce() could cause new programming */
if (!programmed_ticks && _sys_clock_always_on) {
- _sys_clock_tick_count = _get_elapsed_clock_time();
+ z_tick_set(_get_elapsed_clock_time());
program_max_cycles();
}
#else
@@ -354,7 +354,7 @@
programmed_ticks = time;
- _sys_clock_tick_count = _get_elapsed_clock_time();
+ z_tick_set(_get_elapsed_clock_time());
stale_irq_check = 1;
@@ -375,7 +375,7 @@
{
u64_t elapsed;
- elapsed = _sys_clock_tick_count;
+ elapsed = z_tick_get();
elapsed += ((s64_t)(_hpetMainCounterAtomic() -
counter_last_value) / counter_load_value);
diff --git a/drivers/timer/loapic_timer.c b/drivers/timer/loapic_timer.c
index 39d6837..70f8210 100644
--- a/drivers/timer/loapic_timer.c
+++ b/drivers/timer/loapic_timer.c
@@ -295,7 +295,7 @@
#if defined(CONFIG_TICKLESS_KERNEL)
if (!programmed_full_ticks) {
if (_sys_clock_always_on) {
- _sys_clock_tick_count = _get_elapsed_clock_time();
+ z_tick_set(_get_elapsed_clock_time());
program_max_cycles();
}
return;
@@ -321,7 +321,7 @@
/* _sys_clock_tick_announce() could cause new programming */
if (!programmed_full_ticks && _sys_clock_always_on) {
- _sys_clock_tick_count = _get_elapsed_clock_time();
+ z_tick_set(_get_elapsed_clock_time());
program_max_cycles();
}
#else
@@ -409,7 +409,7 @@
programmed_full_ticks =
time > max_system_ticks ? max_system_ticks : time;
- _sys_clock_tick_count = _get_elapsed_clock_time();
+ z_tick_set(_get_elapsed_clock_time());
programmed_cycles = programmed_full_ticks * cycles_per_tick;
initial_count_register_set(programmed_cycles);
@@ -426,7 +426,7 @@
{
u64_t elapsed;
- elapsed = _sys_clock_tick_count;
+ elapsed = z_tick_get();
if (programmed_cycles) {
elapsed +=
(programmed_cycles -
diff --git a/drivers/timer/nrf_rtc_timer.c b/drivers/timer/nrf_rtc_timer.c
index 81f6c51..16abcd6 100644
--- a/drivers/timer/nrf_rtc_timer.c
+++ b/drivers/timer/nrf_rtc_timer.c
@@ -213,15 +213,15 @@
#ifdef CONFIG_TICKLESS_KERNEL
/*
* Set RTC Counter Compare (CC) register to max value
- * and update the _sys_clock_tick_count.
+ * and update the tick count via z_tick_set().
*/
static inline void program_max_cycles(void)
{
u32_t max_cycles = _get_max_clock_time();
- _sys_clock_tick_count = _get_elapsed_clock_time();
+ z_tick_set(_get_elapsed_clock_time());
/* Update rtc_past to track rtc timer count*/
- rtc_past = (_sys_clock_tick_count *
+ rtc_past = (z_tick_get() *
sys_clock_hw_cycles_per_tick()) & RTC_MASK;
/* Programe RTC compare register to generate interrupt*/
@@ -305,9 +305,9 @@
/* Update expected_sys_ticls to time to programe*/
expected_sys_ticks = time;
- _sys_clock_tick_count = _get_elapsed_clock_time();
+ z_tick_set(_get_elapsed_clock_time());
/* Update rtc_past to track rtc timer count*/
- rtc_past = (_sys_clock_tick_count * sys_clock_hw_cycles_per_tick()) & RTC_MASK;
+ rtc_past = (z_tick_get() * sys_clock_hw_cycles_per_tick()) & RTC_MASK;
expected_sys_ticks = expected_sys_ticks > _get_max_clock_time() ?
_get_max_clock_time() : expected_sys_ticks;
@@ -368,8 +368,8 @@
u64_t elapsed;
u32_t rtc_elapsed, rtc_past_copy;
- /* Read _sys_clock_tick_count and rtc_past before RTC_COUNTER */
- elapsed = _sys_clock_tick_count;
+ /* Read z_tick_get() and rtc_past before RTC_COUNTER */
+ elapsed = z_tick_get();
rtc_past_copy = rtc_past;
/* Make sure that compiler will not reverse access to RTC and
@@ -549,17 +549,17 @@
u32_t elapsed_cycles;
/* Number of timer cycles announced as ticks so far. */
- ticked_cycles = _sys_clock_tick_count * sys_clock_hw_cycles_per_tick();
+ ticked_cycles = z_tick_get() * sys_clock_hw_cycles_per_tick();
/* Make sure that compiler will not reverse access to RTC and
- * _sys_clock_tick_count.
+ * z_tick_get().
*/
compiler_barrier();
/* Number of timer cycles since last announced tick we know about.
*
* The value of RTC_COUNTER is not reset on tick, so it will
- * compensate potentialy missed update of _sys_clock_tick_count
+ * compensate for a potentially missed update of z_tick_get()
* which could have happen between the ticked_cycles calculation
* and the code below.
*/
diff --git a/drivers/timer/xtensa_sys_timer.c b/drivers/timer/xtensa_sys_timer.c
index d22d668..ab3cde5 100644
--- a/drivers/timer/xtensa_sys_timer.c
+++ b/drivers/timer/xtensa_sys_timer.c
@@ -165,7 +165,7 @@
unsigned int key;
key = irq_lock();
- _sys_clock_tick_count = _get_elapsed_clock_time();
+ z_tick_set(_get_elapsed_clock_time());
last_timer_value = GET_TIMER_CURRENT_TIME();
irq_unlock(key);
SET_TIMER_FIRE_TIME(MAX_TIMER_CYCLES); /* Program timer to max value */
@@ -189,7 +189,7 @@
}
key = irq_lock();
/* Update System Level Ticks Time Keeping */
- _sys_clock_tick_count = _get_elapsed_clock_time();
+ z_tick_set(_get_elapsed_clock_time());
C = GET_TIMER_CURRENT_TIME();
last_timer_value = C;
irq_unlock(key);
@@ -233,7 +233,7 @@
C = GET_TIMER_CURRENT_TIME();
elapsed = (last_timer_value <= C) ? (C - last_timer_value) :
(MAX_TIMER_CYCLES - last_timer_value) + C;
- total = (_sys_clock_tick_count + (elapsed / cycles_per_tick));
+ total = (z_tick_get() + (elapsed / cycles_per_tick));
irq_unlock(key);
return total;
diff --git a/ext/hal/libmetal/libmetal/lib/system/zephyr/time.c b/ext/hal/libmetal/libmetal/lib/system/zephyr/time.c
index 3458e84..67e040a 100644
--- a/ext/hal/libmetal/libmetal/lib/system/zephyr/time.c
+++ b/ext/hal/libmetal/libmetal/lib/system/zephyr/time.c
@@ -12,10 +12,8 @@
#include <metal/time.h>
#include <sys_clock.h>
-extern volatile u64_t _sys_clock_tick_count;
-
unsigned long long metal_get_timestamp(void)
{
- return (unsigned long long)_sys_clock_tick_count;
+ return (unsigned long long)z_tick_get();
}
diff --git a/include/sys_clock.h b/include/sys_clock.h
index 3a6bdae..abcda46 100644
--- a/include/sys_clock.h
+++ b/include/sys_clock.h
@@ -185,7 +185,32 @@
* @} end defgroup clock_apis
*/
-extern volatile u64_t _sys_clock_tick_count;
+/**
+ *
+ * @brief Return the lower 32 bits of the current system tick count
+ *
+ * @return the current system tick count
+ *
+ */
+u32_t z_tick_get_32(void);
+
+/**
+ *
+ * @brief Return the current system tick count
+ *
+ * @return the current system tick count
+ *
+ */
+s64_t z_tick_get(void);
+
+/**
+ *
+ * @brief Set the current system tick count
+ *
+ * @param ticks Ticks since system start
+ *
+ */
+void z_tick_set(s64_t ticks);
/* timeouts */
diff --git a/kernel/mempool.c b/kernel/mempool.c
index cac51df..2f839f7 100644
--- a/kernel/mempool.c
+++ b/kernel/mempool.c
@@ -16,8 +16,6 @@
extern struct k_mem_pool _k_mem_pool_list_start[];
extern struct k_mem_pool _k_mem_pool_list_end[];
-s64_t _tick_get(void);
-
static struct k_mem_pool *get_pool(int id)
{
return &_k_mem_pool_list_start[id];
@@ -57,7 +55,7 @@
__ASSERT(!(_is_in_isr() && timeout != K_NO_WAIT), "");
if (timeout > 0) {
- end = _tick_get() + _ms_to_ticks(timeout);
+ end = z_tick_get() + _ms_to_ticks(timeout);
}
while (true) {
@@ -95,7 +93,7 @@
(void)_pend_current_thread(irq_lock(), &p->wait_q, timeout);
if (timeout != K_FOREVER) {
- timeout = end - _tick_get();
+ timeout = end - z_tick_get();
if (timeout < 0) {
break;
diff --git a/kernel/sys_clock.c b/kernel/sys_clock.c
index 6bf6c2a..c7c1954 100644
--- a/kernel/sys_clock.c
+++ b/kernel/sys_clock.c
@@ -33,7 +33,11 @@
/* updated by timer driver for tickless, stays at 1 for non-tickless */
s32_t _sys_idle_elapsed_ticks = 1;
-volatile u64_t _sys_clock_tick_count;
+/* Note that this value is 64 bits, and thus non-atomic on almost all
+ * Zephyr architectures. And of course it's routinely updated inside
+ * timer interrupts. Access to it must be locked.
+ */
+static volatile u64_t tick_count;
#ifdef CONFIG_TICKLESS_KERNEL
/*
@@ -46,22 +50,15 @@
static u32_t next_ts;
#endif
-/**
- *
- * @brief Return the lower part of the current system tick count
- *
- * @return the current system tick count
- *
- */
-u32_t _tick_get_32(void)
+
+u32_t z_tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
return (u32_t)_get_elapsed_clock_time();
#else
- return (u32_t)_sys_clock_tick_count;
+ return (u32_t)tick_count;
#endif
}
-FUNC_ALIAS(_tick_get_32, sys_tick_get_32, u32_t);
u32_t _impl_k_uptime_get_32(void)
{
@@ -69,7 +66,7 @@
__ASSERT(_sys_clock_always_on,
"Call k_enable_sys_clock_always_on to use clock API");
#endif
- return __ticks_to_ms(_tick_get_32());
+ return __ticks_to_ms(z_tick_get_32());
}
#ifdef CONFIG_USERSPACE
@@ -82,33 +79,26 @@
}
#endif
-/**
- *
- * @brief Return the current system tick count
- *
- * @return the current system tick count
- *
- */
-s64_t _tick_get(void)
+s64_t z_tick_get(void)
{
- s64_t tmp_sys_clock_tick_count;
- /*
- * Lock the interrupts when reading _sys_clock_tick_count 64-bit
- * variable. Some architectures (x86) do not handle 64-bit atomically,
- * so we have to lock the timer interrupt that causes change of
- * _sys_clock_tick_count
- */
- unsigned int imask = irq_lock();
-
#ifdef CONFIG_TICKLESS_KERNEL
- tmp_sys_clock_tick_count = _get_elapsed_clock_time();
+ return _get_elapsed_clock_time();
#else
- tmp_sys_clock_tick_count = _sys_clock_tick_count;
+ unsigned int key = irq_lock();
+ s64_t ret = tick_count;
+
+ irq_unlock(key);
+ return ret;
#endif
- irq_unlock(imask);
- return tmp_sys_clock_tick_count;
}
-FUNC_ALIAS(_tick_get, sys_tick_get, s64_t);
+
+void z_tick_set(s64_t val)
+{
+ unsigned int key = irq_lock();
+
+ tick_count = val;
+ irq_unlock(key);
+}
s64_t _impl_k_uptime_get(void)
{
@@ -116,7 +106,7 @@
__ASSERT(_sys_clock_always_on,
"Call k_enable_sys_clock_always_on to use clock API");
#endif
- return __ticks_to_ms(_tick_get());
+ return __ticks_to_ms(z_tick_get());
}
#ifdef CONFIG_USERSPACE
@@ -316,9 +306,8 @@
K_DEBUG("ticks: %d\n", ticks);
- /* 64-bit value, ensure atomic access with irq lock */
key = irq_lock();
- _sys_clock_tick_count += ticks;
+ tick_count += ticks;
irq_unlock(key);
#endif
handle_timeouts(ticks);
diff --git a/tests/benchmarks/footprint/README.txt b/tests/benchmarks/footprint/README.txt
index 1ef71bc..90f07f9 100644
--- a/tests/benchmarks/footprint/README.txt
+++ b/tests/benchmarks/footprint/README.txt
@@ -79,9 +79,9 @@
-------
This configuration does NOT produce any output. To observe its operation,
invoke it using gdb and observe that:
-- the kernel's timer ISR & main thread increment "_sys_clock_tick_count" on a
+- the kernel's timer ISR & main thread increment the "z_tick_get()" value on a
regular basis
-- k_cpu_idle() is invoked by the idle task each time _sys_clock_tick_count
+- k_cpu_idle() is invoked by the idle task each time the z_tick_get() value
is incremented
regular
diff --git a/tests/benchmarks/sys_kernel/src/syskernel.c b/tests/benchmarks/sys_kernel/src/syskernel.c
index 344a802..a2dd95b 100644
--- a/tests/benchmarks/sys_kernel/src/syskernel.c
+++ b/tests/benchmarks/sys_kernel/src/syskernel.c
@@ -147,11 +147,11 @@
/* The following code is needed to make the benchmakring run on
* slower platforms.
*/
- u64_t time_stamp = _sys_clock_tick_count;
+ u64_t time_stamp = z_tick_get();
k_sleep(1);
- u64_t time_stamp_2 = _sys_clock_tick_count;
+ u64_t time_stamp_2 = z_tick_get();
if (time_stamp_2 - time_stamp > 1) {
number_of_loops = 10;
diff --git a/tests/kernel/context/src/main.c b/tests/kernel/context/src/main.c
index 6c0f540..08b61e4 100644
--- a/tests/kernel/context/src/main.c
+++ b/tests/kernel/context/src/main.c
@@ -24,6 +24,7 @@
#include <kernel_structs.h>
#include <arch/cpu.h>
#include <irq_offload.h>
+#include <sys_clock.h>
/*
* Include board.h from platform to get IRQ number.
@@ -95,9 +96,6 @@
-extern u32_t _tick_get_32(void);
-extern s64_t _tick_get(void);
-
typedef struct {
int command; /* command to process */
int error; /* error value (if any) */
@@ -310,15 +308,15 @@
int imask;
/* Align to a "tick boundary" */
- tick = _tick_get_32();
- while (_tick_get_32() == tick) {
+ tick = z_tick_get_32();
+ while (z_tick_get_32() == tick) {
#if defined(CONFIG_ARCH_POSIX)
k_busy_wait(1000);
#endif
}
tick++;
- while (_tick_get_32() == tick) {
+ while (z_tick_get_32() == tick) {
#if defined(CONFIG_ARCH_POSIX)
k_busy_wait(1000);
#endif
@@ -335,15 +333,15 @@
count <<= 4;
imask = disable_int(irq);
- tick = _tick_get_32();
+ tick = z_tick_get_32();
for (i = 0; i < count; i++) {
- _tick_get_32();
+ z_tick_get_32();
#if defined(CONFIG_ARCH_POSIX)
k_busy_wait(1000);
#endif
}
- tick2 = _tick_get_32();
+ tick2 = z_tick_get_32();
/*
* Re-enable interrupts before returning (for both success and failure
@@ -356,13 +354,13 @@
/* Now repeat with interrupts unlocked. */
for (i = 0; i < count; i++) {
- _tick_get_32();
+ z_tick_get_32();
#if defined(CONFIG_ARCH_POSIX)
k_busy_wait(1000);
#endif
}
- tick2 = _tick_get_32();
+ tick2 = z_tick_get_32();
zassert_not_equal(tick, tick2,
"tick didn't advance as expected");
}
diff --git a/tests/kernel/fp_sharing/src/main.c b/tests/kernel/fp_sharing/src/main.c
index a9d5b2f..aaf0059 100644
--- a/tests/kernel/fp_sharing/src/main.c
+++ b/tests/kernel/fp_sharing/src/main.c
@@ -97,7 +97,6 @@
static volatile unsigned int load_store_low_count;
static volatile unsigned int load_store_high_count;
-extern u32_t _tick_get_32(void);
extern void calculate_pi_low(void);
extern void calculate_pi_high(void);
@@ -169,11 +168,11 @@
* thread an opportunity to run when the low priority thread is
* using the floating point registers.
*
- * IMPORTANT: This logic requires that sys_tick_get_32() not
+ * IMPORTANT: This logic requires that z_tick_get_32() not
* perform any floating point operations!
*/
- while ((_tick_get_32() % 5) != 0) {
+ while ((z_tick_get_32() % 5) != 0) {
/*
* Use a volatile variable to prevent compiler
* optimizing out the spin loop.
diff --git a/tests/kernel/mem_protect/app_memory/src/main.c b/tests/kernel/mem_protect/app_memory/src/main.c
index a289c1b..61c4d77 100644
--- a/tests/kernel/mem_protect/app_memory/src/main.c
+++ b/tests/kernel/mem_protect/app_memory/src/main.c
@@ -8,6 +8,7 @@
#include <tc_util.h>
#include <linker/linker-defs.h>
#include <ztest.h>
+#include <kernel_structs.h>
/**
* @brief Memory protection tests
@@ -28,7 +29,7 @@
struct test_struct __kernel_noinit kernel_noinit;
/* Real kernel variable, check it is in the right place */
-extern volatile u64_t _sys_clock_tick_count;
+extern struct _kernel _kernel;
struct test_struct app_data = {3, 4, NULL};
struct test_struct app_bss;
@@ -73,7 +74,7 @@
zassert_true(kernel_loc(&kernel_bss), "not in kernel memory");
zassert_true(kernel_loc(&kernel_noinit), "not in kernel memory");
- zassert_true(kernel_loc((void *)&_sys_clock_tick_count),
+ zassert_true(kernel_loc((void *)&_kernel),
"not in kernel memory");
}