/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/spinlock.h>
#include <ksched.h>
#include <timeout_q.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/sys_clock.h>

static uint64_t curr_tick;

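/* List of active timeouts, sorted by expiry.  Each node's dticks is a
 * delta relative to its predecessor, so only the head stores a
 * distance from the last announced tick (curr_tick).
 */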
static sys_dlist_t timeout_list = SYS_DLIST_STATIC_INIT(&timeout_list);

static struct k_spinlock timeout_lock;

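/* Longest interval we will program into the timer driver when nothing
 * is pending: with SLOPPY_IDLE the hardware may idle indefinitely,
 * otherwise cap the wait at INT_MAX ticks.
 */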
#define MAX_WAIT (IS_ENABLED(CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE) \
		 ? K_TICKS_FOREVER : INT_MAX)

/* Ticks left to process in the currently-executing sys_clock_announce() */
static int announce_remaining;

#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int z_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_sys_clock_hw_cycles_per_sec_runtime_get(void)
{
	return z_impl_sys_clock_hw_cycles_per_sec_runtime_get();
}
#include <syscalls/sys_clock_hw_cycles_per_sec_runtime_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME */

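/* List traversal helpers; callers must hold timeout_lock */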
static struct _timeout *first(void)
{
	sys_dnode_t *t = sys_dlist_peek_head(&timeout_list);

	return (t == NULL) ? NULL : CONTAINER_OF(t, struct _timeout, node);
}

static struct _timeout *next(struct _timeout *t)
{
	sys_dnode_t *n = sys_dlist_peek_next(&timeout_list, &t->node);

	return (n == NULL) ? NULL : CONTAINER_OF(n, struct _timeout, node);
}

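/* Unlink a timeout, folding its delta into its successor so that all
 * later timeouts keep their absolute expiry times.  Requires
 * timeout_lock to be held.
 */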
static void remove_timeout(struct _timeout *t)
{
	if (next(t) != NULL) {
		next(t)->dticks += t->dticks;
	}

	sys_dlist_remove(&t->node);
}

static int32_t elapsed(void)
{
	/* While sys_clock_announce() is executing, new relative timeouts
	 * are scheduled relative to the tick value of the currently
	 * firing timeout (i.e. curr_tick) rather than relative to the
	 * current sys_clock_elapsed().
	 *
	 * This means that timeouts scheduled from within timeout
	 * callbacks are placed at well-defined offsets from the
	 * currently firing timeout.
	 *
	 * As a side effect, the same happens if a higher-priority ISR
	 * preempts a timeout callback and schedules a timeout.
	 *
	 * The distinction is implemented by looking at announce_remaining,
	 * which is non-zero while sys_clock_announce() is executing and
	 * zero otherwise.
	 */
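	/* Example: a callback running while curr_tick == 1000 that
	 * schedules a new K_TICKS(10) timeout anchors it against tick
	 * 1000, not against however far the hardware counter has
	 * advanced since that tick was announced.
	 */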
	return announce_remaining == 0 ? sys_clock_elapsed() : 0U;
}

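/* Ticks from now until the first queued timeout expires, clamped to
 * the [0, MAX_WAIT] range.  Requires timeout_lock to be held.
 */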
static int32_t next_timeout(void)
{
	struct _timeout *to = first();
	int32_t ticks_elapsed = elapsed();
	int32_t ret;

	if ((to == NULL) ||
	    ((int64_t)(to->dticks - ticks_elapsed) > (int64_t)INT_MAX)) {
		ret = MAX_WAIT;
	} else {
		ret = MAX(0, to->dticks - ticks_elapsed);
	}

	return ret;
}

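/* Queue a timeout to fire (via fn) after the given timeout.  Absolute
 * Z_TICK_ABS() timeouts are converted to a delta from curr_tick;
 * relative ones are anchored as "timeout.ticks + 1 + elapsed()", so a
 * relative timeout always waits at least one full tick boundary and
 * can never fire early.  Insertion walks the delta list, subtracting
 * earlier nodes' dticks until the sorted position is found.
 */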
void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
		   k_timeout_t timeout)
{
	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		return;
	}

#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(to));
#endif /* CONFIG_KERNEL_COHERENCE */

	__ASSERT(!sys_dnode_is_linked(&to->node), "");
	to->fn = fn;

	K_SPINLOCK(&timeout_lock) {
		struct _timeout *t;

		if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) &&
		    (Z_TICK_ABS(timeout.ticks) >= 0)) {
			k_ticks_t ticks = Z_TICK_ABS(timeout.ticks) - curr_tick;

			to->dticks = MAX(1, ticks);
		} else {
			to->dticks = timeout.ticks + 1 + elapsed();
		}

		for (t = first(); t != NULL; t = next(t)) {
			if (t->dticks > to->dticks) {
				t->dticks -= to->dticks;
				sys_dlist_insert(&t->node, &to->node);
				break;
			}
			to->dticks -= t->dticks;
		}

		if (t == NULL) {
			sys_dlist_append(&timeout_list, &to->node);
		}

		if (to == first() && announce_remaining == 0) {
			sys_clock_set_timeout(next_timeout(), false);
		}
	}
}

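/* Cancel a pending timeout.  Returns 0 on success, or -EINVAL if the
 * timeout was not active.
 */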
int z_abort_timeout(struct _timeout *to)
{
	int ret = -EINVAL;

	K_SPINLOCK(&timeout_lock) {
		if (sys_dnode_is_linked(&to->node)) {
			remove_timeout(to);
			ret = 0;
		}
	}

	return ret;
}

/* Ticks from the last announced tick until the given timeout expires;
 * timeout_lock must be held.
 */
static k_ticks_t timeout_rem(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	for (struct _timeout *t = first(); t != NULL; t = next(t)) {
		ticks += t->dticks;
		if (timeout == t) {
			break;
		}
	}

	return ticks;
}

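/* Ticks remaining until the timeout fires; 0 if it is inactive */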
k_ticks_t z_timeout_remaining(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	K_SPINLOCK(&timeout_lock) {
		if (!z_is_inactive_timeout(timeout)) {
			ticks = timeout_rem(timeout) - elapsed();
		}
	}

	return ticks;
}

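/* Absolute tick count at which the timeout will fire */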
k_ticks_t z_timeout_expires(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	K_SPINLOCK(&timeout_lock) {
		ticks = curr_tick;
		if (!z_is_inactive_timeout(timeout)) {
			ticks += timeout_rem(timeout);
		}
	}

	return ticks;
}

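/* Locked wrapper around next_timeout() for callers outside this file */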
int32_t z_get_next_timeout_expiry(void)
{
	int32_t ret = (int32_t) K_TICKS_FOREVER;

	K_SPINLOCK(&timeout_lock) {
		ret = next_timeout();
	}
	return ret;
}

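/* Called from the timer driver to announce that some number of ticks
 * has elapsed.  Expired timeouts are dequeued and their callbacks run
 * with timeout_lock released, so a callback may itself add or abort
 * timeouts.
 */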
void sys_clock_announce(int32_t ticks)
{
	k_spinlock_key_t key = k_spin_lock(&timeout_lock);

	/* We release the lock around the callbacks below, so on SMP
	 * systems someone might already be running the loop.  Don't
	 * race (which would cause parallel execution of "sequential"
	 * timeouts and confuse apps), just increment the tick count
	 * and return.
	 */
	if (IS_ENABLED(CONFIG_SMP) && (announce_remaining != 0)) {
		announce_remaining += ticks;
		k_spin_unlock(&timeout_lock, key);
		return;
	}

	announce_remaining = ticks;

	struct _timeout *t;

	for (t = first();
	     (t != NULL) && (t->dticks <= announce_remaining);
	     t = first()) {
		int dt = t->dticks;

		curr_tick += dt;
		t->dticks = 0;
		remove_timeout(t);

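		/* Run the callback with the lock dropped: it may mutate
		 * the list, e.g. a periodic k_timer rescheduling itself
		 * through z_add_timeout().
		 */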
		k_spin_unlock(&timeout_lock, key);
		t->fn(t);
		key = k_spin_lock(&timeout_lock);
		announce_remaining -= dt;
	}

	if (t != NULL) {
		t->dticks -= announce_remaining;
	}

	curr_tick += announce_remaining;
	announce_remaining = 0;

	sys_clock_set_timeout(next_timeout(), false);

	k_spin_unlock(&timeout_lock, key);

#ifdef CONFIG_TIMESLICING
	z_time_slice();
#endif /* CONFIG_TIMESLICING */
}

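/* Current tick count: the announced base plus whatever the driver
 * reports as elapsed since the last announcement.
 */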
int64_t sys_clock_tick_get(void)
{
	uint64_t t = 0U;

	K_SPINLOCK(&timeout_lock) {
		t = curr_tick + elapsed();
	}
	return t;
}

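/* Truncated 32-bit tick count; without TICKLESS_KERNEL this is a
 * lock-free read of curr_tick.
 */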
uint32_t sys_clock_tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	return (uint32_t)sys_clock_tick_get();
#else
	return (uint32_t)curr_tick;
#endif /* CONFIG_TICKLESS_KERNEL */
}

int64_t z_impl_k_uptime_ticks(void)
{
	return sys_clock_tick_get();
}

#ifdef CONFIG_USERSPACE
static inline int64_t z_vrfy_k_uptime_ticks(void)
{
	return z_impl_k_uptime_ticks();
}
#include <syscalls/k_uptime_ticks_mrsh.c>
#endif /* CONFIG_USERSPACE */

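/* Convert a k_timeout_t (relative or absolute) into an absolute
 * timepoint.  UINT64_MAX encodes "never" (K_FOREVER) and 0 encodes
 * "already expired" (K_NO_WAIT).
 */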
k_timepoint_t sys_timepoint_calc(k_timeout_t timeout)
{
	k_timepoint_t timepoint;

	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		timepoint.tick = UINT64_MAX;
	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		timepoint.tick = 0;
	} else {
		k_ticks_t dt = timeout.ticks;

		if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) && Z_TICK_ABS(dt) >= 0) {
			timepoint.tick = Z_TICK_ABS(dt);
		} else {
			timepoint.tick = sys_clock_tick_get() + MAX(1, dt);
		}
	}

	return timepoint;
}

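/* Inverse of sys_timepoint_calc(): the time remaining from now until
 * the given timepoint, as a relative k_timeout_t.
 */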
k_timeout_t sys_timepoint_timeout(k_timepoint_t timepoint)
{
	uint64_t now, remaining;

	if (timepoint.tick == UINT64_MAX) {
		return K_FOREVER;
	}
	if (timepoint.tick == 0) {
		return K_NO_WAIT;
	}

	now = sys_clock_tick_get();
	remaining = (timepoint.tick > now) ? (timepoint.tick - now) : 0;
	return K_TICKS(remaining);
}

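/* Test-only hook letting ztest suites set the kernel tick count directly */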
#ifdef CONFIG_ZTEST
void z_impl_sys_clock_tick_set(uint64_t tick)
{
	curr_tick = tick;
}

void z_vrfy_sys_clock_tick_set(uint64_t tick)
{
	z_impl_sys_clock_tick_set(tick);
}
#endif /* CONFIG_ZTEST */