/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/spinlock.h>
#include <ksched.h>
#include <timeout_q.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/sys_clock.h>

static uint64_t curr_tick;

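/*
 * The timeout list is a "delta list": each node's dticks field stores the
 * ticks remaining after the previous node expires, so only the head's delta
 * ever needs adjusting as ticks are announced.
 */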
static sys_dlist_t timeout_list = SYS_DLIST_STATIC_INIT(&timeout_list);

/*
 * The timeout code shall take no locks other than its own (timeout_lock), nor
 * shall it call any other subsystem while holding this lock.
 */
static struct k_spinlock timeout_lock;

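/*
 * Longest wait we will program into the system clock. With sloppy idle the
 * clock may be asked to wait "forever" (allowing it to stop ticking in idle
 * at the cost of uptime accuracy); otherwise the wait is clamped so that it
 * still fits a signed 32-bit tick count.
 */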
#define MAX_WAIT (IS_ENABLED(CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE) \
		  ? K_TICKS_FOREVER : INT_MAX)

/* Ticks left to process in the currently-executing sys_clock_announce() */
static int announce_remaining;

#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
unsigned int z_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;

#ifdef CONFIG_USERSPACE
static inline unsigned int z_vrfy_sys_clock_hw_cycles_per_sec_runtime_get(void)
{
	return z_impl_sys_clock_hw_cycles_per_sec_runtime_get();
}
#include <zephyr/syscalls/sys_clock_hw_cycles_per_sec_runtime_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME */

static struct _timeout *first(void)
{
	sys_dnode_t *t = sys_dlist_peek_head(&timeout_list);

	return (t == NULL) ? NULL : CONTAINER_OF(t, struct _timeout, node);
}

static struct _timeout *next(struct _timeout *t)
{
	sys_dnode_t *n = sys_dlist_peek_next(&timeout_list, &t->node);

	return (n == NULL) ? NULL : CONTAINER_OF(n, struct _timeout, node);
}

static void remove_timeout(struct _timeout *t)
{
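	/* Fold this node's delta into its successor (if any) so that all
	 * later timeouts keep their original expiry times.
	 */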
	if (next(t) != NULL) {
		next(t)->dticks += t->dticks;
	}

	sys_dlist_remove(&t->node);
}

static int32_t elapsed(void)
{
	/* While sys_clock_announce() is executing, new relative timeouts will be
	 * scheduled relative to the currently firing timeout's original tick
	 * value (=curr_tick) rather than relative to the current
	 * sys_clock_elapsed().
	 *
	 * This means that timeouts being scheduled from within timeout callbacks
	 * will be scheduled at well-defined offsets from the currently firing
	 * timeout.
	 *
	 * As a side effect, the same will happen if an ISR with higher priority
	 * preempts a timeout callback and schedules a timeout.
	 *
	 * The distinction is implemented by looking at announce_remaining, which
	 * will be non-zero while sys_clock_announce() is executing and zero
	 * otherwise.
	 */
	return announce_remaining == 0 ? sys_clock_elapsed() : 0U;
}

static int32_t next_timeout(int32_t ticks_elapsed)
{
	struct _timeout *to = first();
	int32_t ret;

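	/* With no pending timeout, or with the next expiry too far out to
	 * represent in an int32_t, ask the driver for the maximum wait.
	 */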
	if ((to == NULL) ||
	    ((int64_t)(to->dticks - ticks_elapsed) > (int64_t)INT_MAX)) {
		ret = MAX_WAIT;
	} else {
		ret = MAX(0, to->dticks - ticks_elapsed);
	}

	return ret;
}

void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
		   k_timeout_t timeout)
{
	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		return;
	}

#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(to));
#endif /* CONFIG_KERNEL_COHERENCE */

	__ASSERT(!sys_dnode_is_linked(&to->node), "");
	to->fn = fn;

	K_SPINLOCK(&timeout_lock) {
		struct _timeout *t;
		int32_t ticks_elapsed;
		bool has_elapsed = false;

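		/* A relative timeout is padded by one tick (so it can never
		 * fire early) plus any un-announced ticks already reported by
		 * the driver; an absolute timeout becomes a delta from the
		 * last announced tick (curr_tick), floored at one tick.
		 */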
		if (Z_IS_TIMEOUT_RELATIVE(timeout)) {
			ticks_elapsed = elapsed();
			has_elapsed = true;
			to->dticks = timeout.ticks + 1 + ticks_elapsed;
		} else {
			k_ticks_t ticks = Z_TICK_ABS(timeout.ticks) - curr_tick;

			to->dticks = MAX(1, ticks);
		}

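		/* Walk the delta list: discount this timeout by each node it
		 * passes, and insert it in front of the first node with a
		 * larger remaining delta (adjusting that node's delta down).
		 */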
		for (t = first(); t != NULL; t = next(t)) {
			if (t->dticks > to->dticks) {
				t->dticks -= to->dticks;
				sys_dlist_insert(&t->node, &to->node);
				break;
			}
			to->dticks -= t->dticks;
		}

		if (t == NULL) {
			sys_dlist_append(&timeout_list, &to->node);
		}

		if (to == first() && announce_remaining == 0) {
			if (!has_elapsed) {
				/* If an absolute timeout is the first to
				 * expire, the elapsed tick count still needs
				 * to be read from the system clock.
				 */
				ticks_elapsed = elapsed();
			}
			sys_clock_set_timeout(next_timeout(ticks_elapsed), false);
		}
	}
}

int z_abort_timeout(struct _timeout *to)
{
	int ret = -EINVAL;

	K_SPINLOCK(&timeout_lock) {
		if (sys_dnode_is_linked(&to->node)) {
			bool is_first = (to == first());

			remove_timeout(to);
			ret = 0;
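			/* Removing the list head invalidates the programmed
			 * hardware timeout; reprogram it for the new head.
			 */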
			if (is_first) {
				sys_clock_set_timeout(next_timeout(elapsed()), false);
			}
		}
	}

	return ret;
}

/* must be locked */
static k_ticks_t timeout_rem(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	for (struct _timeout *t = first(); t != NULL; t = next(t)) {
		ticks += t->dticks;
		if (timeout == t) {
			break;
		}
	}

	return ticks;
}

k_ticks_t z_timeout_remaining(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	K_SPINLOCK(&timeout_lock) {
		if (!z_is_inactive_timeout(timeout)) {
			ticks = timeout_rem(timeout) - elapsed();
		}
	}

	return ticks;
}

k_ticks_t z_timeout_expires(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	K_SPINLOCK(&timeout_lock) {
		ticks = curr_tick;
		if (!z_is_inactive_timeout(timeout)) {
			ticks += timeout_rem(timeout);
		}
	}

	return ticks;
}

int32_t z_get_next_timeout_expiry(void)
{
	int32_t ret = (int32_t) K_TICKS_FOREVER;

	K_SPINLOCK(&timeout_lock) {
		ret = next_timeout(elapsed());
	}
	return ret;
}

void sys_clock_announce(int32_t ticks)
{
	k_spinlock_key_t key = k_spin_lock(&timeout_lock);

	/* We release the lock around the callbacks below, so on SMP
	 * systems someone might already be running the loop. Don't
	 * race (which would cause parallel execution of "sequential"
	 * timeouts and confuse apps); just increment the tick count
	 * and return.
	 */
	if (IS_ENABLED(CONFIG_SMP) && (announce_remaining != 0)) {
		announce_remaining += ticks;
		k_spin_unlock(&timeout_lock, key);
		return;
	}

	announce_remaining = ticks;

	struct _timeout *t;

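	/* Expire timeouts one at a time, in order. The lock is dropped
	 * around each callback so that handlers may legally add or abort
	 * other timeouts.
	 */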
	for (t = first();
	     (t != NULL) && (t->dticks <= announce_remaining);
	     t = first()) {
		int dt = t->dticks;

		curr_tick += dt;
		t->dticks = 0;
		remove_timeout(t);

		k_spin_unlock(&timeout_lock, key);
		t->fn(t);
		key = k_spin_lock(&timeout_lock);
		announce_remaining -= dt;
	}

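	/* Charge any announced ticks not consumed by expired timeouts to the
	 * first timeout still pending, then fold them into curr_tick.
	 */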
	if (t != NULL) {
		t->dticks -= announce_remaining;
	}

	curr_tick += announce_remaining;
	announce_remaining = 0;

	sys_clock_set_timeout(next_timeout(0), false);

	k_spin_unlock(&timeout_lock, key);

#ifdef CONFIG_TIMESLICING
	z_time_slice();
#endif /* CONFIG_TIMESLICING */
}

int64_t sys_clock_tick_get(void)
{
	uint64_t t = 0U;

	K_SPINLOCK(&timeout_lock) {
		t = curr_tick + elapsed();
	}
	return t;
}

uint32_t sys_clock_tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	return (uint32_t)sys_clock_tick_get();
#else
	return (uint32_t)curr_tick;
#endif /* CONFIG_TICKLESS_KERNEL */
}

int64_t z_impl_k_uptime_ticks(void)
{
	return sys_clock_tick_get();
}

#ifdef CONFIG_USERSPACE
static inline int64_t z_vrfy_k_uptime_ticks(void)
{
	return z_impl_k_uptime_ticks();
}
#include <zephyr/syscalls/k_uptime_ticks_mrsh.c>
#endif /* CONFIG_USERSPACE */

k_timepoint_t sys_timepoint_calc(k_timeout_t timeout)
{
	k_timepoint_t timepoint;

	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		timepoint.tick = UINT64_MAX;
	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		timepoint.tick = 0;
	} else {
		k_ticks_t dt = timeout.ticks;

		if (Z_IS_TIMEOUT_RELATIVE(timeout)) {
			timepoint.tick = sys_clock_tick_get() + MAX(1, dt);
		} else {
			timepoint.tick = Z_TICK_ABS(dt);
		}
	}

	return timepoint;
}

k_timeout_t sys_timepoint_timeout(k_timepoint_t timepoint)
{
	uint64_t now, remaining;

	if (timepoint.tick == UINT64_MAX) {
		return K_FOREVER;
	}
	if (timepoint.tick == 0) {
		return K_NO_WAIT;
	}

	now = sys_clock_tick_get();
	remaining = (timepoint.tick > now) ? (timepoint.tick - now) : 0;
	return K_TICKS(remaining);
}
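
/*
 * Illustrative usage of the pair above (a sketch, not code from this file):
 * capture an end point once, then rederive a shrinking timeout from it when
 * a wait has to be restarted. some_blocking_op() is a hypothetical stand-in
 * for any API taking a k_timeout_t; sys_timepoint_expired() is assumed to be
 * the companion helper declared in <zephyr/sys_clock.h>.
 *
 *	k_timepoint_t end = sys_timepoint_calc(K_MSEC(100));
 *	int rc;
 *
 *	do {
 *		rc = some_blocking_op(sys_timepoint_timeout(end));
 *	} while ((rc == -EAGAIN) && !sys_timepoint_expired(end));
 */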

#ifdef CONFIG_ZTEST
void z_impl_sys_clock_tick_set(uint64_t tick)
{
	curr_tick = tick;
}

void z_vrfy_sys_clock_tick_set(uint64_t tick)
{
	z_impl_sys_clock_tick_set(tick);
}
#endif /* CONFIG_ZTEST */