/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/spinlock.h>
#include <ksched.h>
#include <zephyr/timeout_q.h>
#include <zephyr/syscall_handler.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/sys_clock.h>

static uint64_t curr_tick;

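/* The timeout list is kept in "delta" form: each element's dticks is
 * the number of ticks between its expiry and that of the element
 * before it (or the current tick, for the head).  An entry's total
 * remaining delay is therefore the sum of all dticks up to and
 * including its own.
 */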
static sys_dlist_t timeout_list = SYS_DLIST_STATIC_INIT(&timeout_list);

static struct k_spinlock timeout_lock;

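/* Longest wait we will program into the timer driver: unbounded when
 * "sloppy idle" is allowed, otherwise capped (the INT_MAX bound keeps
 * the tick count passed to sys_clock_set_timeout() within int range).
 */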
#define MAX_WAIT (IS_ENABLED(CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE) \
		  ? K_TICKS_FOREVER : INT_MAX)

/* Ticks left to process in the currently-executing sys_clock_announce() */
static int announce_remaining;

#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int z_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_sys_clock_hw_cycles_per_sec_runtime_get(void)
{
	return z_impl_sys_clock_hw_cycles_per_sec_runtime_get();
}
#include <syscalls/sys_clock_hw_cycles_per_sec_runtime_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME */

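/* List traversal helpers: the head of the timeout list and the
 * element after a given one, converting dlist nodes back to their
 * containing struct _timeout (NULL at the end of the list).
 */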
static struct _timeout *first(void)
{
	sys_dnode_t *t = sys_dlist_peek_head(&timeout_list);

	return t == NULL ? NULL : CONTAINER_OF(t, struct _timeout, node);
}

static struct _timeout *next(struct _timeout *t)
{
	sys_dnode_t *n = sys_dlist_peek_next(&timeout_list, &t->node);

	return n == NULL ? NULL : CONTAINER_OF(n, struct _timeout, node);
}

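/* Unlink a timeout while preserving the delta encoding: whatever
 * dticks it still carried is folded into the element after it.
 */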
static void remove_timeout(struct _timeout *t)
{
	if (next(t) != NULL) {
		next(t)->dticks += t->dticks;
	}

	sys_dlist_remove(&t->node);
}

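/* Ticks elapsed since the last announcement.  While
 * sys_clock_announce() is executing, the outstanding ticks are
 * tracked in announce_remaining instead, so report zero to avoid
 * counting them twice.
 */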
static int32_t elapsed(void)
{
	return announce_remaining == 0 ? sys_clock_elapsed() : 0U;
}

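/* Ticks until the next timeout expires, clamped to [0, MAX_WAIT],
 * suitable for programming into the timer driver.
 */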
static int32_t next_timeout(void)
{
	struct _timeout *to = first();
	int32_t ticks_elapsed = elapsed();
	int32_t ret;

	if ((to == NULL) ||
	    ((int64_t)(to->dticks - ticks_elapsed) > (int64_t)INT_MAX)) {
		ret = MAX_WAIT;
	} else {
		ret = MAX(0, to->dticks - ticks_elapsed);
	}

	return ret;
}

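/* Arm a timeout: convert the requested k_timeout_t into a delta from
 * "now" (honoring absolute Z_TICK_ABS() deadlines when
 * CONFIG_TIMEOUT_64BIT is set), splice it into the sorted delta list,
 * and reprogram the timer driver if it became the new head.
 *
 * Illustrative caller (hypothetical, not part of this file):
 *
 *	static struct _timeout my_to;
 *	static void my_expiry(struct _timeout *t) { ... }
 *	...
 *	z_add_timeout(&my_to, my_expiry, K_TICKS(100));
 */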
void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
		   k_timeout_t timeout)
{
	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		return;
	}

#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(to));
#endif

	__ASSERT(!sys_dnode_is_linked(&to->node), "");
	to->fn = fn;

	LOCKED(&timeout_lock) {
		struct _timeout *t;

		if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) &&
		    Z_TICK_ABS(timeout.ticks) >= 0) {
			k_ticks_t ticks = Z_TICK_ABS(timeout.ticks) - curr_tick;

			to->dticks = MAX(1, ticks);
		} else {
			to->dticks = timeout.ticks + 1 + elapsed();
		}

		for (t = first(); t != NULL; t = next(t)) {
			if (t->dticks > to->dticks) {
				t->dticks -= to->dticks;
				sys_dlist_insert(&t->node, &to->node);
				break;
			}
			to->dticks -= t->dticks;
		}

		if (t == NULL) {
			sys_dlist_append(&timeout_list, &to->node);
		}

		if (to == first()) {
			sys_clock_set_timeout(next_timeout(), false);
		}
	}
}

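/* Cancel a pending timeout.  Returns 0 on success, or -EINVAL if the
 * timeout was not linked (never added, already expired, or already
 * aborted).
 */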
int z_abort_timeout(struct _timeout *to)
{
	int ret = -EINVAL;

	LOCKED(&timeout_lock) {
		if (sys_dnode_is_linked(&to->node)) {
			remove_timeout(to);
			ret = 0;
		}
	}

	return ret;
}

/* Ticks remaining on a timeout; timeout_lock must be held by the caller */
static k_ticks_t timeout_rem(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	if (z_is_inactive_timeout(timeout)) {
		return 0;
	}

	for (struct _timeout *t = first(); t != NULL; t = next(t)) {
		ticks += t->dticks;
		if (timeout == t) {
			break;
		}
	}

	return ticks - elapsed();
}

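/* Locked wrappers around timeout_rem(): the ticks remaining on a
 * timeout, and the absolute tick at which it will expire.
 */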
k_ticks_t z_timeout_remaining(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	LOCKED(&timeout_lock) {
		ticks = timeout_rem(timeout);
	}

	return ticks;
}

k_ticks_t z_timeout_expires(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	LOCKED(&timeout_lock) {
		ticks = curr_tick + timeout_rem(timeout);
	}

	return ticks;
}

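/* Ticks until the next scheduled timeout, or K_TICKS_FOREVER if none
 * is queued; typically consulted by idle/tickless code.
 */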
int32_t z_get_next_timeout_expiry(void)
{
	int32_t ret = (int32_t) K_TICKS_FOREVER;

	LOCKED(&timeout_lock) {
		ret = next_timeout();
	}
	return ret;
}

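/* Timer-driver entry point: announce that "ticks" ticks have elapsed.
 * Expired timeouts are dequeued and their callbacks run (with the
 * lock dropped around each callback), then the driver is reprogrammed
 * for the next expiry.
 */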
void sys_clock_announce(int32_t ticks)
{
	k_spinlock_key_t key = k_spin_lock(&timeout_lock);

	/* We release the lock around the callbacks below, so on SMP
	 * systems someone might already be running the loop.  Don't
	 * race (which would cause parallel execution of "sequential"
	 * timeouts and confuse apps), just increment the tick count
	 * and return.
	 */
	if (IS_ENABLED(CONFIG_SMP) && (announce_remaining != 0)) {
		announce_remaining += ticks;
		k_spin_unlock(&timeout_lock, key);
		return;
	}

	announce_remaining = ticks;

	struct _timeout *t;

	for (t = first();
	     (t != NULL) && (t->dticks <= announce_remaining);
	     t = first()) {
		int dt = t->dticks;

		curr_tick += dt;
		t->dticks = 0;
		remove_timeout(t);

		k_spin_unlock(&timeout_lock, key);
		t->fn(t);
		key = k_spin_lock(&timeout_lock);
		announce_remaining -= dt;
	}

	if (t != NULL) {
		t->dticks -= announce_remaining;
	}

	curr_tick += announce_remaining;
	announce_remaining = 0;

	sys_clock_set_timeout(next_timeout(), false);

	k_spin_unlock(&timeout_lock, key);

#ifdef CONFIG_TIMESLICING
	z_time_slice();
#endif
}

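/* Current tick count: the last announced tick plus whatever the
 * driver reports has elapsed since that announcement.
 */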
int64_t sys_clock_tick_get(void)
{
	uint64_t t = 0U;

	LOCKED(&timeout_lock) {
		t = curr_tick + elapsed();
	}
	return t;
}

uint32_t sys_clock_tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	return (uint32_t)sys_clock_tick_get();
#else
	return (uint32_t)curr_tick;
#endif
}

int64_t z_impl_k_uptime_ticks(void)
{
	return sys_clock_tick_get();
}

#ifdef CONFIG_USERSPACE
static inline int64_t z_vrfy_k_uptime_ticks(void)
{
	return z_impl_k_uptime_ticks();
}
#include <syscalls/k_uptime_ticks_mrsh.c>
#endif

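/* Spin (without sleeping) for at least the requested number of
 * microseconds, using either the generic cycle-counting loop below or
 * an architecture-provided implementation.
 */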
void z_impl_k_busy_wait(uint32_t usec_to_wait)
{
	SYS_PORT_TRACING_FUNC_ENTER(k_thread, busy_wait, usec_to_wait);
	if (usec_to_wait == 0U) {
		SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
		return;
	}

#if !defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
	uint32_t start_cycles = k_cycle_get_32();

	/* use 64-bit math to prevent overflow when multiplying */
	uint32_t cycles_to_wait = (uint32_t)(
		(uint64_t)usec_to_wait *
		(uint64_t)sys_clock_hw_cycles_per_sec() /
		(uint64_t)USEC_PER_SEC
	);

	for (;;) {
		uint32_t current_cycles = k_cycle_get_32();

		/* this handles the rollover on an unsigned 32-bit value */
		if ((current_cycles - start_cycles) >= cycles_to_wait) {
			break;
		}
	}
#else
	arch_busy_wait(usec_to_wait);
#endif /* CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT */
	SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_busy_wait(uint32_t usec_to_wait)
{
	z_impl_k_busy_wait(usec_to_wait);
}
#include <syscalls/k_busy_wait_mrsh.c>
#endif /* CONFIG_USERSPACE */

/* Returns the uptime expiration (relative to an unlocked "now"!) of a
 * timeout object.  When used correctly, this should be called once,
 * synchronously with the user passing a new timeout value.  It should
 * not be used iteratively to adjust a timeout.
 */
uint64_t sys_clock_timeout_end_calc(k_timeout_t timeout)
{
	k_ticks_t dt;

	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		return UINT64_MAX;
	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		return sys_clock_tick_get();
	} else {
		dt = timeout.ticks;

		if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) && Z_TICK_ABS(dt) >= 0) {
			return Z_TICK_ABS(dt);
		}
		return sys_clock_tick_get() + MAX(1, dt);
	}
}
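
/* Worked example (illustrative): with the tick count at 1000,
 * sys_clock_timeout_end_calc(K_TICKS(10)) returns roughly 1010 and
 * always at least one tick in the future, while an absolute timeout
 * such as K_TIMEOUT_ABS_TICKS(1500) returns exactly 1500.
 */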

#ifdef CONFIG_ZTEST
void z_impl_sys_clock_tick_set(uint64_t tick)
{
	curr_tick = tick;
}

void z_vrfy_sys_clock_tick_set(uint64_t tick)
{
	z_impl_sys_clock_tick_set(tick);
}
#endif