/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <spinlock.h>
#include <ksched.h>
#include <timeout_q.h>
#include <syscall_handler.h>
#include <drivers/timer/system_timer.h>
#include <sys_clock.h>

static uint64_t curr_tick;

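/* The timeout list is kept in "delta" form: each node's dticks field
 * holds the ticks between its own expiry and that of the node before
 * it, with the head relative to curr_tick.  Insertion and removal
 * below maintain this invariant, so only the head's delta ever needs
 * to be compared against elapsed time.
 */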
static sys_dlist_t timeout_list = SYS_DLIST_STATIC_INIT(&timeout_list);

static struct k_spinlock timeout_lock;

#define MAX_WAIT (IS_ENABLED(CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE) \
		  ? K_TICKS_FOREVER : INT_MAX)

/* Ticks left to process in the currently-executing sys_clock_announce() */
static int announce_remaining;

#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int z_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_sys_clock_hw_cycles_per_sec_runtime_get(void)
{
	return z_impl_sys_clock_hw_cycles_per_sec_runtime_get();
}
#include <syscalls/sys_clock_hw_cycles_per_sec_runtime_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME */

static struct _timeout *first(void)
{
	sys_dnode_t *t = sys_dlist_peek_head(&timeout_list);

	return t == NULL ? NULL : CONTAINER_OF(t, struct _timeout, node);
}

static struct _timeout *next(struct _timeout *t)
{
	sys_dnode_t *n = sys_dlist_peek_next(&timeout_list, &t->node);

	return n == NULL ? NULL : CONTAINER_OF(n, struct _timeout, node);
}

static void remove_timeout(struct _timeout *t)
{
	if (next(t) != NULL) {
		next(t)->dticks += t->dticks;
	}

	sys_dlist_remove(&t->node);
}

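/* Ticks the driver has counted since the last announcement.  While
 * sys_clock_announce() is mid-flight this reads as zero, because the
 * ticks being distributed via announce_remaining already cover the
 * driver's elapsed count.
 */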
static int32_t elapsed(void)
{
	return announce_remaining == 0 ? sys_clock_elapsed() : 0U;
}

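/* Ticks from "now" until the next registered expiry, clamped to
 * [0, MAX_WAIT] and further capped by the current timeslice, if any.
 */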
static int32_t next_timeout(void)
{
	struct _timeout *to = first();
	int32_t ticks_elapsed = elapsed();
	int32_t ret = to == NULL ? MAX_WAIT
		: CLAMP(to->dticks - ticks_elapsed, 0, MAX_WAIT);

#ifdef CONFIG_TIMESLICING
	if (_current_cpu->slice_ticks && _current_cpu->slice_ticks < ret) {
		ret = _current_cpu->slice_ticks;
	}
#endif
	return ret;
}

void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
		   k_timeout_t timeout)
{
	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		return;
	}

#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(to));
#endif

	__ASSERT(!sys_dnode_is_linked(&to->node), "");
	to->fn = fn;

	LOCKED(&timeout_lock) {
		struct _timeout *t;

		if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) &&
		    Z_TICK_ABS(timeout.ticks) >= 0) {
			k_ticks_t ticks = Z_TICK_ABS(timeout.ticks) - curr_tick;

			to->dticks = MAX(1, ticks);
		} else {
			to->dticks = timeout.ticks + 1 + elapsed();
		}

		for (t = first(); t != NULL; t = next(t)) {
			if (t->dticks > to->dticks) {
				t->dticks -= to->dticks;
				sys_dlist_insert(&t->node, &to->node);
				break;
			}
			to->dticks -= t->dticks;
		}

		if (t == NULL) {
			sys_dlist_append(&timeout_list, &to->node);
		}

		if (to == first()) {
#ifdef CONFIG_TIMESLICING
			/*
			 * This is not ideal, since it does not account
			 * for the time elapsed since the last
			 * announcement, and slice_ticks is based on
			 * that.  It means that the time remaining until
			 * the next announcement can be less than
			 * slice_ticks.
			 */
			int32_t next_time = next_timeout();

			if (next_time == 0 ||
			    _current_cpu->slice_ticks != next_time) {
				sys_clock_set_timeout(next_time, false);
			}
#else
			sys_clock_set_timeout(next_timeout(), false);
#endif /* CONFIG_TIMESLICING */
		}
	}
}
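
/* A minimal usage sketch (illustrative only, not part of this file):
 * arming a one-shot callback roughly 100 ticks out.  The expiry_fn and
 * my_timeout names are hypothetical.
 *
 *	static void expiry_fn(struct _timeout *t)
 *	{
 *		// invoked from sys_clock_announce(), i.e. ISR context
 *	}
 *
 *	static struct _timeout my_timeout;
 *
 *	z_add_timeout(&my_timeout, expiry_fn, K_TICKS(100));
 */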

int z_abort_timeout(struct _timeout *to)
{
	int ret = -EINVAL;

	LOCKED(&timeout_lock) {
		if (sys_dnode_is_linked(&to->node)) {
			remove_timeout(to);
			ret = 0;
		}
	}

	return ret;
}

/* Must be called with timeout_lock held */
static k_ticks_t timeout_rem(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	if (z_is_inactive_timeout(timeout)) {
		return 0;
	}

	for (struct _timeout *t = first(); t != NULL; t = next(t)) {
		ticks += t->dticks;
		if (timeout == t) {
			break;
		}
	}

	return ticks - elapsed();
}

k_ticks_t z_timeout_remaining(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	LOCKED(&timeout_lock) {
		ticks = timeout_rem(timeout);
	}

	return ticks;
}

k_ticks_t z_timeout_expires(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	LOCKED(&timeout_lock) {
		ticks = curr_tick + timeout_rem(timeout);
	}

	return ticks;
}
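
/* z_timeout_remaining() answers "how many ticks from now" while
 * z_timeout_expires() answers "at which absolute tick count": the same
 * delta sum, with the latter adding curr_tick to make it absolute.
 */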

int32_t z_get_next_timeout_expiry(void)
{
	int32_t ret = (int32_t) K_TICKS_FOREVER;

	LOCKED(&timeout_lock) {
		ret = next_timeout();
	}
	return ret;
}

void z_set_timeout_expiry(int32_t ticks, bool is_idle)
{
	LOCKED(&timeout_lock) {
		int next_to = next_timeout();
		bool sooner = (next_to == K_TICKS_FOREVER)
			      || (ticks <= next_to);
		bool imminent = next_to <= 1;

		/* Only set new timeouts when they are sooner than
		 * what we have.  Also don't try to set a timeout when
		 * one is about to expire: drivers have internal logic
		 * that will bump the timeout to the "next" tick if
		 * it's not considered to be settable as directed.
		 * SMP can't use this optimization though: we don't
		 * know when context switches happen until interrupt
		 * exit and so can't get the timeslicing clamp folded
		 * in.
		 */
		if (!imminent && (sooner || IS_ENABLED(CONFIG_SMP))) {
			sys_clock_set_timeout(MIN(ticks, next_to), is_idle);
		}
	}
}

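/* Called out of the timer driver's interrupt handler with the number
 * of ticks elapsed since the last call.  Expired callbacks run with
 * the timeout lock dropped, so they are free to re-add or abort
 * timeouts themselves.
 */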
void sys_clock_announce(int32_t ticks)
{
#ifdef CONFIG_TIMESLICING
	z_time_slice(ticks);
#endif

	k_spinlock_key_t key = k_spin_lock(&timeout_lock);

	announce_remaining = ticks;

	while (first() != NULL && first()->dticks <= announce_remaining) {
		struct _timeout *t = first();
		int dt = t->dticks;

		curr_tick += dt;
		announce_remaining -= dt;
		t->dticks = 0;
		remove_timeout(t);

		k_spin_unlock(&timeout_lock, key);
		t->fn(t);
		key = k_spin_lock(&timeout_lock);
	}

	if (first() != NULL) {
		first()->dticks -= announce_remaining;
	}

	curr_tick += announce_remaining;
	announce_remaining = 0;

	sys_clock_set_timeout(next_timeout(), false);

	k_spin_unlock(&timeout_lock, key);
}

int64_t sys_clock_tick_get(void)
{
	uint64_t t = 0U;

	LOCKED(&timeout_lock) {
		t = curr_tick + sys_clock_elapsed();
	}
	return t;
}

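/* In tickless mode curr_tick only advances inside sys_clock_announce(),
 * which may have last run many ticks ago, so the 32-bit getter must
 * take the full (locked) path above rather than truncating curr_tick
 * directly.
 */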
uint32_t sys_clock_tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	return (uint32_t)sys_clock_tick_get();
#else
	return (uint32_t)curr_tick;
#endif
}

int64_t z_impl_k_uptime_ticks(void)
{
	return sys_clock_tick_get();
}

#ifdef CONFIG_USERSPACE
static inline int64_t z_vrfy_k_uptime_ticks(void)
{
	return z_impl_k_uptime_ticks();
}
#include <syscalls/k_uptime_ticks_mrsh.c>
#endif

void z_impl_k_busy_wait(uint32_t usec_to_wait)
{
	SYS_PORT_TRACING_FUNC_ENTER(k_thread, busy_wait, usec_to_wait);
	if (usec_to_wait == 0U) {
		SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
		return;
	}

#if !defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
	uint32_t start_cycles = k_cycle_get_32();

	/* use 64-bit math to prevent overflow when multiplying */
	uint32_t cycles_to_wait = (uint32_t)(
		(uint64_t)usec_to_wait *
		(uint64_t)sys_clock_hw_cycles_per_sec() /
		(uint64_t)USEC_PER_SEC
	);
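
	/* Worked example (hypothetical 100 MHz clock): waiting 1000 us
	 * needs 1000 * 100000000 / 1000000 = 100000 cycles.  The
	 * intermediate product (10^11) exceeds UINT32_MAX, which is why
	 * the multiplication above is done in 64 bits before the final
	 * truncation back to uint32_t.
	 */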

	for (;;) {
		uint32_t current_cycles = k_cycle_get_32();

		/* this handles the rollover on an unsigned 32-bit value */
		if ((current_cycles - start_cycles) >= cycles_to_wait) {
			break;
		}
	}
#else
	arch_busy_wait(usec_to_wait);
#endif /* CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT */
	SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_busy_wait(uint32_t usec_to_wait)
{
	z_impl_k_busy_wait(usec_to_wait);
}
#include <syscalls/k_busy_wait_mrsh.c>
#endif /* CONFIG_USERSPACE */

/* Returns the uptime expiration (relative to an unlocked "now"!) of a
 * timeout object.  When used correctly, this should be called once,
 * synchronously with the user passing a new timeout value.  It should
 * not be used iteratively to adjust a timeout.
 */
uint64_t sys_clock_timeout_end_calc(k_timeout_t timeout)
{
	k_ticks_t dt;

	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		return UINT64_MAX;
	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		return sys_clock_tick_get();
	} else {
		dt = timeout.ticks;

		if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) && Z_TICK_ABS(dt) >= 0) {
			return Z_TICK_ABS(dt);
		}
		return sys_clock_tick_get() + MAX(1, dt);
	}
}
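
/* Usage sketch (illustrative): the intended pattern is to compute the
 * absolute deadline once up front, then derive the remaining wait from
 * it on each retry rather than re-converting the k_timeout_t (the
 * K_FOREVER case, where end is UINT64_MAX, is assumed to be handled
 * separately).
 *
 *	uint64_t end = sys_clock_timeout_end_calc(timeout);
 *	...
 *	int64_t remaining = end - sys_clock_tick_get();
 */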