/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/spinlock.h>
#include <ksched.h>
#include <zephyr/timeout_q.h>
#include <zephyr/syscall_handler.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/sys_clock.h>

static uint64_t curr_tick;

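/* Pending timeouts live on a single sorted "delta list": each node's
 * dticks field is relative to the node before it, so only the head
 * entry ever needs adjusting as ticks are announced.
 */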
static sys_dlist_t timeout_list = SYS_DLIST_STATIC_INIT(&timeout_list);

static struct k_spinlock timeout_lock;

#define MAX_WAIT (IS_ENABLED(CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE) \
		  ? K_TICKS_FOREVER : INT_MAX)

/* Ticks left to process in the currently-executing sys_clock_announce() */
static int announce_remaining;

#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int z_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_sys_clock_hw_cycles_per_sec_runtime_get(void)
{
	return z_impl_sys_clock_hw_cycles_per_sec_runtime_get();
}
#include <syscalls/sys_clock_hw_cycles_per_sec_runtime_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME */

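/* Head of the timeout list (the next timeout to expire), or NULL if empty */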
static struct _timeout *first(void)
{
	sys_dnode_t *t = sys_dlist_peek_head(&timeout_list);

	return t == NULL ? NULL : CONTAINER_OF(t, struct _timeout, node);
}

static struct _timeout *next(struct _timeout *t)
{
	sys_dnode_t *n = sys_dlist_peek_next(&timeout_list, &t->node);

	return n == NULL ? NULL : CONTAINER_OF(n, struct _timeout, node);
}

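/* Unlink a timeout, crediting its remaining delta to its successor so
 * the delta encoding of the rest of the list stays correct.
 */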
static void remove_timeout(struct _timeout *t)
{
	if (next(t) != NULL) {
		next(t)->dticks += t->dticks;
	}

	sys_dlist_remove(&t->node);
}

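/* Ticks elapsed since the last announcement.  While a
 * sys_clock_announce() is being processed the current tick is the
 * reference point, so report zero.
 */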
static int32_t elapsed(void)
{
	return announce_remaining == 0 ? sys_clock_elapsed() : 0U;
}

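/* Ticks until the first pending timeout expires, clamped to MAX_WAIT */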
static int32_t next_timeout(void)
{
	struct _timeout *to = first();
	int32_t ticks_elapsed = elapsed();
	int32_t ret;

	if ((to == NULL) ||
	    ((int64_t)(to->dticks - ticks_elapsed) > (int64_t)INT_MAX)) {
		ret = MAX_WAIT;
	} else {
		ret = MAX(0, to->dticks - ticks_elapsed);
	}

	return ret;
}

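/* Convert the k_timeout_t to a tick delta (absolute timeouts are
 * rebased against curr_tick), insert it at the right position in the
 * delta list, and reprogram the timer if it became the new head.
 */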
void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
		   k_timeout_t timeout)
{
	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		return;
	}

#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(to));
#endif

	__ASSERT(!sys_dnode_is_linked(&to->node), "");
	to->fn = fn;

	LOCKED(&timeout_lock) {
		struct _timeout *t;

		if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) &&
		    Z_TICK_ABS(timeout.ticks) >= 0) {
			k_ticks_t ticks = Z_TICK_ABS(timeout.ticks) - curr_tick;

			to->dticks = MAX(1, ticks);
		} else {
			to->dticks = timeout.ticks + 1 + elapsed();
		}

		for (t = first(); t != NULL; t = next(t)) {
			if (t->dticks > to->dticks) {
				t->dticks -= to->dticks;
				sys_dlist_insert(&t->node, &to->node);
				break;
			}
			to->dticks -= t->dticks;
		}

		if (t == NULL) {
			sys_dlist_append(&timeout_list, &to->node);
		}

		if (to == first()) {
			sys_clock_set_timeout(next_timeout(), false);
		}
	}
}

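/* Cancel a pending timeout.  Returns 0 on success, or -EINVAL if the
 * timeout was not active.
 */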
int z_abort_timeout(struct _timeout *to)
{
	int ret = -EINVAL;

	LOCKED(&timeout_lock) {
		if (sys_dnode_is_linked(&to->node)) {
			remove_timeout(to);
			ret = 0;
		}
	}

	return ret;
}

/* must be called with timeout_lock held */
static k_ticks_t timeout_rem(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	if (z_is_inactive_timeout(timeout)) {
		return 0;
	}

	for (struct _timeout *t = first(); t != NULL; t = next(t)) {
		ticks += t->dticks;
		if (timeout == t) {
			break;
		}
	}

	return ticks - elapsed();
}

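/* Ticks remaining before the timeout expires (0 if it is inactive) */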
k_ticks_t z_timeout_remaining(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	LOCKED(&timeout_lock) {
		ticks = timeout_rem(timeout);
	}

	return ticks;
}

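/* Absolute uptime tick at which the timeout will expire */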
k_ticks_t z_timeout_expires(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	LOCKED(&timeout_lock) {
		ticks = curr_tick + timeout_rem(timeout);
	}

	return ticks;
}

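/* Ticks until the next timeout expires (MAX_WAIT if nothing is pending) */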
int32_t z_get_next_timeout_expiry(void)
{
	int32_t ret = (int32_t) K_TICKS_FOREVER;

	LOCKED(&timeout_lock) {
		ret = next_timeout();
	}
	return ret;
}

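/* Called by the timer driver to announce elapsed ticks.  Expired
 * timeouts are removed and their callbacks invoked in order, with the
 * lock released around each callback.
 */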
void sys_clock_announce(int32_t ticks)
{
	k_spinlock_key_t key = k_spin_lock(&timeout_lock);

	/* We release the lock around the callbacks below, so on SMP
	 * systems someone might already be running the loop.  Don't
	 * race (which would cause parallel execution of "sequential"
	 * timeouts and confuse apps), just increment the tick count
	 * and return.
	 */
	if (IS_ENABLED(CONFIG_SMP) && (announce_remaining != 0)) {
		announce_remaining += ticks;
		k_spin_unlock(&timeout_lock, key);
		return;
	}

	announce_remaining = ticks;

	struct _timeout *t = first();

	for (t = first();
	     (t != NULL) && (t->dticks <= announce_remaining);
	     t = first()) {
		int dt = t->dticks;

		curr_tick += dt;
		t->dticks = 0;
		remove_timeout(t);

		k_spin_unlock(&timeout_lock, key);
		t->fn(t);
		key = k_spin_lock(&timeout_lock);
		announce_remaining -= dt;
	}

	if (t != NULL) {
		t->dticks -= announce_remaining;
	}

	curr_tick += announce_remaining;
	announce_remaining = 0;

	sys_clock_set_timeout(next_timeout(), false);

	k_spin_unlock(&timeout_lock, key);

#ifdef CONFIG_TIMESLICING
	z_time_slice();
#endif
}

int64_t sys_clock_tick_get(void)
{
	uint64_t t = 0U;

	LOCKED(&timeout_lock) {
		t = curr_tick + elapsed();
	}
	return t;
}

uint32_t sys_clock_tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	return (uint32_t)sys_clock_tick_get();
#else
	return (uint32_t)curr_tick;
#endif
}

int64_t z_impl_k_uptime_ticks(void)
{
	return sys_clock_tick_get();
}

#ifdef CONFIG_USERSPACE
static inline int64_t z_vrfy_k_uptime_ticks(void)
{
	return z_impl_k_uptime_ticks();
}
#include <syscalls/k_uptime_ticks_mrsh.c>
#endif

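/* Spin-wait for the requested number of microseconds, using the
 * hardware cycle counter unless the architecture supplies its own
 * busy-wait implementation.
 */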
void z_impl_k_busy_wait(uint32_t usec_to_wait)
{
	SYS_PORT_TRACING_FUNC_ENTER(k_thread, busy_wait, usec_to_wait);
	if (usec_to_wait == 0U) {
		SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
		return;
	}

#if !defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
	uint32_t start_cycles = k_cycle_get_32();

	/* use 64-bit math to prevent overflow when multiplying */
	uint32_t cycles_to_wait = (uint32_t)(
		(uint64_t)usec_to_wait *
		(uint64_t)sys_clock_hw_cycles_per_sec() /
		(uint64_t)USEC_PER_SEC
	);

	for (;;) {
		uint32_t current_cycles = k_cycle_get_32();

		/* this handles the rollover on an unsigned 32-bit value */
		if ((current_cycles - start_cycles) >= cycles_to_wait) {
			break;
		}
	}
#else
	arch_busy_wait(usec_to_wait);
#endif /* CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT */
	SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_busy_wait(uint32_t usec_to_wait)
{
	z_impl_k_busy_wait(usec_to_wait);
}
#include <syscalls/k_busy_wait_mrsh.c>
#endif /* CONFIG_USERSPACE */

/* Returns the uptime expiration (relative to an unlocked "now"!) of a
 * timeout object.  When used correctly, this should be called once,
 * synchronously with the user passing a new timeout value.  It should
 * not be used iteratively to adjust a timeout.
 */
uint64_t sys_clock_timeout_end_calc(k_timeout_t timeout)
{
	k_ticks_t dt;

	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		return UINT64_MAX;
	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		return sys_clock_tick_get();
	} else {
		dt = timeout.ticks;

		if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) && Z_TICK_ABS(dt) >= 0) {
			return Z_TICK_ABS(dt);
		}
		return sys_clock_tick_get() + MAX(1, dt);
	}
}

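/* Test-only hook letting ztest cases set the kernel tick count directly */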
#ifdef CONFIG_ZTEST
void z_impl_sys_clock_tick_set(uint64_t tick)
{
	curr_tick = tick;
}

void z_vrfy_sys_clock_tick_set(uint64_t tick)
{
	z_impl_sys_clock_tick_set(tick);
}
#endif