/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <spinlock.h>
#include <ksched.h>
#include <timeout_q.h>
#include <syscall_handler.h>
#include <drivers/timer/system_timer.h>
#include <sys_clock.h>

static uint64_t curr_tick;

static sys_dlist_t timeout_list = SYS_DLIST_STATIC_INIT(&timeout_list);

static struct k_spinlock timeout_lock;

#define MAX_WAIT (IS_ENABLED(CONFIG_SYSTEM_CLOCK_SLOPPY_IDLE) \
		  ? K_TICKS_FOREVER : INT_MAX)

/* Ticks left to process in the currently-executing sys_clock_announce() */
static int announce_remaining;

#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int z_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_sys_clock_hw_cycles_per_sec_runtime_get(void)
{
	return z_impl_sys_clock_hw_cycles_per_sec_runtime_get();
}
#include <syscalls/sys_clock_hw_cycles_per_sec_runtime_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME */

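/* The timeout list is a delta list: each node's dticks holds the number
 * of ticks between its expiry and the expiry of the node before it, so
 * only the head entry needs adjusting as time is announced.
 */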
static struct _timeout *first(void)
{
	sys_dnode_t *t = sys_dlist_peek_head(&timeout_list);

	return t == NULL ? NULL : CONTAINER_OF(t, struct _timeout, node);
}

static struct _timeout *next(struct _timeout *t)
{
	sys_dnode_t *n = sys_dlist_peek_next(&timeout_list, &t->node);

	return n == NULL ? NULL : CONTAINER_OF(n, struct _timeout, node);
}

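/* Unlink a timeout from the delta list, folding its remaining delta into
 * its successor so later entries keep their absolute expiry times.
 */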
static void remove_timeout(struct _timeout *t)
{
	if (next(t) != NULL) {
		next(t)->dticks += t->dticks;
	}

	sys_dlist_remove(&t->node);
}

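/* Ticks elapsed since the last announcement.  While sys_clock_announce()
 * is executing, those ticks are already being accounted for, so report
 * zero to avoid double-counting.
 */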
static int32_t elapsed(void)
{
	return announce_remaining == 0 ? sys_clock_elapsed() : 0U;
}

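/* Ticks from "now" until the nearest timeout expires, clamped to the
 * [0, MAX_WAIT] range the timer driver can be programmed with.  With
 * CONFIG_TIMESLICING, the result is further capped by the current slice.
 */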
static int32_t next_timeout(void)
{
	struct _timeout *to = first();
	int32_t ticks_elapsed = elapsed();
	int32_t ret = to == NULL ? MAX_WAIT
		: CLAMP(to->dticks - ticks_elapsed, 0, MAX_WAIT);

#ifdef CONFIG_TIMESLICING
	if (_current_cpu->slice_ticks && _current_cpu->slice_ticks < ret) {
		ret = _current_cpu->slice_ticks;
	}
#endif
	return ret;
}

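/* Insert a timeout into the delta list and, if it became the new head,
 * reprogram the timer driver so its interrupt is not delivered late.
 */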
void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
		   k_timeout_t timeout)
{
	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		return;
	}

#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(to));
#endif

	__ASSERT(!sys_dnode_is_linked(&to->node), "");
	to->fn = fn;

	LOCKED(&timeout_lock) {
		struct _timeout *t;

		if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) &&
		    Z_TICK_ABS(timeout.ticks) >= 0) {
			k_ticks_t ticks = Z_TICK_ABS(timeout.ticks) - curr_tick;

			to->dticks = MAX(1, ticks);
		} else {
			to->dticks = timeout.ticks + 1 + elapsed();
		}

		for (t = first(); t != NULL; t = next(t)) {
			if (t->dticks > to->dticks) {
				t->dticks -= to->dticks;
				sys_dlist_insert(&t->node, &to->node);
				break;
			}
			to->dticks -= t->dticks;
		}

		if (t == NULL) {
			sys_dlist_append(&timeout_list, &to->node);
		}

		if (to == first()) {
#if CONFIG_TIMESLICING
			/*
			 * This is not ideal, since it does not
			 * account for the time elapsed since the
			 * last announcement, and slice_ticks is based
			 * on that. It means that the time remaining for
			 * the next announcement can be less than
			 * slice_ticks.
			 */
			int32_t next_time = next_timeout();

			if (next_time == 0 ||
			    _current_cpu->slice_ticks != next_time) {
				sys_clock_set_timeout(next_time, false);
			}
#else
			sys_clock_set_timeout(next_timeout(), false);
#endif /* CONFIG_TIMESLICING */
		}
	}
}

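/* Cancel a pending timeout.  Returns 0 on success, or -EINVAL if the
 * timeout was not active (already expired or never added).
 */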
int z_abort_timeout(struct _timeout *to)
{
	int ret = -EINVAL;

	LOCKED(&timeout_lock) {
		if (sys_dnode_is_linked(&to->node)) {
			remove_timeout(to);
			ret = 0;
		}
	}

	return ret;
}

/* must be locked */
static k_ticks_t timeout_rem(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	if (z_is_inactive_timeout(timeout)) {
		return 0;
	}

	for (struct _timeout *t = first(); t != NULL; t = next(t)) {
		ticks += t->dticks;
		if (timeout == t) {
			break;
		}
	}

	return ticks - elapsed();
}

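/* Ticks remaining until the timeout expires, or 0 if it is inactive. */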
k_ticks_t z_timeout_remaining(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	LOCKED(&timeout_lock) {
		ticks = timeout_rem(timeout);
	}

	return ticks;
}

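/* Absolute tick count at which the timeout will expire. */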
k_ticks_t z_timeout_expires(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	LOCKED(&timeout_lock) {
		ticks = curr_tick + timeout_rem(timeout);
	}

	return ticks;
}

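/* Ticks until the next timeout expires, taken under the timeout lock. */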
int32_t z_get_next_timeout_expiry(void)
{
	int32_t ret = (int32_t) K_TICKS_FOREVER;

	LOCKED(&timeout_lock) {
		ret = next_timeout();
	}
	return ret;
}

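/* Request a timer interrupt in at most "ticks" ticks, without pushing
 * out an earlier expiry that is already programmed.
 */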
void z_set_timeout_expiry(int32_t ticks, bool is_idle)
{
	LOCKED(&timeout_lock) {
		int next_to = next_timeout();
		bool sooner = (next_to == K_TICKS_FOREVER)
			      || (ticks <= next_to);
		bool imminent = next_to <= 1;

		/* Only set new timeouts when they are sooner than
		 * what we have. Also don't try to set a timeout when
		 * one is about to expire: drivers have internal logic
		 * that will bump the timeout to the "next" tick if
		 * it's not considered to be settable as directed.
		 * SMP can't use this optimization though: we don't
		 * know when context switches happen until interrupt
		 * exit and so can't get the timeslicing clamp folded
		 * in.
		 */
		if (!imminent && (sooner || IS_ENABLED(CONFIG_SMP))) {
			sys_clock_set_timeout(MIN(ticks, next_to), is_idle);
		}
	}
}

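/* Called from the timer driver to announce that "ticks" ticks have
 * elapsed.  Expired timeouts are removed and their callbacks invoked
 * with the lock dropped, then the driver is reprogrammed for the next
 * expiry.
 */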
void sys_clock_announce(int32_t ticks)
{
#ifdef CONFIG_TIMESLICING
	z_time_slice(ticks);
#endif

	k_spinlock_key_t key = k_spin_lock(&timeout_lock);

	announce_remaining = ticks;

	while (first() != NULL && first()->dticks <= announce_remaining) {
		struct _timeout *t = first();
		int dt = t->dticks;

		curr_tick += dt;
		announce_remaining -= dt;
		t->dticks = 0;
		remove_timeout(t);

		k_spin_unlock(&timeout_lock, key);
		t->fn(t);
		key = k_spin_lock(&timeout_lock);
	}

	if (first() != NULL) {
		first()->dticks -= announce_remaining;
	}

	curr_tick += announce_remaining;
	announce_remaining = 0;

	sys_clock_set_timeout(next_timeout(), false);

	k_spin_unlock(&timeout_lock, key);
}

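/* Current 64-bit tick count: announced ticks plus whatever the driver
 * reports has elapsed since the last announcement.
 */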
int64_t sys_clock_tick_get(void)
{
	uint64_t t = 0U;

	LOCKED(&timeout_lock) {
		t = curr_tick + sys_clock_elapsed();
	}
	return t;
}

uint32_t sys_clock_tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	return (uint32_t)sys_clock_tick_get();
#else
	return (uint32_t)curr_tick;
#endif
}

int64_t z_impl_k_uptime_ticks(void)
{
	return sys_clock_tick_get();
}

#ifdef CONFIG_USERSPACE
static inline int64_t z_vrfy_k_uptime_ticks(void)
{
	return z_impl_k_uptime_ticks();
}
#include <syscalls/k_uptime_ticks_mrsh.c>
#endif

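/* Spin for the requested number of microseconds without sleeping, using
 * the hardware cycle counter unless the architecture provides its own
 * busy-wait implementation.
 */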
void z_impl_k_busy_wait(uint32_t usec_to_wait)
{
	SYS_PORT_TRACING_FUNC_ENTER(k_thread, busy_wait, usec_to_wait);
	if (usec_to_wait == 0U) {
		SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
		return;
	}

#if !defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
	uint32_t start_cycles = k_cycle_get_32();

	/* use 64-bit math to prevent overflow when multiplying */
	uint32_t cycles_to_wait = (uint32_t)(
		(uint64_t)usec_to_wait *
		(uint64_t)sys_clock_hw_cycles_per_sec() /
		(uint64_t)USEC_PER_SEC
	);

	for (;;) {
		uint32_t current_cycles = k_cycle_get_32();

		/* this handles the rollover on an unsigned 32-bit value */
		if ((current_cycles - start_cycles) >= cycles_to_wait) {
			break;
		}
	}
#else
	arch_busy_wait(usec_to_wait);
#endif /* CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT */
	SYS_PORT_TRACING_FUNC_EXIT(k_thread, busy_wait, usec_to_wait);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_busy_wait(uint32_t usec_to_wait)
{
	z_impl_k_busy_wait(usec_to_wait);
}
#include <syscalls/k_busy_wait_mrsh.c>
#endif /* CONFIG_USERSPACE */

/* Returns the uptime expiration (relative to an unlocked "now"!) of a
 * timeout object. When used correctly, this should be called once,
 * synchronously with the user passing a new timeout value. It should
 * not be used iteratively to adjust a timeout.
 */
uint64_t sys_clock_timeout_end_calc(k_timeout_t timeout)
{
	k_ticks_t dt;

	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		return UINT64_MAX;
	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		return sys_clock_tick_get();
	} else {

		dt = timeout.ticks;

		if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) && Z_TICK_ABS(dt) >= 0) {
			return Z_TICK_ABS(dt);
		}
		return sys_clock_tick_get() + MAX(1, dt);
	}
}