/* system clock support */

/*
 * Copyright (c) 1997-2015 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */


#include <kernel_structs.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <wait_q.h>
#include <drivers/system_timer.h>
#include <syscall_handler.h>

#ifdef CONFIG_SYS_CLOCK_EXISTS
#ifdef _NON_OPTIMIZED_TICKS_PER_SEC
#warning "non-optimized system clock frequency chosen: performance may suffer"
#endif
#endif

#ifdef CONFIG_SYS_CLOCK_EXISTS
int sys_clock_us_per_tick = 1000000 / sys_clock_ticks_per_sec;
int sys_clock_hw_cycles_per_tick =
	CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / sys_clock_ticks_per_sec;
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int sys_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
#endif
#else
/* don't initialize to avoid division-by-zero error */
int sys_clock_us_per_tick;
int sys_clock_hw_cycles_per_tick;
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int sys_clock_hw_cycles_per_sec;
#endif
#endif

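/*
 * Worked example (illustrative values, not from this file): with
 * sys_clock_ticks_per_sec = 100 and CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC =
 * 24000000, each tick lasts 1000000 / 100 = 10000 microseconds and spans
 * 24000000 / 100 = 240000 hardware clock cycles.
 */
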
/* updated by timer driver for tickless, stays at 1 for non-tickless */
s32_t _sys_idle_elapsed_ticks = 1;

volatile u64_t _sys_clock_tick_count;

#ifdef CONFIG_TICKLESS_KERNEL
/*
 * If this flag is set, the system clock will run continuously even if
 * there are no timer events programmed. This allows using the system
 * clock to track the passage of time without interruption.
 * To save power, this should be turned on only when required.
 */
int _sys_clock_always_on;
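
/*
 * Usage sketch (illustrative): an application that wants to read
 * k_uptime_get() on a tickless kernel first calls
 * k_enable_sys_clock_always_on(), and turns the flag off again once it no
 * longer needs continuous timekeeping, to save power.
 */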

static u32_t next_ts;
#endif
/**
 *
 * @brief Return the lower part of the current system tick count
 *
 * @return the current system tick count
 *
 */
u32_t _tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	return (u32_t)_get_elapsed_clock_time();
#else
	return (u32_t)_sys_clock_tick_count;
#endif
}
FUNC_ALIAS(_tick_get_32, sys_tick_get_32, u32_t);

u32_t _impl_k_uptime_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	__ASSERT(_sys_clock_always_on,
		 "Call k_enable_sys_clock_always_on to use clock API");
#endif
	return __ticks_to_ms(_tick_get_32());
}

#ifdef CONFIG_USERSPACE
_SYSCALL_HANDLER(k_uptime_get_32)
{
#ifdef CONFIG_TICKLESS_KERNEL
	_SYSCALL_VERIFY(_sys_clock_always_on);
#endif
	return _impl_k_uptime_get_32();
}
#endif

/**
 *
 * @brief Return the current system tick count
 *
 * @return the current system tick count
 *
 */
s64_t _tick_get(void)
{
	s64_t tmp_sys_clock_tick_count;
	/*
	 * Lock the interrupts when reading the 64-bit _sys_clock_tick_count
	 * variable. Some architectures (x86) do not handle 64-bit accesses
	 * atomically, so we have to lock out the timer interrupt that causes
	 * changes to _sys_clock_tick_count.
	 */
	unsigned int imask = irq_lock();

#ifdef CONFIG_TICKLESS_KERNEL
	tmp_sys_clock_tick_count = _get_elapsed_clock_time();
#else
	tmp_sys_clock_tick_count = _sys_clock_tick_count;
#endif
	irq_unlock(imask);
	return tmp_sys_clock_tick_count;
}
FUNC_ALIAS(_tick_get, sys_tick_get, s64_t);

s64_t _impl_k_uptime_get(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	__ASSERT(_sys_clock_always_on,
		 "Call k_enable_sys_clock_always_on to use clock API");
#endif
	return __ticks_to_ms(_tick_get());
}

#ifdef CONFIG_USERSPACE
_SYSCALL_HANDLER(k_uptime_get, ret_p)
{
	u64_t *ret = (u64_t *)ret_p;

	_SYSCALL_MEMORY_WRITE(ret, sizeof(*ret));
	*ret = _impl_k_uptime_get();
	return 0;
}
#endif

s64_t k_uptime_delta(s64_t *reftime)
{
	s64_t uptime, delta;

	uptime = k_uptime_get();
	delta = uptime - *reftime;
	*reftime = uptime;

	return delta;
}
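
/*
 * Usage sketch (illustrative; do_work() is a hypothetical helper):
 *
 *	s64_t ref = k_uptime_get();
 *
 *	do_work();
 *	s64_t took_ms = k_uptime_delta(&ref);
 *
 * k_uptime_delta() returns the elapsed milliseconds and also refreshes the
 * reference time, so it can be called repeatedly to time successive
 * intervals.
 */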

u32_t k_uptime_delta_32(s64_t *reftime)
{
	return (u32_t)k_uptime_delta(reftime);
}

/* handle the expired timeouts in the nano timeout queue */

#ifdef CONFIG_SYS_CLOCK_EXISTS
/*
 * Handle timeouts by dequeuing the expired ones from _timeout_q and queuing
 * them on a local queue, then doing the real handling from that queue. This
 * allows going through the second queue without needing to have the
 * interrupts locked, since it is a local queue. Each expired timeout is
 * marked as _EXPIRED so that an ISR preempting us and releasing an object on
 * which a thread was timing out (and whose timeout has now expired) will not
 * give the object to that thread.
 *
 * Always called from interrupt level, and always only from the system clock
 * interrupt.
 */

volatile int _handling_timeouts;

static inline void handle_timeouts(s32_t ticks)
{
	sys_dlist_t expired;
	unsigned int key;

	/* init before locking interrupts */
	sys_dlist_init(&expired);

	key = irq_lock();

	sys_dnode_t *next = sys_dlist_peek_head(&_timeout_q);
	struct _timeout *timeout = (struct _timeout *)next;

	K_DEBUG("head: %p, delta: %d\n",
		timeout, timeout ? timeout->delta_ticks_from_prev : -2112);

	if (!next) {
		irq_unlock(key);
		return;
	}

	/*
	 * Dequeue all expired timeouts from _timeout_q, relieving irq lock
	 * pressure between each of them, allowing handling of higher priority
	 * interrupts. We know that no new timeout will be prepended in front
	 * of a timeout whose delta is 0, since timeouts of 0 ticks are
	 * prohibited.
	 */

	_handling_timeouts = 1;

	while (next) {

		/*
		 * If the number of ticks is greater than the first timeout
		 * delta in the list, the lag produced by this initial
		 * difference must also be applied to the other timeouts in
		 * the list until it is entirely consumed.
		 */

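		/*
		 * Example (illustrative): with queued deltas [2, 0, 3] and
		 * ticks = 3, the first two timeouts expire and the third is
		 * left with a delta of 2, its original 3 ticks absorbing the
		 * remaining 1 tick of lag.
		 */
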
		s32_t tmp = timeout->delta_ticks_from_prev;

		if (timeout->delta_ticks_from_prev < ticks) {
			timeout->delta_ticks_from_prev = 0;
		} else {
			timeout->delta_ticks_from_prev -= ticks;
		}

		ticks -= tmp;

		next = sys_dlist_peek_next(&_timeout_q, next);

		if (timeout->delta_ticks_from_prev == 0) {
			sys_dnode_t *node = &timeout->node;

			sys_dlist_remove(node);

			/*
			 * Reverse the order in which they were queued in the
			 * timeout_q: timeouts expiring on the same tick are
			 * queued in the reverse order, time-wise, that they
			 * are added, to shorten the amount of time with
			 * interrupts locked while walking the timeout_q. By
			 * reversing the order _again_ when building the
			 * expired queue, they end up being processed in the
			 * same order they were added, time-wise.
			 */

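			/*
			 * E.g. timeouts A then B queued for the same tick
			 * sit as [B, A] in _timeout_q; prepending here
			 * rebuilds [A, B] in 'expired', so they are handled
			 * in the order they were added.
			 */
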
			sys_dlist_prepend(&expired, node);

			timeout->delta_ticks_from_prev = _EXPIRED;

		} else if (ticks <= 0) {
			break;
		}

		irq_unlock(key);
		key = irq_lock();

		timeout = (struct _timeout *)next;
	}

	irq_unlock(key);

	_handle_expired_timeouts(&expired);

	_handling_timeouts = 0;
}
#else
	#define handle_timeouts(ticks) do { } while (0)
#endif

#ifdef CONFIG_TIMESLICING
s32_t _time_slice_elapsed;
s32_t _time_slice_duration = CONFIG_TIMESLICE_SIZE;
int _time_slice_prio_ceiling = CONFIG_TIMESLICE_PRIORITY;
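
/*
 * Illustrative configuration (example value, not from this file): with
 * CONFIG_TIMESLICE_SIZE=20, a time-sliceable thread is moved to the end of
 * its priority queue after roughly 20 ms of execution, since
 * _time_slice_elapsed below accumulates in milliseconds.
 */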

/*
 * Always called from interrupt level, and always only from the system clock
 * interrupt, thus:
 * - _current does not have to be protected, since it only changes at thread
 *   level or when exiting a non-nested interrupt
 * - _time_slice_elapsed does not have to be protected, since it can only
 *   change in this function and at thread level
 * - _time_slice_duration does not have to be protected, since it can only
 *   change at thread level
 */
static void handle_time_slicing(s32_t ticks)
{
#ifdef CONFIG_TICKLESS_KERNEL
	next_ts = 0;
#endif
	if (!_is_thread_time_slicing(_current)) {
		return;
	}

	_time_slice_elapsed += __ticks_to_ms(ticks);
	if (_time_slice_elapsed >= _time_slice_duration) {

		unsigned int key;

		_time_slice_elapsed = 0;

		key = irq_lock();
		_move_thread_to_end_of_prio_q(_current);
		irq_unlock(key);
	}
#ifdef CONFIG_TICKLESS_KERNEL
	next_ts =
		_ms_to_ticks(_time_slice_duration - _time_slice_elapsed);
#endif
}
#else
#define handle_time_slicing(ticks) do { } while (0)
#endif

/**
 *
 * @brief Announce a tick to the kernel
 *
 * This function is only to be called by the system clock timer driver when a
 * tick is to be announced to the kernel. It takes care of dequeuing the
 * timers that have expired and waking up the threads pending on them.
 *
 * @return N/A
 */
void _nano_sys_clock_tick_announce(s32_t ticks)
{
#ifdef CONFIG_SMP
	/* sys_clock timekeeping happens only on the main CPU */
	if (_arch_curr_cpu()->id) {
		return;
	}
#endif

#ifndef CONFIG_TICKLESS_KERNEL
	unsigned int key;

	K_DEBUG("ticks: %d\n", ticks);

	/* 64-bit value, ensure atomic access with irq lock */
	key = irq_lock();
	_sys_clock_tick_count += ticks;
	irq_unlock(key);
#endif
	handle_timeouts(ticks);

	/* time slicing is basically handled like just another timeout */
	handle_time_slicing(ticks);

#ifdef CONFIG_TICKLESS_KERNEL
	u32_t next_to = _get_next_timeout_expiry();

	next_to = next_to == K_FOREVER ? 0 : next_to;
	next_to = !next_to || (next_ts && next_to > next_ts) ?
		  next_ts : next_to;

	u32_t remaining = _get_remaining_program_time();

	if ((!remaining && next_to) || (next_to < remaining)) {
		/* Clears current program if next_to = 0 and remaining > 0 */
		_set_time(next_to);
	}
#endif
}