/* system clock support */

/*
 * Copyright (c) 1997-2015 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel_structs.h>
#include <toolchain.h>
#include <sections.h>
#include <wait_q.h>
#include <drivers/system_timer.h>

#ifdef CONFIG_SYS_CLOCK_EXISTS
#ifdef _NON_OPTIMIZED_TICKS_PER_SEC
#warning "non-optimized system clock frequency chosen: performance may suffer"
#endif
#endif

#ifdef CONFIG_SYS_CLOCK_EXISTS
int sys_clock_us_per_tick = 1000000 / sys_clock_ticks_per_sec;
int sys_clock_hw_cycles_per_tick =
	CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / sys_clock_ticks_per_sec;
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int sys_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
#endif
#else
/*
 * Don't initialize these: with no system clock, sys_clock_ticks_per_sec
 * is 0 and the initializers above would divide by zero.
 */
int sys_clock_us_per_tick;
int sys_clock_hw_cycles_per_tick;
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int sys_clock_hw_cycles_per_sec;
#endif
#endif
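
/*
 * Worked example (hypothetical board values, for illustration only):
 * with sys_clock_ticks_per_sec = 100 and
 * CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC = 25000000,
 * sys_clock_us_per_tick = 1000000 / 100 = 10000 and
 * sys_clock_hw_cycles_per_tick = 25000000 / 100 = 250000.
 */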

/* updated by timer driver for tickless, stays at 1 for non-tickless */
s32_t _sys_idle_elapsed_ticks = 1;

volatile u64_t _sys_clock_tick_count;

#ifdef CONFIG_TICKLESS_KERNEL
/*
 * If this flag is set, the system clock runs continuously even when
 * there are no timer events programmed, allowing the system clock to
 * track the passage of time without interruption. To save power, it
 * should be enabled only when required.
 */
int _sys_clock_always_on;

static u32_t next_ts;
#endif
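
/*
 * Illustrative sketch: in a tickless kernel, a thread is expected to
 * turn the continuous clock on before polling uptime, e.g.:
 *
 *	k_enable_sys_clock_always_on();  /# keep the clock running #/
 *	s64_t start = k_uptime_get();
 *	... do timed work ...
 *	k_disable_sys_clock_always_on(); /# allow power savings again #/
 *
 * (k_disable_sys_clock_always_on() is assumed here to be the matching
 * counterpart of the k_enable_sys_clock_always_on() routine that the
 * assertions below reference.)
 */
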
/**
 *
 * @brief Return the lower part of the current system tick count
 *
 * @return the lower 32 bits of the current system tick count
 *
 */
u32_t _tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	return (u32_t)_get_elapsed_clock_time();
#else
	return (u32_t)_sys_clock_tick_count;
#endif
}
FUNC_ALIAS(_tick_get_32, sys_tick_get_32, u32_t);

u32_t k_uptime_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	__ASSERT(_sys_clock_always_on,
		 "Call k_enable_sys_clock_always_on to use clock API");
#endif
	return __ticks_to_ms(_tick_get_32());
}
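
/*
 * Usage sketch (do_work() is a hypothetical name): the 32-bit uptime
 * wraps after 2^32 ms, about 49.7 days, but unsigned subtraction still
 * yields a correct delta across the wrap as long as the measured
 * interval itself fits in 32 bits:
 *
 *	u32_t start = k_uptime_get_32();
 *	do_work();
 *	u32_t elapsed_ms = k_uptime_get_32() - start;
 */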

/**
 *
 * @brief Return the current system tick count
 *
 * @return the current system tick count
 *
 */
s64_t _tick_get(void)
{
	s64_t tmp_sys_clock_tick_count;
	/*
	 * Lock interrupts when reading the 64-bit _sys_clock_tick_count
	 * variable: some architectures (e.g. x86) cannot access 64 bits
	 * atomically, so the timer interrupt that modifies
	 * _sys_clock_tick_count must be locked out.
	 */
	unsigned int imask = irq_lock();

#ifdef CONFIG_TICKLESS_KERNEL
	tmp_sys_clock_tick_count = _get_elapsed_clock_time();
#else
	tmp_sys_clock_tick_count = _sys_clock_tick_count;
#endif
	irq_unlock(imask);
	return tmp_sys_clock_tick_count;
}
FUNC_ALIAS(_tick_get, sys_tick_get, s64_t);

s64_t k_uptime_get(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	__ASSERT(_sys_clock_always_on,
		 "Call k_enable_sys_clock_always_on to use clock API");
#endif
	return __ticks_to_ms(_tick_get());
}

/**
 *
 * @brief Return number of ticks since a reference time
 *
 * This function is meant to be used in contained fragments of code. The first
 * call to it in a particular code fragment fills in a reference time variable
 * which then gets passed and updated every time the function is called. From
 * the second call on, the delta between the value passed to it and the current
 * tick count is the return value. Since the first call is meant only to fill
 * in the reference time, its return value should be discarded.
 *
 * Since a code fragment that wants to use sys_tick_delta() passes in its
 * own reference time variable, multiple code fragments can make use of this
 * function concurrently.
 *
 * e.g.
 *	u64_t reftime;
 *	(void) sys_tick_delta(&reftime);  /# prime it #/
 *	[do stuff]
 *	x = sys_tick_delta(&reftime);     /# how long since priming #/
 *	[do more stuff]
 *	y = sys_tick_delta(&reftime);     /# how long since [do stuff] #/
 *
 * @return tick count since reference time; undefined for first invocation
 *
 * NOTE: We use an inline function for both the 64-bit and 32-bit variants;
 * the compiler optimizes out the 64-bit result handling in the 32-bit
 * version.
 */
static ALWAYS_INLINE s64_t _nano_tick_delta(s64_t *reftime)
{
	s64_t delta;
	s64_t saved;

	/*
	 * Lock interrupts when reading the 64-bit _sys_clock_tick_count
	 * variable: some architectures (e.g. x86) cannot access 64 bits
	 * atomically, so the timer interrupt that modifies
	 * _sys_clock_tick_count must be locked out.
	 */
	unsigned int imask = irq_lock();

#ifdef CONFIG_TICKLESS_KERNEL
	saved = _get_elapsed_clock_time();
#else
	saved = _sys_clock_tick_count;
#endif
	irq_unlock(imask);
	delta = saved - (*reftime);
	*reftime = saved;

	return delta;
}

/**
 *
 * @brief Return number of ticks since a reference time
 *
 * @return tick count since reference time; undefined for first invocation
 */
s64_t sys_tick_delta(s64_t *reftime)
{
	return _nano_tick_delta(reftime);
}

u32_t sys_tick_delta_32(s64_t *reftime)
{
	return (u32_t)_nano_tick_delta(reftime);
}

s64_t k_uptime_delta(s64_t *reftime)
{
	s64_t uptime, delta;

	uptime = k_uptime_get();
	delta = uptime - *reftime;
	*reftime = uptime;

	return delta;
}

u32_t k_uptime_delta_32(s64_t *reftime)
{
	return (u32_t)k_uptime_delta(reftime);
}
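
/*
 * Usage sketch, mirroring the sys_tick_delta() example above but in
 * milliseconds ([do stuff] stands for arbitrary application code):
 *
 *	s64_t reftime = 0;
 *	(void) k_uptime_delta(&reftime);      /# prime the reference #/
 *	[do stuff]
 *	s64_t ms = k_uptime_delta(&reftime);  /# ms since priming #/
 */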

/* handle the expired timeouts in the nano timeout queue */

#ifdef CONFIG_SYS_CLOCK_EXISTS
#include <wait_q.h>

/*
 * Handle timeouts by dequeuing the expired ones from _timeout_q and queuing
 * them on a local list, then doing the real handling from that list. This
 * allows going through the second list without needing to have the
 * interrupts locked, since it is a local list. Each expired timeout is
 * marked as _EXPIRED so that an ISR preempting us and releasing an object
 * on which a thread was timing out will not hand the object to that thread,
 * whose timeout has already expired.
 *
 * Always called from interrupt level, and always only from the system clock
 * interrupt.
 */

volatile int _handling_timeouts;

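/*
 * Illustrative note on the delta-list encoding assumed below: each
 * _timeout stores delta_ticks_from_prev, its distance in ticks from the
 * previous queue element, so timeouts expiring at absolute ticks 3, 5, 5
 * and 9 are queued as deltas 3, 2, 0, 4. Only the head's delta needs
 * adjusting when ticks are announced, and a delta of 0 means "expired".
 */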
static inline void handle_timeouts(s32_t ticks)
{
	sys_dlist_t expired;
	unsigned int key;

	/* init before locking interrupts */
	sys_dlist_init(&expired);

	key = irq_lock();

	struct _timeout *head =
		(struct _timeout *)sys_dlist_peek_head(&_timeout_q);

	K_DEBUG("head: %p, delta: %d\n",
		head, head ? head->delta_ticks_from_prev : -2112);

	if (!head) {
		irq_unlock(key);
		return;
	}

	head->delta_ticks_from_prev -= ticks;

	/*
	 * Dequeue all expired timeouts from _timeout_q, relieving irq lock
	 * pressure between each of them, allowing handling of higher
	 * priority interrupts. We know that no new timeout will be
	 * prepended in front of a timeout whose delta is 0, since timeouts
	 * of 0 ticks are prohibited.
	 */
	sys_dnode_t *next = &head->node;
	struct _timeout *timeout = (struct _timeout *)next;

	_handling_timeouts = 1;

	while (timeout && timeout->delta_ticks_from_prev == 0) {

		sys_dlist_remove(next);

		/*
		 * Reverse the order in which timeouts were queued in
		 * _timeout_q: timeouts expiring on the same tick are queued
		 * in the reverse order, time-wise, from the order in which
		 * they were added, to shorten the time spent with interrupts
		 * locked while walking the timeout_q. By reversing the order
		 * _again_ when building the expired list, they end up being
		 * processed in the same order, time-wise, that they were
		 * added.
		 */
		sys_dlist_prepend(&expired, next);

		timeout->delta_ticks_from_prev = _EXPIRED;

		irq_unlock(key);
		key = irq_lock();

		next = sys_dlist_peek_head(&_timeout_q);
		timeout = (struct _timeout *)next;
	}

	irq_unlock(key);

	_handle_expired_timeouts(&expired);

	_handling_timeouts = 0;
}
#else
	#define handle_timeouts(ticks) do { } while (0)
#endif

#ifdef CONFIG_TIMESLICING
s32_t _time_slice_elapsed;
s32_t _time_slice_duration = CONFIG_TIMESLICE_SIZE;
int _time_slice_prio_ceiling = CONFIG_TIMESLICE_PRIORITY;

/*
 * Always called from interrupt level, and always only from the system clock
 * interrupt, thus:
 * - _current does not have to be protected, since it only changes at thread
 *   level or when exiting a non-nested interrupt
 * - _time_slice_elapsed does not have to be protected, since it can only
 *   change in this function and at thread level
 * - _time_slice_duration does not have to be protected, since it can only
 *   change at thread level
 */
static void handle_time_slicing(s32_t ticks)
{
#ifdef CONFIG_TICKLESS_KERNEL
	next_ts = 0;
#endif
	if (!_is_thread_time_slicing(_current)) {
		return;
	}

	_time_slice_elapsed += __ticks_to_ms(ticks);
	if (_time_slice_elapsed >= _time_slice_duration) {

		unsigned int key;

		_time_slice_elapsed = 0;

		key = irq_lock();
		_move_thread_to_end_of_prio_q(_current);
		irq_unlock(key);
	}
#ifdef CONFIG_TICKLESS_KERNEL
	next_ts = _ms_to_ticks(_time_slice_duration - _time_slice_elapsed);
#endif
}
#else
#define handle_time_slicing(ticks) do { } while (0)
#endif
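
/*
 * Illustrative note: the slice parameters above are normally configured
 * at thread level via k_sched_time_slice_set(), which is assumed here to
 * store into _time_slice_duration and _time_slice_prio_ceiling, e.g.:
 *
 *	k_sched_time_slice_set(10, 0); /# 10 ms slices, limited to threads
 *					  at or below priority ceiling 0 #/
 */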

/**
 *
 * @brief Announce a tick to the kernel
 *
 * This function is only to be called by the system clock timer driver when a
 * tick is to be announced to the kernel. It takes care of dequeuing the
 * timers that have expired and waking up the threads pending on them.
 *
 * @return N/A
 */
void _nano_sys_clock_tick_announce(s32_t ticks)
{
#ifndef CONFIG_TICKLESS_KERNEL
	unsigned int key;

	K_DEBUG("ticks: %d\n", ticks);

	/* 64-bit value, ensure atomic access with irq lock */
	key = irq_lock();
	_sys_clock_tick_count += ticks;
	irq_unlock(key);
#endif
	handle_timeouts(ticks);

	/* time slicing is handled just like another timeout */
	handle_time_slicing(ticks);

#ifdef CONFIG_TICKLESS_KERNEL
	u32_t next_to = _get_next_timeout_expiry();

	/*
	 * Pick the earlier of the next timeout expiry and the next time
	 * slice deadline, treating 0 as "none".
	 */
	next_to = next_to == K_FOREVER ? 0 : next_to;
	next_to = (!next_to || (next_ts && next_to > next_ts))
		  ? next_ts : next_to;

	u32_t remaining = _get_remaining_program_time();

	if ((!remaining && next_to) || (next_to < remaining)) {
		/* clears the current program if next_to == 0 and remaining > 0 */
		_set_time(next_to);
	}
#endif
}