/* system clock support */

/*
 * Copyright (c) 1997-2015 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */


#include <kernel_structs.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <wait_q.h>
#include <drivers/system_timer.h>
#include <syscall_handler.h>

#ifdef CONFIG_SYS_CLOCK_EXISTS
#ifdef _NON_OPTIMIZED_TICKS_PER_SEC
#warning "non-optimized system clock frequency chosen: performance may suffer"
#endif
#endif

#ifdef CONFIG_SYS_CLOCK_EXISTS
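/*
 * Derived clock values. Illustrative example: with
 * sys_clock_ticks_per_sec = 100, sys_clock_us_per_tick is
 * 1000000 / 100 = 10000; with a 24 MHz cycle counter,
 * sys_clock_hw_cycles_per_tick is 24000000 / 100 = 240000.
 */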
int sys_clock_us_per_tick = 1000000 / sys_clock_ticks_per_sec;
int sys_clock_hw_cycles_per_tick =
	CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / sys_clock_ticks_per_sec;
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int sys_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
#endif
#else
/* don't initialize to avoid division-by-zero error */
int sys_clock_us_per_tick;
int sys_clock_hw_cycles_per_tick;
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int sys_clock_hw_cycles_per_sec;
#endif
#endif

/* updated by timer driver for tickless, stays at 1 for non-tickless */
s32_t _sys_idle_elapsed_ticks = 1;

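/*
 * Accumulated tick count since boot. 64-bit, so reads and updates are
 * done under an irq lock on architectures that cannot access it
 * atomically (see _tick_get() below).
 */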
volatile u64_t _sys_clock_tick_count;

#ifdef CONFIG_TICKLESS_KERNEL
/*
 * If this flag is set, the system clock will run continuously even if
 * there are no timer events programmed. This allows using the system
 * clock to track the passage of time without interruption.
 * To save power, this should be turned on only when required.
 */
int _sys_clock_always_on;

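/* ticks until the current time slice expires; 0 if the current thread
 * is not being time sliced
 */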
static u32_t next_ts;
#endif
/**
 *
 * @brief Return the lower part of the current system tick count
 *
 * @return the current system tick count
 *
 */
u32_t _tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	return (u32_t)_get_elapsed_clock_time();
#else
	return (u32_t)_sys_clock_tick_count;
#endif
}
FUNC_ALIAS(_tick_get_32, sys_tick_get_32, u32_t);

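/*
 * Kernel-side implementation of k_uptime_get_32(): returns the elapsed
 * uptime in milliseconds, truncated to 32 bits.
 */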
u32_t _impl_k_uptime_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	__ASSERT(_sys_clock_always_on,
		 "Call k_enable_sys_clock_always_on to use clock API");
#endif
	return __ticks_to_ms(_tick_get_32());
}

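/*
 * Syscall handler: validates the call from user mode before forwarding
 * to the kernel-side implementation above.
 */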
#ifdef CONFIG_USERSPACE
_SYSCALL_HANDLER(k_uptime_get_32)
{
#ifdef CONFIG_TICKLESS_KERNEL
	_SYSCALL_VERIFY(_sys_clock_always_on);
#endif
	return _impl_k_uptime_get_32();
}
#endif

/**
 *
 * @brief Return the current system tick count
 *
 * @return the current system tick count
 *
 */
s64_t _tick_get(void)
{
	s64_t tmp_sys_clock_tick_count;
	/*
	 * Lock interrupts when reading the 64-bit _sys_clock_tick_count
	 * variable. Some architectures (e.g. x86) cannot access 64-bit
	 * values atomically, so we must lock out the timer interrupt that
	 * modifies _sys_clock_tick_count.
	 */
	unsigned int imask = irq_lock();

#ifdef CONFIG_TICKLESS_KERNEL
	tmp_sys_clock_tick_count = _get_elapsed_clock_time();
#else
	tmp_sys_clock_tick_count = _sys_clock_tick_count;
#endif
	irq_unlock(imask);
	return tmp_sys_clock_tick_count;
}
FUNC_ALIAS(_tick_get, sys_tick_get, s64_t);

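/*
 * Kernel-side implementation of k_uptime_get(): returns the full 64-bit
 * elapsed uptime in milliseconds.
 */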
s64_t _impl_k_uptime_get(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	__ASSERT(_sys_clock_always_on,
		 "Call k_enable_sys_clock_always_on to use clock API");
#endif
	return __ticks_to_ms(_tick_get());
}

#ifdef CONFIG_USERSPACE
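/*
 * Syscall handlers return a register-sized value, so the 64-bit result
 * is written through a caller-supplied pointer instead, which must
 * first be verified as writable by the calling thread.
 */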
_SYSCALL_HANDLER(k_uptime_get, ret_p)
{
	u64_t *ret = (u64_t *)ret_p;

	_SYSCALL_MEMORY_WRITE(ret, sizeof(*ret));
	*ret = _impl_k_uptime_get();
	return 0;
}
#endif

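/*
 * Return the milliseconds elapsed since *reftime and update *reftime to
 * the current uptime, so successive calls measure back-to-back
 * intervals. Typical usage (illustrative):
 *
 *	s64_t ref = k_uptime_get();
 *	... do work ...
 *	s64_t elapsed_ms = k_uptime_delta(&ref);
 */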
s64_t k_uptime_delta(s64_t *reftime)
{
	s64_t uptime, delta;

	uptime = k_uptime_get();
	delta = uptime - *reftime;
	*reftime = uptime;

	return delta;
}

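/* 32-bit variant of k_uptime_delta(); the delta is truncated to 32 bits */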
u32_t k_uptime_delta_32(s64_t *reftime)
{
	return (u32_t)k_uptime_delta(reftime);
}

/* handle the expired timeouts in the nano timeout queue */

#ifdef CONFIG_SYS_CLOCK_EXISTS
/*
 * Handle timeouts by dequeuing the expired ones from _timeout_q and queuing
 * them on a local one, then doing the real handling from that queue. This
 * allows going through the second queue without needing to have the
 * interrupts locked since it is a local queue. Each expired timeout is marked
 * as _EXPIRED so that an ISR preempting us and releasing an object on which
 * a thread was timing out and expired will not give the object to that thread.
 *
 * Always called from interrupt level, and always only from the system clock
 * interrupt.
 */

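/* nonzero while handle_timeouts() is walking the timeout queues */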
volatile int _handling_timeouts;

static inline void handle_timeouts(s32_t ticks)
{
	sys_dlist_t expired;
	unsigned int key;

	/* init before locking interrupts */
	sys_dlist_init(&expired);

	key = irq_lock();

	sys_dnode_t *next = sys_dlist_peek_head(&_timeout_q);
	struct _timeout *timeout = (struct _timeout *)next;

	K_DEBUG("head: %p, delta: %d\n",
		timeout, timeout ? timeout->delta_ticks_from_prev : -2112);

	if (!next) {
		irq_unlock(key);
		return;
	}

	/*
	 * Dequeue all expired timeouts from _timeout_q, relieving irq lock
	 * pressure between each of them, allowing handling of higher priority
	 * interrupts. We know that no new timeout will be prepended in front
	 * of a timeout whose delta is 0, since timeouts of 0 ticks are
	 * prohibited.
	 */

	_handling_timeouts = 1;

	while (next) {

		/*
		 * If the number of ticks is greater than the first timeout
		 * delta in the list, the lag produced by this initial
		 * difference must also be applied to subsequent timeouts in
		 * the list until it is entirely consumed.
		 */

		s32_t tmp = timeout->delta_ticks_from_prev;

		if (timeout->delta_ticks_from_prev < ticks) {
			timeout->delta_ticks_from_prev = 0;
		} else {
			timeout->delta_ticks_from_prev -= ticks;
		}

		ticks -= tmp;

		next = sys_dlist_peek_next(&_timeout_q, next);

		if (timeout->delta_ticks_from_prev == 0) {
			sys_dnode_t *node = &timeout->node;

			sys_dlist_remove(node);

			/*
			 * Reverse the order in which they were queued in the
			 * timeout_q: timeouts expiring on the same tick are
			 * queued in the reverse order, time-wise, that they
			 * are added, to shorten the amount of time with
			 * interrupts locked while walking the timeout_q. By
			 * reversing the order _again_ when building the
			 * expired queue, they end up being processed in the
			 * same order they were added, time-wise.
			 */

			sys_dlist_prepend(&expired, node);

			timeout->delta_ticks_from_prev = _EXPIRED;

		} else if (ticks <= 0) {
			break;
		}

		irq_unlock(key);
		key = irq_lock();

		timeout = (struct _timeout *)next;
	}

	irq_unlock(key);

	_handle_expired_timeouts(&expired);

	_handling_timeouts = 0;
}
#else
#define handle_timeouts(ticks) do { } while (0)
#endif

#ifdef CONFIG_TIMESLICING
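/*
 * Time slice bookkeeping, in milliseconds: _time_slice_elapsed
 * accumulates the time consumed by the current slice and
 * _time_slice_duration is the configured slice length;
 * _time_slice_prio_ceiling is the priority ceiling above which threads
 * are not time sliced.
 */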
s32_t _time_slice_elapsed;
s32_t _time_slice_duration = CONFIG_TIMESLICE_SIZE;
int _time_slice_prio_ceiling = CONFIG_TIMESLICE_PRIORITY;

/*
 * Always called from interrupt level, and always only from the system clock
 * interrupt, thus:
 * - _current does not have to be protected, since it only changes at thread
 *   level or when exiting a non-nested interrupt
 * - _time_slice_elapsed does not have to be protected, since it can only
 *   change in this function and at thread level
 * - _time_slice_duration does not have to be protected, since it can only
 *   change at thread level
 */
static void handle_time_slicing(s32_t ticks)
{
#ifdef CONFIG_TICKLESS_KERNEL
	next_ts = 0;
#endif
	if (!_is_thread_time_slicing(_current)) {
		return;
	}

	_time_slice_elapsed += __ticks_to_ms(ticks);
	if (_time_slice_elapsed >= _time_slice_duration) {

		unsigned int key;

		_time_slice_elapsed = 0;

		key = irq_lock();
		_move_thread_to_end_of_prio_q(_current);
		irq_unlock(key);
	}
#ifdef CONFIG_TICKLESS_KERNEL
	next_ts = _ms_to_ticks(_time_slice_duration - _time_slice_elapsed);
#endif
}
#else
#define handle_time_slicing(ticks) do { } while (0)
#endif

/**
 *
 * @brief Announce a tick to the kernel
 *
 * This function is only to be called by the system clock timer driver when a
 * tick is to be announced to the kernel. It takes care of dequeuing the
 * timers that have expired and waking up the threads pending on them.
 *
 * @return N/A
 */
void _nano_sys_clock_tick_announce(s32_t ticks)
{
#ifdef CONFIG_SMP
	/* sys_clock timekeeping happens only on the main CPU */
	if (_arch_curr_cpu()->id) {
		return;
	}
#endif

#ifndef CONFIG_TICKLESS_KERNEL
	unsigned int key;

	K_DEBUG("ticks: %d\n", ticks);

	/* 64-bit value, ensure atomic access with irq lock */
	key = irq_lock();
	_sys_clock_tick_count += ticks;
	irq_unlock(key);
#endif
	handle_timeouts(ticks);

	/* time slicing is basically handled just like yet another timeout */
	handle_time_slicing(ticks);

#ifdef CONFIG_TICKLESS_KERNEL
	u32_t next_to = _get_next_timeout_expiry();

	next_to = next_to == K_FOREVER ? 0 : next_to;
	/* pick the nearer of the next timeout and the next time slice
	 * expiry, with zero meaning "none" in both cases
	 */
	next_to = (!next_to || (next_ts && next_to > next_ts))
		  ? next_ts : next_to;

	u32_t remaining = _get_remaining_program_time();

	if ((!remaining && next_to) || (next_to < remaining)) {
		/* Clears current program if next_to = 0 and remaining > 0 */
		_set_time(next_to);
	}
#endif
}