/* system clock support */

/*
 * Copyright (c) 1997-2015 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel_structs.h>
#include <toolchain.h>
#include <sections.h>
#include <wait_q.h>
#include <drivers/system_timer.h>

#ifdef CONFIG_SYS_CLOCK_EXISTS
#ifdef _NON_OPTIMIZED_TICKS_PER_SEC
#warning "non-optimized system clock frequency chosen: performance may suffer"
#endif
#endif

#ifdef CONFIG_SYS_CLOCK_EXISTS
int sys_clock_us_per_tick = 1000000 / sys_clock_ticks_per_sec;
int sys_clock_hw_cycles_per_tick =
	CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / sys_clock_ticks_per_sec;
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int sys_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
#endif
#else
/* don't initialize to avoid division-by-zero error */
int sys_clock_us_per_tick;
int sys_clock_hw_cycles_per_tick;
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int sys_clock_hw_cycles_per_sec;
#endif
#endif
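
/*
 * Worked example (illustrative values, not a recommended configuration):
 * with sys_clock_ticks_per_sec = 100 and
 * CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC = 120000000, the initializers above
 * yield
 *
 *	sys_clock_us_per_tick        = 1000000 / 100   = 10000
 *	sys_clock_hw_cycles_per_tick = 120000000 / 100 = 1200000
 */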

/* updated by timer driver for tickless, stays at 1 for non-tickless */
s32_t _sys_idle_elapsed_ticks = 1;

volatile u64_t _sys_clock_tick_count;

#ifdef CONFIG_TICKLESS_KERNEL
/*
 * If this flag is set, the system clock will run continuously even if
 * there are no timer events programmed. This allows using the system
 * clock to track the passage of time without interruption.
 * To save power, it should be turned on only when required.
 */
int _sys_clock_always_on;

static u32_t next_ts;
#endif
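
/*
 * Usage sketch (k_enable_sys_clock_always_on() is the API named in the
 * asserts below): an application that needs k_uptime_get() to keep advancing
 * on a tickless kernel, even with no timeouts programmed, would first call:
 *
 *	k_enable_sys_clock_always_on();
 *	s64_t start = k_uptime_get();
 */
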
/**
 *
 * @brief Return the lower 32 bits of the current system tick count
 *
 * @return the lower 32 bits of the current system tick count
 *
 */
u32_t _tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	return (u32_t)_get_elapsed_clock_time();
#else
	return (u32_t)_sys_clock_tick_count;
#endif
}
FUNC_ALIAS(_tick_get_32, sys_tick_get_32, u32_t);

u32_t k_uptime_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	__ASSERT(_sys_clock_always_on,
		 "Call k_enable_sys_clock_always_on to use clock API");
#endif
	return __ticks_to_ms(_tick_get_32());
}

/**
 *
 * @brief Return the current system tick count
 *
 * @return the current system tick count
 *
 */
s64_t _tick_get(void)
{
	s64_t tmp_sys_clock_tick_count;
	/*
	 * Lock interrupts when reading the 64-bit _sys_clock_tick_count
	 * variable. Some architectures (e.g. x86) do not handle 64-bit
	 * accesses atomically, so we must lock out the timer interrupt,
	 * which modifies _sys_clock_tick_count.
	 */
	unsigned int imask = irq_lock();

#ifdef CONFIG_TICKLESS_KERNEL
	tmp_sys_clock_tick_count = _get_elapsed_clock_time();
#else
	tmp_sys_clock_tick_count = _sys_clock_tick_count;
#endif
	irq_unlock(imask);
	return tmp_sys_clock_tick_count;
}
FUNC_ALIAS(_tick_get, sys_tick_get, s64_t);

s64_t k_uptime_get(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	__ASSERT(_sys_clock_always_on,
		 "Call k_enable_sys_clock_always_on to use clock API");
#endif
	return __ticks_to_ms(_tick_get());
}

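/*
 * Example (sketch): measuring elapsed time in milliseconds; do_work() is a
 * placeholder for application code.
 *
 *	s64_t t0 = k_uptime_get();
 *	do_work();
 *	s64_t elapsed_ms = k_uptime_get() - t0;
 */
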
/**
 *
 * @brief Return number of ticks since a reference time
 *
 * This function is meant to be used in contained fragments of code. The first
 * call to it in a particular code fragment fills in a reference time variable
 * which then gets passed and updated every time the function is called. From
 * the second call on, the delta between the value passed to it and the
 * current tick count is the return value. Since the first call is meant only
 * to fill in the reference time, its return value should be discarded.
 *
 * Since a code fragment that wants to use sys_tick_delta() passes in its
 * own reference time variable, multiple code fragments can make use of this
 * function concurrently.
 *
 * e.g.
 *	u64_t  reftime;
 *	(void) sys_tick_delta(&reftime);  /# prime it #/
 *	[do stuff]
 *	x = sys_tick_delta(&reftime);     /# how long since priming #/
 *	[do more stuff]
 *	y = sys_tick_delta(&reftime);     /# how long since [do stuff] #/
 *
 * @return tick count since reference time; undefined for first invocation
 *
 * NOTE: We use an inline function for both the 64-bit and 32-bit variants;
 * the compiler optimizes out the 64-bit result handling in the 32-bit
 * version.
 */
static ALWAYS_INLINE s64_t _nano_tick_delta(s64_t *reftime)
{
	s64_t delta;
	s64_t saved;

	/*
	 * Lock interrupts when reading the 64-bit _sys_clock_tick_count
	 * variable. Some architectures (e.g. x86) do not handle 64-bit
	 * accesses atomically, so we must lock out the timer interrupt,
	 * which modifies _sys_clock_tick_count.
	 */
	unsigned int imask = irq_lock();

#ifdef CONFIG_TICKLESS_KERNEL
	saved = _get_elapsed_clock_time();
#else
	saved = _sys_clock_tick_count;
#endif
	irq_unlock(imask);
	delta = saved - (*reftime);
	*reftime = saved;

	return delta;
}

/**
 *
 * @brief Return number of ticks since a reference time
 *
 * @return tick count since reference time; undefined for first invocation
 */
s64_t sys_tick_delta(s64_t *reftime)
{
	return _nano_tick_delta(reftime);
}


u32_t sys_tick_delta_32(s64_t *reftime)
{
	return (u32_t)_nano_tick_delta(reftime);
}

s64_t k_uptime_delta(s64_t *reftime)
{
	s64_t uptime, delta;

	uptime = k_uptime_get();
	delta = uptime - *reftime;
	*reftime = uptime;

	return delta;
}

u32_t k_uptime_delta_32(s64_t *reftime)
{
	return (u32_t)k_uptime_delta(reftime);
}

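/*
 * Example (sketch): repeated delta measurement in milliseconds; the
 * reference variable is primed by the first call and updated on each one.
 *
 *	s64_t ref = k_uptime_get();
 *	for (;;) {
 *		do_work();                           /# placeholder #/
 *		s64_t dt_ms = k_uptime_delta(&ref);  /# ms since last call #/
 *	}
 */
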
/* handle the expired timeouts in the nano timeout queue */

#ifdef CONFIG_SYS_CLOCK_EXISTS
#include <wait_q.h>

/*
 * Handle timeouts by dequeuing the expired ones from _timeout_q and queuing
 * them on a local one, then doing the real handling from that queue. This
 * allows going through the second queue without needing to have the
 * interrupts locked, since it is a local queue. Each expired timeout is
 * marked as _EXPIRED so that an ISR preempting us and releasing an object on
 * which a thread was timing out (and has now expired) will not give the
 * object to that thread.
 *
 * Always called from interrupt level, and always only from the system clock
 * interrupt.
 */
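
/*
 * Illustration (hypothetical values): _timeout_q stores each entry's delay
 * as a delta from the previous entry, so timeouts expiring at ticks 3, 5, 5
 * and 9 are queued with delta_ticks_from_prev = 3, 2, 0, 4. Once enough
 * ticks are announced, the head's delta reaches 0 and it is moved to the
 * local expired list below.
 */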

volatile int _handling_timeouts;

static inline void handle_timeouts(s32_t ticks)
{
	sys_dlist_t expired;
	unsigned int key;

	/* init before locking interrupts */
	sys_dlist_init(&expired);

	key = irq_lock();

	struct _timeout *head =
		(struct _timeout *)sys_dlist_peek_head(&_timeout_q);

	K_DEBUG("head: %p, delta: %d\n",
		head, head ? head->delta_ticks_from_prev : -2112);

	if (!head) {
		irq_unlock(key);
		return;
	}

	head->delta_ticks_from_prev -= ticks;

	/*
	 * Dequeue all expired timeouts from _timeout_q, relieving irq lock
	 * pressure between each of them, allowing handling of higher-priority
	 * interrupts. We know that no new timeout will be prepended in front
	 * of a timeout whose delta is 0, since timeouts of 0 ticks are
	 * prohibited.
	 */
	sys_dnode_t *next = &head->node;
	struct _timeout *timeout = (struct _timeout *)next;

	_handling_timeouts = 1;

	while (timeout && timeout->delta_ticks_from_prev == 0) {

		sys_dlist_remove(next);

		/*
		 * Reverse the order in which they were queued in the
		 * timeout_q: timeouts expiring on the same tick are queued
		 * in the reverse order, time-wise, that they are added, to
		 * shorten the amount of time with interrupts locked while
		 * walking the timeout_q. By reversing the order _again_ when
		 * building the expired queue, they end up being processed in
		 * the same order they were added, time-wise.
		 */
		sys_dlist_prepend(&expired, next);

		timeout->delta_ticks_from_prev = _EXPIRED;

		irq_unlock(key);
		key = irq_lock();

		next = sys_dlist_peek_head(&_timeout_q);
		timeout = (struct _timeout *)next;
	}

	irq_unlock(key);

	_handle_expired_timeouts(&expired);

	_handling_timeouts = 0;
}
#else
#define handle_timeouts(ticks) do { } while (0)
#endif
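
/*
 * Illustration (hypothetical): if timeouts A then B are added for the same
 * expiry tick, _timeout_q holds them as B, A; prepending each in turn to the
 * local expired list restores A, B, so handlers run in the order the
 * timeouts were added.
 */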

#ifdef CONFIG_TIMESLICING
s32_t _time_slice_elapsed;
s32_t _time_slice_duration = CONFIG_TIMESLICE_SIZE;
int _time_slice_prio_ceiling = CONFIG_TIMESLICE_PRIORITY;

/*
 * Always called from interrupt level, and always only from the system clock
 * interrupt, thus:
 * - _current does not have to be protected, since it only changes at thread
 *   level or when exiting a non-nested interrupt
 * - _time_slice_elapsed does not have to be protected, since it can only
 *   change in this function and at thread level
 * - _time_slice_duration does not have to be protected, since it can only
 *   change at thread level
 */
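
/*
 * Worked example (illustrative numbers): with CONFIG_TIMESLICE_SIZE set to
 * 20 (ms) and a 10 ms tick, _time_slice_elapsed reaches _time_slice_duration
 * after two announced ticks; the current thread is then moved to the end of
 * its priority queue and the counter is reset to 0.
 */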
static void handle_time_slicing(s32_t ticks)
{
#ifdef CONFIG_TICKLESS_KERNEL
	next_ts = 0;
#endif
	if (!_is_thread_time_slicing(_current)) {
		return;
	}

	_time_slice_elapsed += __ticks_to_ms(ticks);
	if (_time_slice_elapsed >= _time_slice_duration) {

		unsigned int key;

		_time_slice_elapsed = 0;

		key = irq_lock();
		_move_thread_to_end_of_prio_q(_current);
		irq_unlock(key);
	}
#ifdef CONFIG_TICKLESS_KERNEL
	next_ts =
		_ms_to_ticks(_time_slice_duration - _time_slice_elapsed);
#endif
}
#else
#define handle_time_slicing(ticks) do { } while (0)
#endif

/**
 *
 * @brief Announce a tick to the kernel
 *
 * This function is only to be called by the system clock timer driver when a
 * tick is to be announced to the kernel. It takes care of dequeuing the
 * timers that have expired and waking up the threads pending on them.
 *
 * @return N/A
 */
void _nano_sys_clock_tick_announce(s32_t ticks)
{
#ifndef CONFIG_TICKLESS_KERNEL
	unsigned int key;

	K_DEBUG("ticks: %d\n", ticks);

	/* 64-bit value, ensure atomic access with irq lock */
	key = irq_lock();
	_sys_clock_tick_count += ticks;
	irq_unlock(key);
#endif
	handle_timeouts(ticks);

	/* time slicing is basically handled like just another timeout */
	handle_time_slicing(ticks);

#ifdef CONFIG_TICKLESS_KERNEL
	u32_t next_to = _get_next_timeout_expiry();

	next_to = next_to == K_FOREVER ? 0 : next_to;

	/*
	 * Use the nearer of the next kernel timeout and the next time-slice
	 * expiry (next_ts); a value of 0 means "no event pending".
	 */
	next_to = (!next_to || (next_ts && next_to > next_ts)) ?
		  next_ts : next_to;

	u32_t remaining = _get_remaining_program_time();

	if ((!remaining && next_to) || (next_to < remaining)) {
		/* Clears current program if next_to = 0 and remaining > 0 */
		_set_time(next_to);
	}
#endif
}
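
/*
 * Call-site sketch (hypothetical driver code): a non-tickless system clock
 * ISR typically announces one tick per interrupt, matching the default
 * _sys_idle_elapsed_ticks value of 1:
 *
 *	void my_timer_isr(void *arg)	/# hypothetical ISR name #/
 *	{
 *		(... acknowledge the timer hardware ...)
 *		_nano_sys_clock_tick_announce(_sys_idle_elapsed_ticks);
 *	}
 */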