/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <timeout_q.h>
#include <drivers/system_timer.h>
#include <sys_clock.h>
#include <spinlock.h>
#include <ksched.h>
#include <syscall_handler.h>

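/* Convenience wrapper for a spinlock-protected block: the statement that
 * follows LOCKED() runs exactly once with the lock held, and the lock is
 * released by the for-loop's increment expression when the body falls off
 * the end.  Note that returning or jumping out of the body would skip the
 * unlock, so callers let the block complete normally, e.g.:
 *
 *     LOCKED(&timeout_lock) {
 *             curr_tick += ticks;
 *     }
 */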
#define LOCKED(lck) for (k_spinlock_key_t __i = {}, \
                         __key = k_spin_lock(lck); \
                         !__i.key; \
                         k_spin_unlock(lck, __key), __i.key = 1)

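/* Tick count as of the most recent z_clock_announce(); updated under
 * timeout_lock.
 */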
static u64_t curr_tick;

static sys_dlist_t timeout_list = SYS_DLIST_STATIC_INIT(&timeout_list);

static struct k_spinlock timeout_lock;

static bool can_wait_forever;

/* During a call to z_clock_announce(), the "current" time is "ahead"
 * of the reference used by timeout_list by this amount.
 */
static int announce_advance;

#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int z_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
#endif

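/* The timeout list is a delta list: each node's dticks is relative to
 * the expiry of the node before it.  first() and next() are thin
 * wrappers that convert dlist nodes back into struct _timeout pointers.
 */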
static struct _timeout *first(void)
{
        sys_dnode_t *t = sys_dlist_peek_head(&timeout_list);

        return t == NULL ? NULL : CONTAINER_OF(t, struct _timeout, node);
}

static struct _timeout *next(struct _timeout *t)
{
        sys_dnode_t *n = sys_dlist_peek_next(&timeout_list, &t->node);

        return n == NULL ? NULL : CONTAINER_OF(n, struct _timeout, node);
}

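/* Unlinks a timeout from the list, folding its remaining delta into its
 * successor so the deltas of the entries behind it stay correct, then
 * marks it inactive.
 */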
static void remove(struct _timeout *t)
{
        if (next(t) != NULL) {
                next(t)->dticks += t->dticks;
        }

        sys_dlist_remove(&t->node);
        t->dticks = _INACTIVE;
}

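/* Converts a tick count relative to "now" into one relative to the last
 * announcement by subtracting the ticks the driver reports as already
 * elapsed, clamping at zero.
 */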
static s32_t adjust_elapsed(s32_t ticks)
{
        ticks -= z_clock_elapsed();
        return ticks < 0 ? 0 : ticks;
}

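/* Inserts a new timeout into the delta list, walking the existing entries
 * and subtracting their deltas until the insertion point is found, then
 * reprograms the timer driver for the (possibly new) earliest expiry.
 * The timeout must be inactive (dticks < 0) on entry.
 */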
void _add_timeout(struct _timeout *to, _timeout_func_t fn, s32_t ticks)
{
        __ASSERT(to->dticks < 0, "");
        to->fn = fn;

        LOCKED(&timeout_lock) {
                struct _timeout *t;

                to->dticks = adjust_elapsed(ticks) + announce_advance;
                for (t = first(); t != NULL; t = next(t)) {
                        __ASSERT(t->dticks >= 0, "");

                        if (t->dticks > to->dticks) {
                                t->dticks -= to->dticks;
                                sys_dlist_insert_before(&timeout_list,
                                                        &t->node, &to->node);
                                break;
                        }
                        to->dticks -= t->dticks;
                }

                if (t == NULL) {
                        sys_dlist_append(&timeout_list, &to->node);
                }
        }

        z_clock_set_timeout(_get_next_timeout_expiry(), false);
}

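/* Cancels a pending timeout.  Returns 0 if it was removed from the list,
 * or _INACTIVE if it had already expired or was never added.
 */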
int _abort_timeout(struct _timeout *to)
{
        int ret = _INACTIVE;

        LOCKED(&timeout_lock) {
                if (to->dticks != _INACTIVE) {
                        remove(to);
                        ret = 0;
                }
        }

        return ret;
}

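/* Returns the number of ticks until the given timeout fires, computed by
 * summing the deltas of every list entry up to and including it; returns
 * zero if the timeout is not active.
 */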
s32_t z_timeout_remaining(struct _timeout *to)
{
        s32_t ticks = 0;

        if (to->dticks == _INACTIVE) {
                return 0;
        }

        LOCKED(&timeout_lock) {
                for (struct _timeout *t = first(); t != NULL; t = next(t)) {
                        ticks += t->dticks;
                        if (to == t) {
                                break;
                        }
                }
        }

        return ticks;
}

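/* Timer driver callback announcing that "ticks" ticks have elapsed.
 * Expired timeouts are popped one at a time so that each expiry handler
 * runs with timeout_lock released (which lets handlers call back into
 * this module), then the driver is reprogrammed for the next expiry.
 */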
void z_clock_announce(s32_t ticks)
{
        struct _timeout *t = NULL;

#ifdef CONFIG_TIMESLICING
        z_time_slice(ticks);
#endif

        LOCKED(&timeout_lock) {
                curr_tick += ticks;
                announce_advance = ticks;
        }

        while (true) {
                LOCKED(&timeout_lock) {
                        t = first();
                        if (t != NULL) {
                                if (t->dticks <= announce_advance) {
                                        announce_advance -= t->dticks;
                                        t->dticks = 0;
                                        remove(t);
                                } else {
                                        t->dticks -= announce_advance;
                                        t = NULL;
                                }
                        }
                }

                if (t == NULL) {
                        break;
                }

                t->fn(t);
        }

        announce_advance = 0;
        z_clock_set_timeout(_get_next_timeout_expiry(), false);
}

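/* Returns the number of ticks until the next timeout expires, K_FOREVER
 * if the list is empty and the clock is allowed to stop, or INT_MAX
 * otherwise.  With timeslicing enabled the result is further capped at
 * the current CPU's slice length.
 */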
s32_t _get_next_timeout_expiry(void)
{
        s32_t ret = 0;
        int max = can_wait_forever ? K_FOREVER : INT_MAX;

        LOCKED(&timeout_lock) {
                struct _timeout *to = first();

                ret = to == NULL ? max : adjust_elapsed(to->dticks);
        }

#ifdef CONFIG_TIMESLICING
        if (_current_cpu->slice_ticks && _current_cpu->slice_ticks < ret) {
                ret = _current_cpu->slice_ticks;
        }
#endif
        return ret;
}

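/* "Always on" means the system clock may never be stopped, so idle waits
 * cannot be unbounded.  Enabling it clears can_wait_forever and returns
 * nonzero if always-on was already in effect; disabling it sets
 * can_wait_forever, allowing _get_next_timeout_expiry() to report
 * K_FOREVER when no timeouts are queued.
 */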
int k_enable_sys_clock_always_on(void)
{
        int ret = !can_wait_forever;

        can_wait_forever = 0;
        return ret;
}

void k_disable_sys_clock_always_on(void)
{
        can_wait_forever = 1;
}

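/* Returns the 64-bit uptime in ticks: the last announced tick count plus
 * whatever the driver reports as elapsed since that announcement, read
 * under the lock so the two values are consistent.
 */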
s64_t z_tick_get(void)
{
        u64_t t = 0;

        LOCKED(&timeout_lock) {
                t = curr_tick + z_clock_elapsed();
        }
        return t;
}

u32_t z_tick_get_32(void)
{
        /* Returning just the low word doesn't require locking as the
         * API is by definition at risk of overflow
         */
        return z_clock_elapsed() + (u32_t)curr_tick;
}

u32_t _impl_k_uptime_get_32(void)
{
        return __ticks_to_ms(z_tick_get_32());
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_uptime_get_32)
{
        return _impl_k_uptime_get_32();
}
#endif

s64_t _impl_k_uptime_get(void)
{
        return __ticks_to_ms(z_tick_get());
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_uptime_get, ret_p)
{
        u64_t *ret = (u64_t *)ret_p;

        Z_OOPS(Z_SYSCALL_MEMORY_WRITE(ret, sizeof(*ret)));
        *ret = _impl_k_uptime_get();
        return 0;
}
#endif