blob: 8eafbb4d7e34e728ab404e9acd26dbffd6085bca [file] [log] [blame]
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001/*
2 * Copyright (c) 1997-2016 Wind River Systems, Inc.
3 *
David B. Kinderac74d8b2017-01-18 17:01:01 -08004 * SPDX-License-Identifier: Apache-2.0
Benjamin Walsh456c6da2016-09-02 18:55:39 -04005 */
6
Gerard Marull-Paretascffefc82022-05-06 11:04:23 +02007#include <zephyr/kernel.h>
Anas Nashif4d994af2021-04-18 23:24:40 -04008
Gerard Marull-Paretascffefc82022-05-06 11:04:23 +02009#include <zephyr/init.h>
Anas Nashif4e396172023-09-26 22:46:01 +000010#include <zephyr/internal/syscall_handler.h>
Flavio Ceolin76b35182018-12-16 12:48:29 -080011#include <stdbool.h>
Gerard Marull-Paretascffefc82022-05-06 11:04:23 +020012#include <zephyr/spinlock.h>
Anas Nashif8634c3b2023-08-29 17:03:12 +000013#include <ksched.h>
14#include <wait_q.h>
Benjamin Walsh456c6da2016-09-02 18:55:39 -040015
/* Single global spinlock guarding all k_timer state (status counters,
 * timeout (re)scheduling, and the per-timer wait queue heads).
 */
static struct k_spinlock lock;

#ifdef CONFIG_OBJ_CORE_TIMER
/* Object-core type descriptor under which every k_timer instance is
 * registered; initialized by init_timer_obj_core_list() at PRE_KERNEL_1.
 */
static struct k_obj_type obj_type_timer;
#endif /* CONFIG_OBJ_CORE_TIMER */
/**
 * @brief Handle expiration of a kernel timer object.
 *
 * Runs as a timeout callback from sys_clock_announce(). Re-arms the
 * timeout for periodic timers, bumps the timer's expiration count,
 * invokes the user expiry callback (with the lock dropped), and wakes
 * at most one thread pending in k_timer_status_sync().
 *
 * @param t Timeout used by the timer.
 */
void z_timer_expiration_handler(struct _timeout *t)
{
	struct k_timer *timer = CONTAINER_OF(t, struct k_timer, timeout);
	struct k_thread *thread;
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* In sys_clock_announce(), when a timeout expires, it is first removed
	 * from the timeout list, then its expiration handler is called (with
	 * unlocked interrupts). For kernel timers, the expiration handler is
	 * this function. Usually, the timeout structure related to the timer
	 * that is handled here will not be linked to the timeout list at this
	 * point. But it may happen that before this function is executed and
	 * interrupts are locked again, a given timer gets restarted from an
	 * interrupt context that has a priority higher than the system timer
	 * interrupt. Then, the timeout structure for this timer will turn out
	 * to be linked to the timeout list. And in such case, since the timer
	 * was restarted, its expiration handler should not be executed then,
	 * so the function exits immediately.
	 */
	if (sys_dnode_is_linked(&t->node)) {
		k_spin_unlock(&lock, key);
		return;
	}

	/*
	 * if the timer is periodic, start it again; don't add _TICK_ALIGN
	 * since we're already aligned to a tick boundary
	 */
	if (!K_TIMEOUT_EQ(timer->period, K_NO_WAIT) &&
	    !K_TIMEOUT_EQ(timer->period, K_FOREVER)) {
		k_timeout_t next = timer->period;

		/* see note about z_add_timeout() in z_impl_k_timer_start() */
		next.ticks = MAX(next.ticks - 1, 0);

#ifdef CONFIG_TIMEOUT_64BIT
		/* Exploit the fact that uptime during a kernel
		 * timeout handler reflects the time of the scheduled
		 * event and not real time to get some inexpensive
		 * protection against late interrupts. If we're
		 * delayed for any reason, we still end up calculating
		 * the next expiration as a regular stride from where
		 * we "should" have run. Requires absolute timeouts.
		 * (Note offset by one: we're nominally at the
		 * beginning of a tick, so need to defeat the "round
		 * down" behavior on timeout addition).
		 */
		next = K_TIMEOUT_ABS_TICKS(k_uptime_ticks() + 1 + next.ticks);
#endif /* CONFIG_TIMEOUT_64BIT */
		z_add_timeout(&timer->timeout, z_timer_expiration_handler,
			      next);
	}

	/* update timer's status */
	timer->status += 1U;

	/* invoke timer expiry function */
	if (timer->expiry_fn != NULL) {
		/* Unlock for user handler; the callback may itself call
		 * timer APIs (e.g. restart the timer), which take this lock.
		 */
		k_spin_unlock(&lock, key);
		timer->expiry_fn(timer);
		key = k_spin_lock(&lock);
	}

	/* Without multithreading there is no wait queue and nothing to
	 * wake up; all done once status is updated.
	 */
	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
		k_spin_unlock(&lock, key);
		return;
	}

	/* Wake (at most) one thread blocked in k_timer_status_sync(). */
	thread = z_waitq_head(&timer->wait_q);

	if (thread == NULL) {
		k_spin_unlock(&lock, key);
		return;
	}

	z_unpend_thread_no_timeout(thread);

	/* Pended call returns 0 (timer expired, not stopped). */
	arch_thread_return_value_set(thread, 0);

	/* Drop the lock before readying: z_ready_thread() may enter the
	 * scheduler, which must not run while holding this spinlock.
	 */
	k_spin_unlock(&lock, key);

	z_ready_thread(thread);
}
111
112
Allan Stephens45bfa372016-10-12 12:39:42 -0500113void k_timer_init(struct k_timer *timer,
Flavio Ceolin118715c2018-11-16 19:52:37 -0800114 k_timer_expiry_t expiry_fn,
115 k_timer_stop_t stop_fn)
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400116{
Allan Stephens45bfa372016-10-12 12:39:42 -0500117 timer->expiry_fn = expiry_fn;
118 timer->stop_fn = stop_fn;
Patrik Flykt24d71432019-03-26 19:57:45 -0600119 timer->status = 0U;
Allan Stephens45bfa372016-10-12 12:39:42 -0500120
Krzysztof Chruscinskidd0715c2021-04-14 13:36:58 +0200121 if (IS_ENABLED(CONFIG_MULTITHREADING)) {
122 z_waitq_init(&timer->wait_q);
123 }
124
Peter A. Bigot5639ea02019-09-27 09:20:26 -0500125 z_init_timeout(&timer->timeout);
Torbjörn Leksell3a66d6c2021-03-26 14:09:10 +0100126
127 SYS_PORT_TRACING_OBJ_INIT(k_timer, timer);
128
Maciek Borzecki4fef7602017-05-18 08:49:50 +0200129 timer->user_data = NULL;
Andrew Boie945af952017-08-22 13:15:23 -0700130
Anas Nashifc91cad72023-09-26 21:32:13 +0000131 k_object_init(timer);
Peter Mitsis6df8efe2023-05-11 14:06:46 -0400132
133#ifdef CONFIG_OBJ_CORE_TIMER
134 k_obj_core_init_and_link(K_OBJ_CORE(timer), &obj_type_timer);
Simon Heinbcd1d192024-03-08 12:00:10 +0100135#endif /* CONFIG_OBJ_CORE_TIMER */
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400136}
137
138
/**
 * @brief Start (or restart) a timer.
 *
 * Aborts any pending expiration, records the new period and clears the
 * accumulated status count, then schedules the first expiration after
 * @a duration. A K_FOREVER duration is a no-op. Safe to call from a
 * timer that is already running; the whole reschedule is done under
 * the module spinlock.
 *
 * @param timer    Timer to start.
 * @param duration Delay before the first expiration.
 * @param period   Interval between subsequent expirations
 *                 (K_NO_WAIT or K_FOREVER for one-shot).
 */
void z_impl_k_timer_start(struct k_timer *timer, k_timeout_t duration,
			  k_timeout_t period)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_timer, start, timer, duration, period);

	/* Acquire spinlock to ensure safety during concurrent calls to
	 * k_timer_start for scheduling or rescheduling. This is necessary
	 * since k_timer_start can be preempted, especially for the same
	 * timer instance.
	 */
	k_spinlock_key_t key = k_spin_lock(&lock);

	if (K_TIMEOUT_EQ(duration, K_FOREVER)) {
		k_spin_unlock(&lock, key);
		return;
	}

	/* z_add_timeout() always adds one to the incoming tick count
	 * to round up to the next tick (by convention it waits for
	 * "at least as long as the specified timeout"), but the
	 * period interval is always guaranteed to be reset from
	 * within the timer ISR, so no round up is desired and 1 is
	 * subtracted in there.
	 *
	 * Note that the duration (!) value gets the same treatment
	 * for backwards compatibility. This is unfortunate
	 * (i.e. k_timer_start() doesn't treat its initial sleep
	 * argument the same way k_sleep() does), but historical. The
	 * timer_api test relies on this behavior.
	 */
	if (Z_TICK_ABS(duration.ticks) < 0) {
		duration.ticks = MAX(duration.ticks - 1, 0);
	}

	/* Cancel any in-flight timeout before re-arming. */
	(void)z_abort_timeout(&timer->timeout);
	timer->period = period;
	timer->status = 0U;

	z_add_timeout(&timer->timeout, z_timer_expiration_handler,
		      duration);

	k_spin_unlock(&lock, key);
}
182
#ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: validate that the caller has been
 * granted access to @a timer before forwarding to the implementation.
 */
static inline void z_vrfy_k_timer_start(struct k_timer *timer,
					k_timeout_t duration,
					k_timeout_t period)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	z_impl_k_timer_start(timer, duration, period);
}
#include <syscalls/k_timer_start_mrsh.c>
#endif /* CONFIG_USERSPACE */
Andrew Boiea354d492017-09-29 16:22:28 -0700193
/**
 * @brief Stop a running timer.
 *
 * Aborts the pending timeout; if the timer was not running, returns
 * immediately. Otherwise invokes the user stop callback (if any) and
 * wakes at most one thread pending in k_timer_status_sync(), which
 * will observe a status of 0 ("stopped").
 *
 * NOTE(review): unlike z_impl_k_timer_start()/status paths, this
 * function touches timer state and the wait queue without taking
 * 'lock'; presumably z_abort_timeout()'s own synchronization makes the
 * inactive check safe, but a concurrent k_timer_start() between the
 * abort and the wakeup below looks racy — TODO confirm.
 *
 * @param timer Timer to stop.
 */
void z_impl_k_timer_stop(struct k_timer *timer)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_timer, stop, timer);

	/* Nonzero abort result means the timeout was not scheduled. */
	bool inactive = (z_abort_timeout(&timer->timeout) != 0);

	if (inactive) {
		return;
	}

	/* Timer was running: notify the owner it was stopped early. */
	if (timer->stop_fn != NULL) {
		timer->stop_fn(timer);
	}

	if (IS_ENABLED(CONFIG_MULTITHREADING)) {
		struct k_thread *pending_thread = z_unpend1_no_timeout(&timer->wait_q);

		if (pending_thread != NULL) {
			z_ready_thread(pending_thread);
			z_reschedule_unlocked();
		}
	}
}
217
#ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: check the caller's grant on @a timer,
 * then forward to the kernel-mode implementation.
 */
static inline void z_vrfy_k_timer_stop(struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	z_impl_k_timer_stop(timer);
}
#include <syscalls/k_timer_stop_mrsh.c>
#endif /* CONFIG_USERSPACE */
Andrew Boiea354d492017-09-29 16:22:28 -0700226
Kumar Galaa1b77fd2020-05-27 11:26:57 -0500227uint32_t z_impl_k_timer_status_get(struct k_timer *timer)
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400228{
Andy Rossb29fb222019-02-05 16:19:30 -0800229 k_spinlock_key_t key = k_spin_lock(&lock);
Kumar Galaa1b77fd2020-05-27 11:26:57 -0500230 uint32_t result = timer->status;
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400231
Patrik Flykt24d71432019-03-26 19:57:45 -0600232 timer->status = 0U;
Andy Rossb29fb222019-02-05 16:19:30 -0800233 k_spin_unlock(&lock, key);
Allan Stephens45bfa372016-10-12 12:39:42 -0500234
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400235 return result;
236}
237
#ifdef CONFIG_USERSPACE
/* Syscall verification wrapper: check the caller's grant on @a timer,
 * then forward to the kernel-mode implementation.
 */
static inline uint32_t z_vrfy_k_timer_status_get(struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_status_get(timer);
}
#include <syscalls/k_timer_status_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
Andrew Boiea354d492017-09-29 16:22:28 -0700246
/**
 * @brief Wait for a timer to expire (or stop) and read its status.
 *
 * If the timer has already expired at least once, returns the
 * accumulated count immediately. Otherwise, if the timer is running,
 * blocks until it next expires or is stopped. In either case the
 * status counter is reset to zero before returning. Must not be
 * called from an ISR.
 *
 * @param timer Timer to synchronize on.
 * @return Number of expirations since the last status read
 *         (0 if the timer was stopped or already inactive).
 */
uint32_t z_impl_k_timer_status_sync(struct k_timer *timer)
{
	__ASSERT(!arch_is_in_isr(), "");
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_timer, status_sync, timer);

	if (!IS_ENABLED(CONFIG_MULTITHREADING)) {
		/* No wait queue available: busy-poll under the lock until
		 * the timer expires (status becomes nonzero) or is found
		 * inactive (stopped/never started).
		 */
		uint32_t result;

		do {
			k_spinlock_key_t key = k_spin_lock(&lock);

			if (!z_is_inactive_timeout(&timer->timeout)) {
				/* volatile read: status is bumped from the
				 * timer ISR while we spin
				 */
				result = *(volatile uint32_t *)&timer->status;
				timer->status = 0U;
				k_spin_unlock(&lock, key);
				if (result > 0) {
					break;
				}
			} else {
				result = timer->status;
				k_spin_unlock(&lock, key);
				break;
			}
		} while (true);

		return result;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t result = timer->status;

	if (result == 0U) {
		if (!z_is_inactive_timeout(&timer->timeout)) {
			SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_timer, status_sync, timer, K_FOREVER);

			/* wait for timer to expire or stop; z_pend_curr()
			 * releases the lock while blocked
			 */
			(void)z_pend_curr(&lock, key, &timer->wait_q, K_FOREVER);

			/* get updated timer status */
			key = k_spin_lock(&lock);
			result = timer->status;
		} else {
			/* timer is already stopped */
		}
	} else {
		/* timer has already expired at least once */
	}

	timer->status = 0U;
	k_spin_unlock(&lock, key);

	/**
	 * @note New tracing hook
	 */
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_timer, status_sync, timer, result);

	return result;
}
305
#ifdef CONFIG_USERSPACE
/* Syscall verification wrappers for the remaining k_timer syscalls.
 * Each validates the caller's grant on the timer object, then forwards
 * to the corresponding z_impl_* implementation.
 */
static inline uint32_t z_vrfy_k_timer_status_sync(struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_status_sync(timer);
}
#include <syscalls/k_timer_status_sync_mrsh.c>

static inline k_ticks_t z_vrfy_k_timer_remaining_ticks(
						const struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_remaining_ticks(timer);
}
#include <syscalls/k_timer_remaining_ticks_mrsh.c>

static inline k_ticks_t z_vrfy_k_timer_expires_ticks(
						const struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_expires_ticks(timer);
}
#include <syscalls/k_timer_expires_ticks_mrsh.c>

static inline void *z_vrfy_k_timer_user_data_get(const struct k_timer *timer)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	return z_impl_k_timer_user_data_get(timer);
}
#include <syscalls/k_timer_user_data_get_mrsh.c>

static inline void z_vrfy_k_timer_user_data_set(struct k_timer *timer,
						void *user_data)
{
	K_OOPS(K_SYSCALL_OBJ(timer, K_OBJ_TIMER));
	z_impl_k_timer_user_data_set(timer, user_data);
}
#include <syscalls/k_timer_user_data_set_mrsh.c>

#endif /* CONFIG_USERSPACE */
Peter Mitsis6df8efe2023-05-11 14:06:46 -0400346
#ifdef CONFIG_OBJ_CORE_TIMER
/* One-time PRE_KERNEL_1 init hook: register the k_timer object-core
 * type and link every statically defined timer into it so runtime
 * introspection can enumerate them.
 */
static int init_timer_obj_core_list(void)
{
	/* Initialize timer object type */

	z_obj_type_init(&obj_type_timer, K_OBJ_TYPE_TIMER_ID,
			offsetof(struct k_timer, obj_core));

	/* Initialize and link statically defined timers */

	STRUCT_SECTION_FOREACH(k_timer, timer) {
		k_obj_core_init_and_link(K_OBJ_CORE(timer), &obj_type_timer);
	}

	return 0;
}
SYS_INIT(init_timer_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_TIMER */