/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Kernel thread support
 *
 * This module provides general purpose thread support.
 */

#include <zephyr/kernel.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/sys_clock.h>
#include <ksched.h>
#include <zephyr/wait_q.h>
#include <zephyr/syscall_handler.h>
#include <kernel_internal.h>
#include <kswap.h>
#include <zephyr/init.h>
#include <zephyr/tracing/tracing.h>
#include <string.h>
#include <stdbool.h>
#include <zephyr/irq_offload.h>
#include <zephyr/sys/check.h>
#include <zephyr/random/rand32.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#ifdef CONFIG_THREAD_MONITOR
/* This lock protects the linked list of active threads; i.e. the
 * initial _kernel.threads pointer and the linked list made up of
 * thread->next_thread (until NULL)
 */
static struct k_spinlock z_thread_monitor_lock;
#endif /* CONFIG_THREAD_MONITOR */

#define _FOREACH_STATIC_THREAD(thread_data) \
	STRUCT_SECTION_FOREACH(_static_thread_data, thread_data)

void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
{
#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb cannot be NULL");

	/*
	 * Lock is needed to make sure that the _kernel.threads list is not
	 * modified by user_cb, either directly or indirectly (e.g. by
	 * calling k_thread_create() or k_thread_abort() from user_cb).
	 */
	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);

	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		user_cb(thread, user_data);
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);

	k_spin_unlock(&z_thread_monitor_lock, key);
#endif
}

void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb, void *user_data)
{
#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb cannot be NULL");

	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach_unlocked);

	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		k_spin_unlock(&z_thread_monitor_lock, key);
		user_cb(thread, user_data);
		key = k_spin_lock(&z_thread_monitor_lock);
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach_unlocked);

	k_spin_unlock(&z_thread_monitor_lock, key);
#endif
}
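
/* Usage sketch (hypothetical application code, not kernel API): counting
 * threads with k_thread_foreach(). The callback matches the
 * k_thread_user_cb_t signature; count_cb and num_threads are illustrative
 * names.
 *
 *	static void count_cb(const struct k_thread *thread, void *user_data)
 *	{
 *		ARG_UNUSED(thread);
 *		(*(int *)user_data)++;
 *	}
 *
 *	int num_threads = 0;
 *
 *	k_thread_foreach(count_cb, &num_threads);
 *
 * k_thread_foreach_unlocked() takes the same arguments but releases
 * z_thread_monitor_lock around each callback, trading a consistent
 * snapshot of the list for the ability to block in user_cb.
 */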

bool k_is_in_isr(void)
{
	return arch_is_in_isr();
}

/*
 * This function tags the current thread as essential to system operation.
 * Exceptions raised by this thread will be treated as a fatal system error.
 */
void z_thread_essential_set(void)
{
	_current->base.user_options |= K_ESSENTIAL;
}

/*
 * This function tags the current thread as not essential to system operation.
 * Exceptions raised by this thread may be recoverable.
 * (This is the default tag for a thread.)
 */
void z_thread_essential_clear(void)
{
	_current->base.user_options &= ~K_ESSENTIAL;
}

/*
 * This routine indicates if the current thread is an essential system thread.
 *
 * Returns true if current thread is essential, false if it is not.
 */
bool z_is_thread_essential(void)
{
	return (_current->base.user_options & K_ESSENTIAL) == K_ESSENTIAL;
}

#ifdef CONFIG_THREAD_CUSTOM_DATA
void z_impl_k_thread_custom_data_set(void *value)
{
	_current->custom_data = value;
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_custom_data_set(void *data)
{
	z_impl_k_thread_custom_data_set(data);
}
#include <syscalls/k_thread_custom_data_set_mrsh.c>
#endif

void *z_impl_k_thread_custom_data_get(void)
{
	return _current->custom_data;
}

#ifdef CONFIG_USERSPACE
static inline void *z_vrfy_k_thread_custom_data_get(void)
{
	return z_impl_k_thread_custom_data_get();
}
#include <syscalls/k_thread_custom_data_get_mrsh.c>

#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_THREAD_CUSTOM_DATA */
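
/* Usage sketch (hypothetical application code): custom data is a single
 * opaque per-thread pointer; a thread can only access its own slot, so
 * k_thread_custom_data_set()/get() operate implicitly on _current.
 * struct my_ctx is an illustrative type, not a kernel one.
 *
 *	struct my_ctx {
 *		int last_error;
 *	};
 *	static struct my_ctx ctx;
 *
 *	k_thread_custom_data_set(&ctx);
 *	...
 *	struct my_ctx *p = k_thread_custom_data_get();
 */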

#if defined(CONFIG_THREAD_MONITOR)
/*
 * Remove a thread from the kernel's list of active threads.
 */
void z_thread_monitor_exit(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);

	if (thread == _kernel.threads) {
		_kernel.threads = _kernel.threads->next_thread;
	} else {
		struct k_thread *prev_thread;

		prev_thread = _kernel.threads;
		while ((prev_thread != NULL) &&
			(thread != prev_thread->next_thread)) {
			prev_thread = prev_thread->next_thread;
		}
		if (prev_thread != NULL) {
			prev_thread->next_thread = thread->next_thread;
		}
	}

	k_spin_unlock(&z_thread_monitor_lock, key);
}
#endif

int z_impl_k_thread_name_set(struct k_thread *thread, const char *value)
{
#ifdef CONFIG_THREAD_NAME
	if (thread == NULL) {
		thread = _current;
	}

	strncpy(thread->name, value, CONFIG_THREAD_MAX_NAME_LEN - 1);
	thread->name[CONFIG_THREAD_MAX_NAME_LEN - 1] = '\0';

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, name_set, thread, 0);

	return 0;
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(value);

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, name_set, thread, -ENOSYS);

	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_name_set(struct k_thread *thread, const char *str)
{
#ifdef CONFIG_THREAD_NAME
	char name[CONFIG_THREAD_MAX_NAME_LEN];

	if (thread != NULL) {
		if (Z_SYSCALL_OBJ(thread, K_OBJ_THREAD) != 0) {
			return -EINVAL;
		}
	}

	/* In theory we could copy directly into thread->name, but
	 * the current z_vrfy / z_impl split does not provide a
	 * means of doing so.
	 */
	if (z_user_string_copy(name, (char *)str, sizeof(name)) != 0) {
		return -EFAULT;
	}

	return z_impl_k_thread_name_set(thread, name);
#else
	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
#include <syscalls/k_thread_name_set_mrsh.c>
#endif /* CONFIG_USERSPACE */

const char *k_thread_name_get(struct k_thread *thread)
{
#ifdef CONFIG_THREAD_NAME
	return (const char *)thread->name;
#else
	ARG_UNUSED(thread);
	return NULL;
#endif /* CONFIG_THREAD_NAME */
}

int z_impl_k_thread_name_copy(k_tid_t thread, char *buf, size_t size)
{
#ifdef CONFIG_THREAD_NAME
	strncpy(buf, thread->name, size);
	return 0;
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(buf);
	ARG_UNUSED(size);
	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
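
/* Usage sketch (hypothetical application code): naming the current thread
 * and reading the name back. Passing NULL to k_thread_name_set() targets
 * _current; names longer than CONFIG_THREAD_MAX_NAME_LEN - 1 are silently
 * truncated by the strncpy() above.
 *
 *	char name[CONFIG_THREAD_MAX_NAME_LEN];
 *
 *	(void)k_thread_name_set(NULL, "worker");
 *	(void)k_thread_name_copy(k_current_get(), name, sizeof(name));
 */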

static size_t copy_bytes(char *dest, size_t dest_size, const char *src, size_t src_size)
{
	size_t bytes_to_copy;

	bytes_to_copy = MIN(dest_size, src_size);
	memcpy(dest, src, bytes_to_copy);

	return bytes_to_copy;
}

const char *k_thread_state_str(k_tid_t thread_id, char *buf, size_t buf_size)
{
	size_t off = 0;
	uint8_t bit;
	uint8_t thread_state = thread_id->base.thread_state;
	static const char *states_str[8] = {"dummy", "pending", "prestart",
					    "dead", "suspended", "aborting",
					    "", "queued"};
	static const size_t states_sz[8] = {5, 7, 8, 4, 9, 8, 0, 6};

	if ((buf == NULL) || (buf_size == 0)) {
		return "";
	}

	buf_size--; /* Reserve 1 byte for end-of-string character */

	/*
	 * Loop through each bit in the thread_state. Stop once all have
	 * been processed. If more than one thread_state bit is set, then
	 * separate the descriptive strings with a '+'.
	 */

	for (uint8_t index = 0; thread_state != 0; index++) {
		bit = BIT(index);
		if ((thread_state & bit) == 0) {
			continue;
		}

		off += copy_bytes(buf + off, buf_size - off,
				  states_str[index], states_sz[index]);

		thread_state &= ~bit;

		if (thread_state != 0) {
			off += copy_bytes(buf + off, buf_size - off, "+", 1);
		}
	}

	buf[off] = '\0';

	return (const char *)buf;
}
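
/* Usage sketch (hypothetical application code): rendering a thread's
 * state. With multiple state bits set, the loop above produces strings
 * like "suspended+queued"; an undersized buffer yields a truncated but
 * still NUL-terminated result.
 *
 *	char state[32];
 *
 *	printk("state: %s\n",
 *	       k_thread_state_str(k_current_get(), state, sizeof(state)));
 */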

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_thread_name_copy(k_tid_t thread,
					    char *buf, size_t size)
{
#ifdef CONFIG_THREAD_NAME
	size_t len;
	struct z_object *ko = z_object_find(thread);

	/* Special case: we allow reading the names of initialized threads
	 * even if we don't have permission on them
	 */
	if (thread == NULL || ko->type != K_OBJ_THREAD ||
	    (ko->flags & K_OBJ_FLAG_INITIALIZED) == 0) {
		return -EINVAL;
	}
	if (Z_SYSCALL_MEMORY_WRITE(buf, size) != 0) {
		return -EFAULT;
	}
	len = strlen(thread->name);
	if (len + 1 > size) {
		return -ENOSPC;
	}

	return z_user_to_copy((void *)buf, thread->name, len + 1);
#else
	ARG_UNUSED(thread);
	ARG_UNUSED(buf);
	ARG_UNUSED(size);
	return -ENOSYS;
#endif /* CONFIG_THREAD_NAME */
}
#include <syscalls/k_thread_name_copy_mrsh.c>
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_STACK_SENTINEL
/* Check that the stack sentinel is still present
 *
 * The stack sentinel feature writes a magic value to the lowest 4 bytes of
 * the thread's stack when the thread is initialized. This value gets checked
 * in a few places:
 *
 * 1) In k_yield() if the current thread is not swapped out
 * 2) After servicing a non-nested interrupt
 * 3) In z_swap(), check the sentinel in the outgoing thread
 *
 * Item 2 requires support in arch/ code.
 *
 * If the check fails, the thread will be terminated appropriately through
 * the system fatal error handler.
 */
void z_check_stack_sentinel(void)
{
	uint32_t *stack;

	if ((_current->base.thread_state & _THREAD_DUMMY) != 0) {
		return;
	}

	stack = (uint32_t *)_current->stack_info.start;
	if (*stack != STACK_SENTINEL) {
		/* Restore it so further checks don't trigger this same error */
		*stack = STACK_SENTINEL;
		z_except_reason(K_ERR_STACK_CHK_FAIL);
	}
}
#endif /* CONFIG_STACK_SENTINEL */

void z_impl_k_thread_start(struct k_thread *thread)
{
	SYS_PORT_TRACING_OBJ_FUNC(k_thread, start, thread);

	z_sched_start(thread);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_start(struct k_thread *thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	return z_impl_k_thread_start(thread);
}
#include <syscalls/k_thread_start_mrsh.c>
#endif
#endif

#ifdef CONFIG_MULTITHREADING
static void schedule_new_thread(struct k_thread *thread, k_timeout_t delay)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
	if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
		k_thread_start(thread);
	} else {
		z_add_thread_timeout(thread, delay);
	}
#else
	ARG_UNUSED(delay);
	k_thread_start(thread);
#endif
}
#endif

#if CONFIG_STACK_POINTER_RANDOM
int z_stack_adjust_initialized;

static size_t random_offset(size_t stack_size)
{
	size_t random_val;

	if (!z_stack_adjust_initialized) {
		z_early_boot_rand_get((uint8_t *)&random_val, sizeof(random_val));
	} else {
		sys_rand_get((uint8_t *)&random_val, sizeof(random_val));
	}

	/* Don't need to worry about alignment of the size here,
	 * arch_new_thread() is required to do it.
	 *
	 * FIXME: Not the best way to get a random number in a range.
	 * See #6493
	 */
	const size_t fuzz = random_val % CONFIG_STACK_POINTER_RANDOM;

	if (unlikely(fuzz * 2 > stack_size)) {
		return 0;
	}

	return fuzz;
}
#if defined(CONFIG_STACK_GROWS_UP)
	/* This case is rare enough that it is not handled for now */
#error "Stack pointer randomization not implemented for upward growing stacks"
#endif /* CONFIG_STACK_GROWS_UP */
#endif /* CONFIG_STACK_POINTER_RANDOM */

static char *setup_thread_stack(struct k_thread *new_thread,
				k_thread_stack_t *stack, size_t stack_size)
{
	size_t stack_obj_size, stack_buf_size;
	char *stack_ptr, *stack_buf_start;
	size_t delta = 0;

#ifdef CONFIG_USERSPACE
	if (z_stack_is_user_capable(stack)) {
		stack_obj_size = Z_THREAD_STACK_SIZE_ADJUST(stack_size);
		stack_buf_start = Z_THREAD_STACK_BUFFER(stack);
		stack_buf_size = stack_obj_size - K_THREAD_STACK_RESERVED;
	} else
#endif
	{
		/* Object cannot host a user mode thread */
		stack_obj_size = Z_KERNEL_STACK_SIZE_ADJUST(stack_size);
		stack_buf_start = Z_KERNEL_STACK_BUFFER(stack);
		stack_buf_size = stack_obj_size - K_KERNEL_STACK_RESERVED;
	}

	/* Initial stack pointer at the high end of the stack object, may
	 * be reduced later in this function by TLS or random offset
	 */
	stack_ptr = (char *)stack + stack_obj_size;

	LOG_DBG("stack %p for thread %p: obj_size=%zu buf_start=%p "
		" buf_size %zu stack_ptr=%p",
		stack, new_thread, stack_obj_size, (void *)stack_buf_start,
		stack_buf_size, (void *)stack_ptr);

#ifdef CONFIG_INIT_STACKS
	memset(stack_buf_start, 0xaa, stack_buf_size);
#endif
#ifdef CONFIG_STACK_SENTINEL
	/* Put the stack sentinel at the lowest 4 bytes of the stack area.
	 * We periodically check that it's still present and kill the thread
	 * if it isn't.
	 */
	*((uint32_t *)stack_buf_start) = STACK_SENTINEL;
#endif /* CONFIG_STACK_SENTINEL */
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* TLS is always last within the stack buffer */
	delta += arch_tls_stack_setup(new_thread, stack_ptr);
#endif /* CONFIG_THREAD_LOCAL_STORAGE */
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
	size_t tls_size = sizeof(struct _thread_userspace_local_data);

	/* reserve space on highest memory of stack buffer for local data */
	delta += tls_size;
	new_thread->userspace_local_data =
		(struct _thread_userspace_local_data *)(stack_ptr - delta);
#endif
#if CONFIG_STACK_POINTER_RANDOM
	delta += random_offset(stack_buf_size);
#endif
	delta = ROUND_UP(delta, ARCH_STACK_PTR_ALIGN);
#ifdef CONFIG_THREAD_STACK_INFO
	/* Initial values. Arches which implement MPU guards that "borrow"
	 * memory from the stack buffer (not tracked in K_THREAD_STACK_RESERVED)
	 * will need to appropriately update this.
	 *
	 * The bounds tracked here correspond to the area of the stack object
	 * that the thread can access, which includes TLS.
	 */
	new_thread->stack_info.start = (uintptr_t)stack_buf_start;
	new_thread->stack_info.size = stack_buf_size;
	new_thread->stack_info.delta = delta;
#endif
	stack_ptr -= delta;

	return stack_ptr;
}

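/* Resulting layout for a downward-growing stack object (a sketch derived
 * from setup_thread_stack() above; which regions exist depends on the
 * named kconfig options):
 *
 *	high addr -> +-----------------------------+ stack + stack_obj_size
 *	             | TLS area (if enabled)       |
 *	             | userspace local data (opt.) |
 *	             | random offset (opt.)        |
 *	             +-----------------------------+ returned stack_ptr
 *	             | usable stack; 0xaa-filled   |
 *	             | under CONFIG_INIT_STACKS    |
 *	             +-----------------------------+
 *	             | STACK_SENTINEL (lowest 4 B) |
 *	low addr  -> +-----------------------------+ stack_buf_start
 */
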
/*
 * The provided stack_size value is presumed to be either the result of
 * K_THREAD_STACK_SIZEOF(stack), or the size value passed to the instance
 * of K_THREAD_STACK_DEFINE() which defined 'stack'.
 */
char *z_setup_new_thread(struct k_thread *new_thread,
			 k_thread_stack_t *stack, size_t stack_size,
			 k_thread_entry_t entry,
			 void *p1, void *p2, void *p3,
			 int prio, uint32_t options, const char *name)
{
	char *stack_ptr;

	Z_ASSERT_VALID_PRIO(prio, entry);

#ifdef CONFIG_USERSPACE
	__ASSERT((options & K_USER) == 0U || z_stack_is_user_capable(stack),
		 "user thread %p with kernel-only stack %p",
		 new_thread, stack);
	z_object_init(new_thread);
	z_object_init(stack);
	new_thread->stack_obj = stack;
	new_thread->syscall_frame = NULL;

	/* Any given thread has access to itself */
	k_object_access_grant(new_thread, new_thread);
#endif
	z_waitq_init(&new_thread->join_queue);

	/* Initialize various struct k_thread members */
	z_init_thread_base(&new_thread->base, prio, _THREAD_PRESTART, options);
	stack_ptr = setup_thread_stack(new_thread, stack, stack_size);

#ifdef CONFIG_KERNEL_COHERENCE
	/* Check that the thread object is safe, but that the stack is
	 * still cached!
	 */
	__ASSERT_NO_MSG(arch_mem_coherent(new_thread));
	__ASSERT_NO_MSG(!arch_mem_coherent(stack));
#endif

	arch_new_thread(new_thread, stack, stack_ptr, entry, p1, p2, p3);

	/* static threads overwrite it afterwards with real value */
	new_thread->init_data = NULL;

#ifdef CONFIG_USE_SWITCH
	/* switch_handle must be non-null except when inside z_swap()
	 * for synchronization reasons. Historically some notional
	 * USE_SWITCH architectures have actually ignored the field
	 */
	__ASSERT(new_thread->switch_handle != NULL,
		 "arch layer failed to initialize switch_handle");
#endif
#ifdef CONFIG_THREAD_CUSTOM_DATA
	/* Initialize custom data field (value is opaque to kernel) */
	new_thread->custom_data = NULL;
#endif
#ifdef CONFIG_THREAD_MONITOR
	new_thread->entry.pEntry = entry;
	new_thread->entry.parameter1 = p1;
	new_thread->entry.parameter2 = p2;
	new_thread->entry.parameter3 = p3;

	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);

	new_thread->next_thread = _kernel.threads;
	_kernel.threads = new_thread;
	k_spin_unlock(&z_thread_monitor_lock, key);
#endif
#ifdef CONFIG_THREAD_NAME
	if (name != NULL) {
		strncpy(new_thread->name, name,
			CONFIG_THREAD_MAX_NAME_LEN - 1);
		/* Ensure NULL termination, truncate if longer */
		new_thread->name[CONFIG_THREAD_MAX_NAME_LEN - 1] = '\0';
	} else {
		new_thread->name[0] = '\0';
	}
#endif
#ifdef CONFIG_SCHED_CPU_MASK
	if (IS_ENABLED(CONFIG_SCHED_CPU_MASK_PIN_ONLY)) {
		new_thread->base.cpu_mask = 1; /* must specify only one cpu */
	} else {
		new_thread->base.cpu_mask = -1; /* allow all cpus */
	}
#endif
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
	/* _current may be null if the dummy thread is not used */
	if (!_current) {
		new_thread->resource_pool = NULL;
		return stack_ptr;
	}
#endif
#ifdef CONFIG_USERSPACE
	z_mem_domain_init_thread(new_thread);

	if ((options & K_INHERIT_PERMS) != 0U) {
		z_thread_perms_inherit(_current, new_thread);
	}
#endif
#ifdef CONFIG_SCHED_DEADLINE
	new_thread->base.prio_deadline = 0;
#endif
	new_thread->resource_pool = _current->resource_pool;

#ifdef CONFIG_SCHED_THREAD_USAGE
	new_thread->base.usage = (struct k_cycle_stats) {};
	new_thread->base.usage.track_usage =
		CONFIG_SCHED_THREAD_USAGE_AUTO_ENABLE;
#endif

	SYS_PORT_TRACING_OBJ_FUNC(k_thread, create, new_thread);

	return stack_ptr;
}

#ifdef CONFIG_MULTITHREADING
k_tid_t z_impl_k_thread_create(struct k_thread *new_thread,
			       k_thread_stack_t *stack,
			       size_t stack_size, k_thread_entry_t entry,
			       void *p1, void *p2, void *p3,
			       int prio, uint32_t options, k_timeout_t delay)
{
	__ASSERT(!arch_is_in_isr(), "Threads may not be created in ISRs");

	z_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
			   prio, options, NULL);

	if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
		schedule_new_thread(new_thread, delay);
	}

	return new_thread;
}
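
/* Usage sketch (hypothetical application code): creating a kernel thread
 * from a statically defined stack. The names my_stack/my_thread/my_entry
 * and the stack size and priority are illustrative.
 *
 *	K_THREAD_STACK_DEFINE(my_stack, 1024);
 *	static struct k_thread my_thread;
 *
 *	static void my_entry(void *p1, void *p2, void *p3)
 *	{
 *		...
 *	}
 *
 *	k_tid_t tid = k_thread_create(&my_thread, my_stack,
 *				      K_THREAD_STACK_SIZEOF(my_stack),
 *				      my_entry, NULL, NULL, NULL,
 *				      7, 0, K_NO_WAIT);
 *
 * Passing K_FOREVER as the delay leaves the thread prestarted until an
 * explicit k_thread_start(tid).
 */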

#ifdef CONFIG_USERSPACE
bool z_stack_is_user_capable(k_thread_stack_t *stack)
{
	return z_object_find(stack) != NULL;
}

k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
			       k_thread_stack_t *stack,
			       size_t stack_size, k_thread_entry_t entry,
			       void *p1, void *p2, void *p3,
			       int prio, uint32_t options, k_timeout_t delay)
{
	size_t total_size, stack_obj_size;
	struct z_object *stack_object;

	/* The thread and stack objects *must* be in an uninitialized state */
	Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(new_thread, K_OBJ_THREAD));

	/* No need to check z_stack_is_user_capable(), it won't be in the
	 * object table if it isn't
	 */
	stack_object = z_object_find(stack);
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(z_obj_validation_check(stack_object, stack,
						K_OBJ_THREAD_STACK_ELEMENT,
						_OBJ_INIT_FALSE) == 0,
				    "bad stack object"));

	/* Verify that the stack size passed in is OK by computing the total
	 * size and comparing it with the size value in the object metadata
	 */
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(!size_add_overflow(K_THREAD_STACK_RESERVED,
						       stack_size, &total_size),
				    "stack size overflow (%zu+%zu)",
				    stack_size,
				    K_THREAD_STACK_RESERVED));

	/* Testing less-than-or-equal since additional room may have been
	 * allocated for alignment constraints
	 */
#ifdef CONFIG_GEN_PRIV_STACKS
	stack_obj_size = stack_object->data.stack_data->size;
#else
	stack_obj_size = stack_object->data.stack_size;
#endif
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(total_size <= stack_obj_size,
				    "stack size %zu is too big, max is %zu",
				    total_size, stack_obj_size));

	/* User threads may only create other user threads and they can't
	 * be marked as essential
	 */
	Z_OOPS(Z_SYSCALL_VERIFY(options & K_USER));
	Z_OOPS(Z_SYSCALL_VERIFY(!(options & K_ESSENTIAL)));

	/* Check validity of prio argument; must be the same or worse priority
	 * than the caller
	 */
	Z_OOPS(Z_SYSCALL_VERIFY(_is_valid_prio(prio, NULL)));
	Z_OOPS(Z_SYSCALL_VERIFY(z_is_prio_lower_or_equal(prio,
							_current->base.prio)));

	z_setup_new_thread(new_thread, stack, stack_size,
			   entry, p1, p2, p3, prio, options, NULL);

	if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
		schedule_new_thread(new_thread, delay);
	}

	return new_thread;
}
#include <syscalls/k_thread_create_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_MULTITHREADING */

#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_USERSPACE

static void grant_static_access(void)
{
	STRUCT_SECTION_FOREACH(z_object_assignment, pos) {
		for (int i = 0; pos->objects[i] != NULL; i++) {
			k_object_access_grant(pos->objects[i],
					      pos->thread);
		}
	}
}
#endif /* CONFIG_USERSPACE */

void z_init_static_threads(void)
{
	_FOREACH_STATIC_THREAD(thread_data) {
		z_setup_new_thread(
			thread_data->init_thread,
			thread_data->init_stack,
			thread_data->init_stack_size,
			thread_data->init_entry,
			thread_data->init_p1,
			thread_data->init_p2,
			thread_data->init_p3,
			thread_data->init_prio,
			thread_data->init_options,
			thread_data->init_name);

		thread_data->init_thread->init_data = thread_data;
	}

#ifdef CONFIG_USERSPACE
	grant_static_access();
#endif

	/*
	 * Non-legacy static threads may be started immediately or
	 * after a previously specified delay. Even though the
	 * scheduler is locked, ticks can still be delivered and
	 * processed. Take a sched lock to prevent them from running
	 * until they are all started.
	 *
	 * Note that static threads defined using the legacy API have a
	 * delay of K_FOREVER.
	 */
	k_sched_lock();
	_FOREACH_STATIC_THREAD(thread_data) {
		if (thread_data->init_delay != K_TICKS_FOREVER) {
			schedule_new_thread(thread_data->init_thread,
					    K_MSEC(thread_data->init_delay));
		}
	}
	k_sched_unlock();
}
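
/* Usage sketch (hypothetical application code): the static threads walked
 * above come from K_THREAD_DEFINE() instances, e.g.
 *
 *	K_THREAD_DEFINE(blink, 512, blink_entry, NULL, NULL, NULL,
 *			5, 0, 100);
 *
 * which places a _static_thread_data entry in the iterable section; the
 * final argument (100 ms here) becomes init_delay, consumed by the
 * K_MSEC() scheduling loop above.
 */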
#endif

void z_init_thread_base(struct _thread_base *thread_base, int priority,
			uint32_t initial_state, unsigned int options)
{
	/* k_q_node is initialized upon first insertion in a list */
	thread_base->pended_on = NULL;
	thread_base->user_options = (uint8_t)options;
	thread_base->thread_state = (uint8_t)initial_state;

	thread_base->prio = priority;

	thread_base->sched_locked = 0U;

#ifdef CONFIG_SMP
	thread_base->is_idle = 0;
#endif

#ifdef CONFIG_TIMESLICE_PER_THREAD
	thread_base->slice_ticks = 0;
	thread_base->slice_expired = NULL;
#endif

	/* swap_data does not need to be initialized */

	z_init_thread_timeout(thread_base);
}

FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
					    void *p1, void *p2, void *p3)
{
	SYS_PORT_TRACING_FUNC(k_thread, user_mode_enter);

	_current->base.user_options |= K_USER;
	z_thread_essential_clear();
#ifdef CONFIG_THREAD_MONITOR
	_current->entry.pEntry = entry;
	_current->entry.parameter1 = p1;
	_current->entry.parameter2 = p2;
	_current->entry.parameter3 = p3;
#endif
#ifdef CONFIG_USERSPACE
	__ASSERT(z_stack_is_user_capable(_current->stack_obj),
		 "dropping to user mode with kernel-only stack object");
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
	memset(_current->userspace_local_data, 0,
	       sizeof(struct _thread_userspace_local_data));
#endif
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	arch_tls_stack_setup(_current,
			     (char *)(_current->stack_info.start +
				      _current->stack_info.size));
#endif
	arch_user_mode_enter(entry, p1, p2, p3);
#else
	/* XXX In this case we do not reset the stack */
	z_thread_entry(entry, p1, p2, p3);
#endif
}
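
/* Usage sketch (hypothetical application code): a supervisor thread
 * permanently dropping to user mode. The call does not return, and the
 * thread's stack object must be user-capable per the assertion above.
 *
 *	static void user_entry(void *p1, void *p2, void *p3)
 *	{
 *		...
 *	}
 *
 *	k_thread_user_mode_enter(user_entry, NULL, NULL, NULL);
 */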

/* These spinlock assertion predicates are defined here because having
 * them in spinlock.h is a giant header ordering headache.
 */
#ifdef CONFIG_SPIN_VALIDATE
bool z_spin_lock_valid(struct k_spinlock *l)
{
	uintptr_t thread_cpu = l->thread_cpu;

	if (thread_cpu != 0U) {
		if ((thread_cpu & 3U) == _current_cpu->id) {
			return false;
		}
	}
	return true;
}

bool z_spin_unlock_valid(struct k_spinlock *l)
{
	if (l->thread_cpu != (_current_cpu->id | (uintptr_t)_current)) {
		return false;
	}
	l->thread_cpu = 0;
	return true;
}

void z_spin_lock_set_owner(struct k_spinlock *l)
{
	l->thread_cpu = _current_cpu->id | (uintptr_t)_current;
}
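
/* A note on the encoding (inferred from the code above, not upstream
 * commentary): thread_cpu packs the owner CPU id into the two low bits of
 * the owning thread pointer, which are free because struct k_thread
 * objects are at least 4-byte aligned. z_spin_lock_valid() masks with 3U
 * to catch the deadlock case of re-locking on the same CPU, while
 * z_spin_unlock_valid() compares the full id-or-pointer value to confirm
 * that the unlocking context actually owns the lock.
 */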

#ifdef CONFIG_KERNEL_COHERENCE
bool z_spin_lock_mem_coherent(struct k_spinlock *l)
{
	return arch_mem_coherent((void *)l);
}
#endif /* CONFIG_KERNEL_COHERENCE */

#endif /* CONFIG_SPIN_VALIDATE */

int z_impl_k_float_disable(struct k_thread *thread)
{
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	return arch_float_disable(thread);
#else
	return -ENOTSUP;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
}

int z_impl_k_float_enable(struct k_thread *thread, unsigned int options)
{
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	return arch_float_enable(thread, options);
#else
	return -ENOTSUP;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_float_disable(struct k_thread *thread)
{
	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
	return z_impl_k_float_disable(thread);
}
#include <syscalls/k_float_disable_mrsh.c>
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_IRQ_OFFLOAD
/* Make offload_sem visible outside of this file during testing, so that
 * it can be released externally when an error happens.
 */
K_SEM_DEFINE(offload_sem, 1, 1);

void irq_offload(irq_offload_routine_t routine, const void *parameter)
{
#ifdef CONFIG_IRQ_OFFLOAD_NESTED
	arch_irq_offload(routine, parameter);
#else
	k_sem_take(&offload_sem, K_FOREVER);
	arch_irq_offload(routine, parameter);
	k_sem_give(&offload_sem);
#endif
}
#endif
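
/* Usage sketch (hypothetical test code): running a routine synchronously
 * in interrupt context. offload_sem above serializes offloads only when
 * nested offloading is not supported.
 *
 *	static void my_isr_work(const void *param)
 *	{
 *		ARG_UNUSED(param);
 *		__ASSERT(k_is_in_isr(), "expected ISR context");
 *	}
 *
 *	irq_offload(my_isr_work, NULL);
 */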

#if defined(CONFIG_INIT_STACKS) && defined(CONFIG_THREAD_STACK_INFO)
#ifdef CONFIG_STACK_GROWS_UP
#error "Unsupported configuration for stack analysis"
#endif

int z_stack_space_get(const uint8_t *stack_start, size_t size, size_t *unused_ptr)
{
	size_t unused = 0;
	const uint8_t *checked_stack = stack_start;
	/* Take the address of any local variable as a shallow bound for the
	 * stack pointer. Addresses above it are guaranteed to be
	 * accessible.
	 */
	const uint8_t *stack_pointer = (const uint8_t *)&stack_start;

	/* If we are currently running on the stack being analyzed, some
	 * memory management hardware will generate an exception if we
	 * read unused stack memory.
	 *
	 * This never happens when invoked from user mode, as user mode
	 * will always run this function on the privilege elevation stack.
	 */
	if ((stack_pointer > stack_start) && (stack_pointer <= (stack_start + size)) &&
	    IS_ENABLED(CONFIG_NO_UNUSED_STACK_INSPECTION)) {
		/* TODO: We could add an arch_ API call to temporarily
		 * disable the stack checking in the CPU, but this would
		 * need to be properly managed wrt context switches/interrupts
		 */
		return -ENOTSUP;
	}

	if (IS_ENABLED(CONFIG_STACK_SENTINEL)) {
		/* The first 4 bytes of the stack buffer are reserved for the
		 * sentinel value, so they won't be 0xAAAAAAAA for thread
		 * stacks.
		 *
		 * FIXME: thread->stack_info.start ought to reflect
		 * this!
		 */
		checked_stack += 4;
		size -= 4;
	}

	for (size_t i = 0; i < size; i++) {
		if ((checked_stack[i]) == 0xaaU) {
			unused++;
		} else {
			break;
		}
	}

	*unused_ptr = unused;

	return 0;
}

int z_impl_k_thread_stack_space_get(const struct k_thread *thread,
				    size_t *unused_ptr)
{
	return z_stack_space_get((const uint8_t *)thread->stack_info.start,
				 thread->stack_info.size, unused_ptr);
}

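/* Usage sketch (hypothetical application code): reporting how much of the
 * current thread's 0xaa-initialized stack has never been written.
 *
 *	size_t unused;
 *
 *	if (k_thread_stack_space_get(k_current_get(), &unused) == 0) {
 *		printk("%zu bytes of stack never used\n", unused);
 *	}
 */
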
#ifdef CONFIG_USERSPACE
int z_vrfy_k_thread_stack_space_get(const struct k_thread *thread,
				    size_t *unused_ptr)
{
	size_t unused;
	int ret;

	ret = Z_SYSCALL_OBJ(thread, K_OBJ_THREAD);
	CHECKIF(ret != 0) {
		return ret;
	}

	ret = z_impl_k_thread_stack_space_get(thread, &unused);
	CHECKIF(ret != 0) {
		return ret;
	}

	ret = z_user_to_copy(unused_ptr, &unused, sizeof(size_t));
	CHECKIF(ret != 0) {
		return ret;
	}

	return 0;
}
#include <syscalls/k_thread_stack_space_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_INIT_STACKS && CONFIG_THREAD_STACK_INFO */

#ifdef CONFIG_USERSPACE
static inline k_ticks_t z_vrfy_k_thread_timeout_remaining_ticks(
						    const struct k_thread *t)
{
	Z_OOPS(Z_SYSCALL_OBJ(t, K_OBJ_THREAD));
	return z_impl_k_thread_timeout_remaining_ticks(t);
}
#include <syscalls/k_thread_timeout_remaining_ticks_mrsh.c>

static inline k_ticks_t z_vrfy_k_thread_timeout_expires_ticks(
						    const struct k_thread *t)
{
	Z_OOPS(Z_SYSCALL_OBJ(t, K_OBJ_THREAD));
	return z_impl_k_thread_timeout_expires_ticks(t);
}
#include <syscalls/k_thread_timeout_expires_ticks_mrsh.c>
#endif

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
void z_thread_mark_switched_in(void)
{
#if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH)
	z_sched_usage_start(_current);
#endif

#ifdef CONFIG_TRACING
	SYS_PORT_TRACING_FUNC(k_thread, switched_in);
#endif
}

void z_thread_mark_switched_out(void)
{
#if defined(CONFIG_SCHED_THREAD_USAGE) && !defined(CONFIG_USE_SWITCH)
	z_sched_usage_stop();
#endif

#ifdef CONFIG_TRACING
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* Dummy thread won't have TLS set up to run arbitrary code */
	if (!_current_cpu->current ||
	    (_current_cpu->current->base.thread_state & _THREAD_DUMMY) != 0) {
		return;
	}
#endif
	SYS_PORT_TRACING_FUNC(k_thread, switched_out);
#endif
}
#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */

int k_thread_runtime_stats_get(k_tid_t thread,
			       k_thread_runtime_stats_t *stats)
{
	if ((thread == NULL) || (stats == NULL)) {
		return -EINVAL;
	}

#ifdef CONFIG_SCHED_THREAD_USAGE
	z_sched_thread_usage(thread, stats);
#else
	*stats = (k_thread_runtime_stats_t) {};
#endif

	return 0;
}

int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats)
{
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	k_thread_runtime_stats_t tmp_stats;
#endif

	if (stats == NULL) {
		return -EINVAL;
	}

	*stats = (k_thread_runtime_stats_t) {};

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	/* Retrieve the usage stats for each core and amalgamate them. */

	unsigned int num_cpus = arch_num_cpus();

	for (uint8_t i = 0; i < num_cpus; i++) {
		z_sched_cpu_usage(i, &tmp_stats);

		stats->execution_cycles += tmp_stats.execution_cycles;
		stats->total_cycles += tmp_stats.total_cycles;
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
		stats->current_cycles += tmp_stats.current_cycles;
		stats->peak_cycles += tmp_stats.peak_cycles;
		stats->average_cycles += tmp_stats.average_cycles;
#endif
		stats->idle_cycles += tmp_stats.idle_cycles;
	}
#endif

	return 0;
}
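
/* Usage sketch (hypothetical application code): sampling cycle counts via
 * the two accessors above; non-zero results require
 * CONFIG_SCHED_THREAD_USAGE and CONFIG_SCHED_THREAD_USAGE_ALL,
 * respectively.
 *
 *	k_thread_runtime_stats_t mine, all;
 *
 *	(void)k_thread_runtime_stats_get(k_current_get(), &mine);
 *	(void)k_thread_runtime_stats_all_get(&all);
 *	printk("thread %llu cycles; system %llu busy / %llu total\n",
 *	       mine.execution_cycles,
 *	       all.execution_cycles, all.total_cycles);
 */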