/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Kernel thread support
 *
 * This module provides general purpose thread support.
 */

#include <kernel.h>

#include <toolchain.h>
#include <linker/sections.h>

#include <spinlock.h>
#include <kernel_structs.h>
#include <misc/printk.h>
#include <misc/math_extras.h>
#include <sys_clock.h>
#include <drivers/system_timer.h>
#include <ksched.h>
#include <wait_q.h>
#include <atomic.h>
#include <syscall_handler.h>
#include <kernel_internal.h>
#include <kswap.h>
#include <init.h>
#include <tracing.h>
#include <stdbool.h>

static struct k_spinlock lock;

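/* Iterate over every statically-defined thread; their _static_thread_data
 * records (e.g. as generated by K_THREAD_DEFINE()) are placed in a
 * dedicated linker section.
 */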
#define _FOREACH_STATIC_THREAD(thread_data) \
        Z_STRUCT_SECTION_FOREACH(_static_thread_data, thread_data)

void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
{
#if defined(CONFIG_THREAD_MONITOR)
        struct k_thread *thread;
        k_spinlock_key_t key;

        __ASSERT(user_cb != NULL, "user_cb cannot be NULL");

        /*
         * The lock is needed to make sure that _kernel.threads is not
         * modified by user_cb, either directly or indirectly; the indirect
         * ways are through calling k_thread_create() and k_thread_abort()
         * from user_cb.
         */
        key = k_spin_lock(&lock);
        for (thread = _kernel.threads; thread; thread = thread->next_thread) {
                user_cb(thread, user_data);
        }
        k_spin_unlock(&lock, key);
#endif
}
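
/* Illustrative sketch (not part of the kernel): using k_thread_foreach()
 * from application code to count threads. The callback and counter names
 * are examples only.
 *
 *      static void count_cb(const struct k_thread *t, void *user_data)
 *      {
 *              (*(int *)user_data)++;
 *      }
 *
 *      int count = 0;
 *
 *      k_thread_foreach(count_cb, &count);
 */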

bool k_is_in_isr(void)
{
        return z_is_in_isr();
}

/*
 * This function tags the current thread as essential to system operation.
 * Exceptions raised by this thread will be treated as fatal system errors.
 */
void z_thread_essential_set(void)
{
        _current->base.user_options |= K_ESSENTIAL;
}

/*
 * This function tags the current thread as not essential to system operation.
 * Exceptions raised by this thread may be recoverable.
 * (This is the default tag for a thread.)
 */
void z_thread_essential_clear(void)
{
        _current->base.user_options &= ~K_ESSENTIAL;
}

/*
 * This routine indicates if the current thread is an essential system thread.
 *
 * Returns true if the current thread is essential, false otherwise.
 */
bool z_is_thread_essential(void)
{
        return (_current->base.user_options & K_ESSENTIAL) == K_ESSENTIAL;
}

#ifdef CONFIG_SYS_CLOCK_EXISTS
void z_impl_k_busy_wait(u32_t usec_to_wait)
{
#if !defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
        /* use 64-bit math to prevent overflow when multiplying */
        u32_t cycles_to_wait = (u32_t)(
                (u64_t)usec_to_wait *
                (u64_t)sys_clock_hw_cycles_per_sec() /
                (u64_t)USEC_PER_SEC
        );
        u32_t start_cycles = k_cycle_get_32();

        for (;;) {
                u32_t current_cycles = k_cycle_get_32();

                /* This subtraction handles rollover of the unsigned 32-bit
                 * cycle counter: it is performed modulo 2^32, so e.g. a
                 * start of 0xFFFFFFF0 and a current count of 0x00000010
                 * still yields an elapsed count of 0x20.
                 */
                if ((current_cycles - start_cycles) >= cycles_to_wait) {
                        break;
                }
        }
#else
        z_arch_busy_wait(usec_to_wait);
#endif /* CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT */
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_busy_wait, usec_to_wait)
{
        z_impl_k_busy_wait(usec_to_wait);
        return 0;
}
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_SYS_CLOCK_EXISTS */
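
/* Illustrative usage (not part of the kernel): spin for 100 microseconds
 * without yielding the CPU.
 *
 *      k_busy_wait(100);
 */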

#ifdef CONFIG_THREAD_CUSTOM_DATA
void z_impl_k_thread_custom_data_set(void *value)
{
        _current->custom_data = value;
}

void *z_impl_k_thread_custom_data_get(void)
{
        return _current->custom_data;
}

#endif /* CONFIG_THREAD_CUSTOM_DATA */
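
/* Illustrative sketch (not part of the kernel): stashing a per-thread
 * context pointer via the custom data API; the struct and variable names
 * are examples only.
 *
 *      struct my_ctx { int last_error; };
 *      static struct my_ctx ctx;
 *
 *      k_thread_custom_data_set(&ctx);
 *      ...
 *      struct my_ctx *c = k_thread_custom_data_get();
 */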

#if defined(CONFIG_THREAD_MONITOR)
/*
 * Remove a thread from the kernel's list of active threads.
 */
void z_thread_monitor_exit(struct k_thread *thread)
{
        k_spinlock_key_t key = k_spin_lock(&lock);

        if (thread == _kernel.threads) {
                _kernel.threads = _kernel.threads->next_thread;
        } else {
                struct k_thread *prev_thread;

                prev_thread = _kernel.threads;
                while ((prev_thread != NULL) &&
                        (thread != prev_thread->next_thread)) {
                        prev_thread = prev_thread->next_thread;
                }
                if (prev_thread != NULL) {
                        prev_thread->next_thread = thread->next_thread;
                }
        }

        k_spin_unlock(&lock, key);
}
#endif

#ifdef CONFIG_THREAD_NAME
void z_impl_k_thread_name_set(struct k_thread *thread, const char *value)
{
        if (thread == NULL) {
                _current->name = value;
        } else {
                thread->name = value;
        }
}

const char *z_impl_k_thread_name_get(struct k_thread *thread)
{
        return (const char *)thread->name;
}

#else
void z_impl_k_thread_name_set(k_tid_t thread_id, const char *value)
{
        ARG_UNUSED(thread_id);
        ARG_UNUSED(value);
}

const char *z_impl_k_thread_name_get(k_tid_t thread_id)
{
        ARG_UNUSED(thread_id);
        return NULL;
}
#endif /* CONFIG_THREAD_NAME */
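
/* Illustrative sketch (not part of the kernel), assuming CONFIG_THREAD_NAME
 * is enabled: passing NULL names the calling thread, per
 * z_impl_k_thread_name_set() above.
 *
 *      k_thread_name_set(NULL, "worker");
 *      printk("running in %s\n", k_thread_name_get(k_current_get()));
 */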

#ifdef CONFIG_USERSPACE

#if defined(CONFIG_THREAD_NAME)
Z_SYSCALL_HANDLER(k_thread_name_set, thread, data)
{
        char *name_copy = NULL;

        name_copy = z_user_string_alloc_copy((char *)data, 64);
        z_impl_k_thread_name_set((struct k_thread *)thread, name_copy);
        return 0;
}

Z_SYSCALL_HANDLER1_SIMPLE(k_thread_name_get, K_OBJ_THREAD, k_tid_t);
#endif

#ifdef CONFIG_THREAD_CUSTOM_DATA
Z_SYSCALL_HANDLER(k_thread_custom_data_set, data)
{
        z_impl_k_thread_custom_data_set((void *)data);
        return 0;
}

Z_SYSCALL_HANDLER0_SIMPLE(k_thread_custom_data_get);
#endif /* CONFIG_THREAD_CUSTOM_DATA */

#endif

#ifdef CONFIG_STACK_SENTINEL
/* Check that the stack sentinel is still present
 *
 * The stack sentinel feature writes a magic value to the lowest 4 bytes of
 * the thread's stack when the thread is initialized. This value gets checked
 * in a few places:
 *
 * 1) In k_yield() if the current thread is not swapped out
 * 2) After servicing a non-nested interrupt
 * 3) In z_swap(), check the sentinel in the outgoing thread
 *
 * Item 2 requires support in arch/ code.
 *
 * If the check fails, the thread will be terminated appropriately through
 * the system fatal error handler.
 */
void z_check_stack_sentinel(void)
{
        u32_t *stack;

        if ((_current->base.thread_state & _THREAD_DUMMY) != 0) {
                return;
        }

        stack = (u32_t *)_current->stack_info.start;
        if (*stack != STACK_SENTINEL) {
                /* Restore it so further checks don't trigger this same error */
                *stack = STACK_SENTINEL;
                z_except_reason(_NANO_ERR_STACK_CHK_FAIL);
        }
}
#endif

#ifdef CONFIG_MULTITHREADING
void z_impl_k_thread_start(struct k_thread *thread)
{
        k_spinlock_key_t key = k_spin_lock(&lock); /* protect kernel queues */

        if (z_has_thread_started(thread)) {
                k_spin_unlock(&lock, key);
                return;
        }

        z_mark_thread_as_started(thread);
        z_ready_thread(thread);
        z_reschedule(&lock, key);
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER1_SIMPLE_VOID(k_thread_start, K_OBJ_THREAD, struct k_thread *);
#endif
#endif

#ifdef CONFIG_MULTITHREADING
static void schedule_new_thread(struct k_thread *thread, s32_t delay)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
        if (delay == 0) {
                k_thread_start(thread);
        } else {
                s32_t ticks = _TICK_ALIGN + z_ms_to_ticks(delay);

                z_add_thread_timeout(thread, ticks);
        }
#else
        ARG_UNUSED(delay);
        k_thread_start(thread);
#endif
}
#endif

#if !CONFIG_STACK_POINTER_RANDOM
static inline size_t adjust_stack_size(size_t stack_size)
{
        return stack_size;
}
#else
int z_stack_adjust_initialized;

static inline size_t adjust_stack_size(size_t stack_size)
{
        size_t random_val;

        if (!z_stack_adjust_initialized) {
                random_val = z_early_boot_rand32_get();
        } else {
                random_val = sys_rand32_get();
        }

        /* We don't need to worry about alignment of the size here;
         * z_new_thread() is required to do it.
         *
         * FIXME: Not the best way to get a random number in a range.
         * See #6493
         */
        const size_t fuzz = random_val % CONFIG_STACK_POINTER_RANDOM;

        if (unlikely(fuzz * 2 > stack_size)) {
                return stack_size;
        }

        return stack_size - fuzz;
}
#if defined(CONFIG_STACK_GROWS_UP)
/* This case is so rare we're not bothering with it for now */
#error "Stack pointer randomization not implemented for upward growing stacks"
#endif /* CONFIG_STACK_GROWS_UP */

#endif /* CONFIG_STACK_POINTER_RANDOM */
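
/* Illustrative note (not part of the kernel): with e.g.
 * CONFIG_STACK_POINTER_RANDOM=100, each new thread's usable stack size is
 * reduced by a pseudo-random fuzz of 0-99 bytes, randomizing the initial
 * stack pointer; the fuzz is skipped whenever it would consume more than
 * half of the stack.
 */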

/*
 * Note:
 * The caller must guarantee that the stack_size passed here corresponds
 * to the amount of stack memory available for the thread.
 */
void z_setup_new_thread(struct k_thread *new_thread,
                        k_thread_stack_t *stack, size_t stack_size,
                        k_thread_entry_t entry,
                        void *p1, void *p2, void *p3,
                        int prio, u32_t options, const char *name)
{
        stack_size = adjust_stack_size(stack_size);

#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
#ifndef CONFIG_THREAD_USERSPACE_LOCAL_DATA_ARCH_DEFER_SETUP
        /* reserve space on top of stack for local data */
        stack_size = STACK_ROUND_DOWN(stack_size
                        - sizeof(*new_thread->userspace_local_data));
#endif
#endif

        z_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
                     prio, options);

#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
#ifndef CONFIG_THREAD_USERSPACE_LOCAL_DATA_ARCH_DEFER_SETUP
        /* don't set again if the arch's own code in z_new_thread() has
         * already set the pointer.
         */
        new_thread->userspace_local_data =
                (struct _thread_userspace_local_data *)
                (Z_THREAD_STACK_BUFFER(stack) + stack_size);
#endif
#endif

#ifdef CONFIG_THREAD_MONITOR
        new_thread->entry.pEntry = entry;
        new_thread->entry.parameter1 = p1;
        new_thread->entry.parameter2 = p2;
        new_thread->entry.parameter3 = p3;

        k_spinlock_key_t key = k_spin_lock(&lock);

        new_thread->next_thread = _kernel.threads;
        _kernel.threads = new_thread;
        k_spin_unlock(&lock, key);
#endif
#ifdef CONFIG_THREAD_NAME
        new_thread->name = name;
#endif
#ifdef CONFIG_USERSPACE
        z_object_init(new_thread);
        z_object_init(stack);
        new_thread->stack_obj = stack;

        /* Any given thread has access to itself */
        k_object_access_grant(new_thread, new_thread);
#endif
#ifdef CONFIG_SCHED_CPU_MASK
        new_thread->base.cpu_mask = -1;
#endif
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
        /* _current may be NULL if the dummy thread is not used */
        if (!_current) {
                new_thread->resource_pool = NULL;
                return;
        }
#endif
#ifdef CONFIG_USERSPACE
        /* New threads inherit any memory domain membership from the parent */
        if (_current->mem_domain_info.mem_domain != NULL) {
                k_mem_domain_add_thread(_current->mem_domain_info.mem_domain,
                                        new_thread);
        }

        if ((options & K_INHERIT_PERMS) != 0U) {
                z_thread_perms_inherit(_current, new_thread);
        }
#endif
#ifdef CONFIG_SCHED_DEADLINE
        new_thread->base.prio_deadline = 0;
#endif
        new_thread->resource_pool = _current->resource_pool;
        sys_trace_thread_create(new_thread);
}

#ifdef CONFIG_MULTITHREADING
k_tid_t z_impl_k_thread_create(struct k_thread *new_thread,
                               k_thread_stack_t *stack,
                               size_t stack_size, k_thread_entry_t entry,
                               void *p1, void *p2, void *p3,
                               int prio, u32_t options, s32_t delay)
{
        __ASSERT(!z_is_in_isr(), "Threads may not be created in ISRs");

        /* Special case, only for unit tests */
#if defined(CONFIG_TEST) && defined(CONFIG_ARCH_HAS_USERSPACE) && !defined(CONFIG_USERSPACE)
        __ASSERT((options & K_USER) == 0,
                 "Platform is capable of user mode, and test thread created with K_USER option,"
                 " but neither CONFIG_TEST_USERSPACE nor CONFIG_USERSPACE is set\n");
#endif

        z_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
                           prio, options, NULL);

        if (delay != K_FOREVER) {
                schedule_new_thread(new_thread, delay);
        }

        return new_thread;
}
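
/* Illustrative sketch (not part of the kernel): creating a thread through
 * the public k_thread_create() API that lands here; the names, stack size,
 * and priority are examples only.
 *
 *      K_THREAD_STACK_DEFINE(my_stack, 1024);
 *      static struct k_thread my_thread;
 *
 *      static void my_entry(void *p1, void *p2, void *p3)
 *      {
 *              ...
 *      }
 *
 *      k_tid_t tid = k_thread_create(&my_thread, my_stack,
 *                                    K_THREAD_STACK_SIZEOF(my_stack),
 *                                    my_entry, NULL, NULL, NULL,
 *                                    K_PRIO_PREEMPT(7), 0, K_NO_WAIT);
 */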

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_thread_create,
                  new_thread_p, stack_p, stack_size, entry, p1, more_args)
{
        int prio;
        u32_t options, delay;
        u32_t total_size;

        struct _k_object *stack_object;
        struct k_thread *new_thread = (struct k_thread *)new_thread_p;
        volatile struct _syscall_10_args *margs =
                (volatile struct _syscall_10_args *)more_args;
        k_thread_stack_t *stack = (k_thread_stack_t *)stack_p;

        /* The thread and stack objects *must* be in an uninitialized state */
        Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(new_thread, K_OBJ_THREAD));
        stack_object = z_object_find(stack);
        Z_OOPS(Z_SYSCALL_VERIFY_MSG(z_obj_validation_check(stack_object, stack,
                                                K_OBJ__THREAD_STACK_ELEMENT,
                                                _OBJ_INIT_FALSE) == 0,
                                    "bad stack object"));

        /* Verify that the stack size passed in is OK by computing the total
         * size and comparing it with the size value in the object metadata
         */
        Z_OOPS(Z_SYSCALL_VERIFY_MSG(!u32_add_overflow(K_THREAD_STACK_RESERVED,
                                                      stack_size, &total_size),
                                    "stack size overflow (%u+%u)", stack_size,
                                    K_THREAD_STACK_RESERVED));

        /* Testing less-than-or-equal since additional room may have been
         * allocated for alignment constraints
         */
        Z_OOPS(Z_SYSCALL_VERIFY_MSG(total_size <= stack_object->data,
                                    "stack size %u is too big, max is %u",
                                    total_size, stack_object->data));

        /* Verify the struct containing args 6-10 */
        Z_OOPS(Z_SYSCALL_MEMORY_READ(margs, sizeof(*margs)));

        /* Stash struct arguments in local variables to prevent switcheroo
         * attacks
         */
        prio = margs->arg8;
        options = margs->arg9;
        delay = margs->arg10;
        compiler_barrier();

        /* User threads may only create other user threads and they can't
         * be marked as essential
         */
        Z_OOPS(Z_SYSCALL_VERIFY(options & K_USER));
        Z_OOPS(Z_SYSCALL_VERIFY(!(options & K_ESSENTIAL)));

        /* Check validity of prio argument; must be the same or worse priority
         * than the caller
         */
        Z_OOPS(Z_SYSCALL_VERIFY(_is_valid_prio(prio, NULL)));
        Z_OOPS(Z_SYSCALL_VERIFY(z_is_prio_lower_or_equal(prio,
                                                        _current->base.prio)));

        z_setup_new_thread((struct k_thread *)new_thread, stack, stack_size,
                           (k_thread_entry_t)entry, (void *)p1,
                           (void *)margs->arg6, (void *)margs->arg7, prio,
                           options, NULL);

        if (delay != K_FOREVER) {
                schedule_new_thread(new_thread, delay);
        }

        return new_thread_p;
}
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_MULTITHREADING */

void z_thread_single_suspend(struct k_thread *thread)
{
        if (z_is_thread_ready(thread)) {
                z_remove_thread_from_ready_q(thread);
        }

        z_mark_thread_as_suspended(thread);
}

void z_impl_k_thread_suspend(struct k_thread *thread)
{
        k_spinlock_key_t key = k_spin_lock(&lock);

        z_thread_single_suspend(thread);

        sys_trace_thread_suspend(thread);

        if (thread == _current) {
                z_reschedule(&lock, key);
        } else {
                k_spin_unlock(&lock, key);
        }
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER1_SIMPLE_VOID(k_thread_suspend, K_OBJ_THREAD, k_tid_t);
#endif

void z_thread_single_resume(struct k_thread *thread)
{
        z_mark_thread_as_not_suspended(thread);
        z_ready_thread(thread);
}

void z_impl_k_thread_resume(struct k_thread *thread)
{
        k_spinlock_key_t key = k_spin_lock(&lock);

        z_thread_single_resume(thread);

        sys_trace_thread_resume(thread);
        z_reschedule(&lock, key);
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER1_SIMPLE_VOID(k_thread_resume, K_OBJ_THREAD, k_tid_t);
#endif

void z_thread_single_abort(struct k_thread *thread)
{
        if (thread->fn_abort != NULL) {
                thread->fn_abort();
        }

        if (IS_ENABLED(CONFIG_SMP)) {
                z_sched_abort(thread);
        }

        if (z_is_thread_ready(thread)) {
                z_remove_thread_from_ready_q(thread);
        } else {
                if (z_is_thread_pending(thread)) {
                        z_unpend_thread_no_timeout(thread);
                }
                if (z_is_thread_timeout_active(thread)) {
                        (void)z_abort_thread_timeout(thread);
                }
        }

        thread->base.thread_state |= _THREAD_DEAD;

        sys_trace_thread_abort(thread);

#ifdef CONFIG_USERSPACE
        /* Clear the initialized state so that this thread object may be
         * reused, and so that API calls made on it from user threads will
         * trigger errors.
         */
        z_object_uninit(thread->stack_obj);
        z_object_uninit(thread);

        /* Revoke permissions on thread's ID so that it may be recycled */
        z_thread_perms_all_clear(thread);
#endif
}

#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_USERSPACE

static void grant_static_access(void)
{
        Z_STRUCT_SECTION_FOREACH(_k_object_assignment, pos) {
                for (int i = 0; pos->objects[i] != NULL; i++) {
                        k_object_access_grant(pos->objects[i],
                                              pos->thread);
                }
        }
}
#endif /* CONFIG_USERSPACE */

void z_init_static_threads(void)
{
        _FOREACH_STATIC_THREAD(thread_data) {
                z_setup_new_thread(
                        thread_data->init_thread,
                        thread_data->init_stack,
                        thread_data->init_stack_size,
                        thread_data->init_entry,
                        thread_data->init_p1,
                        thread_data->init_p2,
                        thread_data->init_p3,
                        thread_data->init_prio,
                        thread_data->init_options,
                        thread_data->init_name);

                thread_data->init_thread->init_data = thread_data;
        }

#ifdef CONFIG_USERSPACE
        grant_static_access();
#endif

        /*
         * Non-legacy static threads may be started immediately or
         * after a previously specified delay. Take a sched lock to
         * prevent them from running until they are all started; even
         * with the scheduler locked, ticks can still be delivered and
         * processed.
         *
         * Note that static threads defined using the legacy API have a
         * delay of K_FOREVER.
         */
        k_sched_lock();
        _FOREACH_STATIC_THREAD(thread_data) {
                if (thread_data->init_delay != K_FOREVER) {
                        schedule_new_thread(thread_data->init_thread,
                                            thread_data->init_delay);
                }
        }
        k_sched_unlock();
}
#endif
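
/* Illustrative sketch (not part of the kernel): a static thread whose
 * generated _static_thread_data record is what z_init_static_threads()
 * walks; the names and sizes are examples only.
 *
 *      static void blink(void *p1, void *p2, void *p3)
 *      {
 *              ...
 *      }
 *
 *      K_THREAD_DEFINE(blink_tid, 512, blink, NULL, NULL, NULL,
 *                      K_PRIO_PREEMPT(7), 0, K_NO_WAIT);
 */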

void z_init_thread_base(struct _thread_base *thread_base, int priority,
                        u32_t initial_state, unsigned int options)
{
        /* k_q_node is initialized upon first insertion in a list */

        thread_base->user_options = (u8_t)options;
        thread_base->thread_state = (u8_t)initial_state;

        thread_base->prio = priority;

        thread_base->sched_locked = 0U;

        /* swap_data does not need to be initialized */

        z_init_thread_timeout(thread_base);
}

FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
                                            void *p1, void *p2, void *p3)
{
        _current->base.user_options |= K_USER;
        z_thread_essential_clear();
#ifdef CONFIG_THREAD_MONITOR
        _current->entry.pEntry = entry;
        _current->entry.parameter1 = p1;
        _current->entry.parameter2 = p2;
        _current->entry.parameter3 = p3;
#endif
#ifdef CONFIG_USERSPACE
        z_arch_user_mode_enter(entry, p1, p2, p3);
#else
        /* XXX In this case we do not reset the stack */
        z_thread_entry(entry, p1, p2, p3);
#endif
}

/* These spinlock assertion predicates are defined here because having
 * them in spinlock.h is a giant header ordering headache.
 */
#ifdef SPIN_VALIDATE
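/* Note on the encoding, inferred from the code below: thread_cpu packs the
 * owning CPU id into its two least-significant bits and ORs in the owner
 * thread pointer, relying on struct k_thread objects being aligned to at
 * least 4 bytes.
 */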
bool z_spin_lock_valid(struct k_spinlock *l)
{
        if (l->thread_cpu) {
                if ((l->thread_cpu & 3) == _current_cpu->id) {
                        return false;
                }
        }
        return true;
}

bool z_spin_unlock_valid(struct k_spinlock *l)
{
        if (l->thread_cpu != (_current_cpu->id | (uintptr_t)_current)) {
                return false;
        }
        l->thread_cpu = 0;
        return true;
}

void z_spin_lock_set_owner(struct k_spinlock *l)
{
        l->thread_cpu = _current_cpu->id | (uintptr_t)_current;
}
#endif /* SPIN_VALIDATE */

int z_impl_k_float_disable(struct k_thread *thread)
{
#if defined(CONFIG_FLOAT) && defined(CONFIG_FP_SHARING)
        return z_arch_float_disable(thread);
#else
        return -ENOSYS;
#endif /* CONFIG_FLOAT && CONFIG_FP_SHARING */
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_float_disable, thread_p)
{
        struct k_thread *thread = (struct k_thread *)thread_p;

        Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));

        return z_impl_k_float_disable(thread);
}
#endif /* CONFIG_USERSPACE */