/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Kernel thread support
 *
 * This module provides general purpose thread support.
 */

#include <kernel.h>

#include <toolchain.h>
#include <linker/sections.h>

#include <kernel_structs.h>
#include <misc/printk.h>
#include <sys_clock.h>
#include <drivers/system_timer.h>
#include <ksched.h>
#include <wait_q.h>
#include <atomic.h>
#include <syscall_handler.h>

extern struct _static_thread_data _static_thread_data_list_start[];
extern struct _static_thread_data _static_thread_data_list_end[];

#define _FOREACH_STATIC_THREAD(thread_data)		\
	for (struct _static_thread_data *thread_data =	\
	     _static_thread_data_list_start;		\
	     thread_data < _static_thread_data_list_end; \
	     thread_data++)
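
/*
 * Illustrative use of the iterator above (a sketch, not part of this
 * file): walk every statically defined thread and print its initial
 * priority.
 *
 *	_FOREACH_STATIC_THREAD(thread_data) {
 *		printk("static thread prio %d\n", thread_data->init_prio);
 *	}
 */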

int k_is_in_isr(void)
{
	return _is_in_isr();
}
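
/*
 * Illustrative usage (a sketch): bail out of an operation that is only
 * legal in thread context.
 *
 *	if (k_is_in_isr()) {
 *		return -EPERM;
 *	}
 */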

/*
 * This function tags the current thread as essential to system operation.
 * Exceptions raised by this thread will be treated as a fatal system error.
 */
void _thread_essential_set(void)
{
	_current->base.user_options |= K_ESSENTIAL;
}

/*
 * This function tags the current thread as not essential to system operation.
 * Exceptions raised by this thread may be recoverable.
 * (This is the default tag for a thread.)
 */
void _thread_essential_clear(void)
{
	_current->base.user_options &= ~K_ESSENTIAL;
}

/*
 * This routine indicates if the current thread is an essential system thread.
 *
 * Returns non-zero if the current thread is essential, zero if it is not.
 */
int _is_thread_essential(void)
{
	return _current->base.user_options & K_ESSENTIAL;
}
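
/*
 * Illustrative usage (a sketch; do_critical_init() is a hypothetical
 * helper): mark the current thread essential while it performs work
 * the system cannot survive losing, so any exception it raises is
 * treated as fatal.
 *
 *	_thread_essential_set();
 *	do_critical_init();
 *	_thread_essential_clear();
 */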

void k_busy_wait(u32_t usec_to_wait)
{
#if defined(CONFIG_TICKLESS_KERNEL) && \
	!defined(CONFIG_BUSY_WAIT_USES_ALTERNATE_CLOCK)
	int saved_always_on = k_enable_sys_clock_always_on();
#endif
	/* use 64-bit math to prevent overflow when multiplying */
	u32_t cycles_to_wait = (u32_t)(
		(u64_t)usec_to_wait *
		(u64_t)sys_clock_hw_cycles_per_sec /
		(u64_t)USEC_PER_SEC
	);
	u32_t start_cycles = k_cycle_get_32();

	for (;;) {
		u32_t current_cycles = k_cycle_get_32();

		/* this handles the rollover on an unsigned 32-bit value */
		if ((current_cycles - start_cycles) >= cycles_to_wait) {
			break;
		}
	}
#if defined(CONFIG_TICKLESS_KERNEL) && \
	!defined(CONFIG_BUSY_WAIT_USES_ALTERNATE_CLOCK)
	_sys_clock_always_on = saved_always_on;
#endif
}
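
/*
 * Illustrative usage (a sketch): spin for 10 microseconds without
 * yielding the CPU. With a hypothetical 100 MHz cycle counter, the
 * conversion above works out to 10 * 100000000 / 1000000 = 1000 cycles.
 *
 *	k_busy_wait(10);
 */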

#ifdef CONFIG_THREAD_CUSTOM_DATA
void _impl_k_thread_custom_data_set(void *value)
{
	_current->custom_data = value;
}

#ifdef CONFIG_USERSPACE
_SYSCALL_HANDLER(k_thread_custom_data_set, data)
{
	_impl_k_thread_custom_data_set((void *)data);
	return 0;
}
#endif

void *_impl_k_thread_custom_data_get(void)
{
	return _current->custom_data;
}
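
/*
 * Illustrative usage (a sketch; struct my_ctx is a hypothetical type):
 * stash a per-thread context pointer through the public
 * k_thread_custom_data_set()/k_thread_custom_data_get() wrappers.
 *
 *	struct my_ctx ctx;
 *
 *	k_thread_custom_data_set(&ctx);
 *	...
 *	struct my_ctx *p = k_thread_custom_data_get();
 */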

#ifdef CONFIG_USERSPACE
_SYSCALL_HANDLER0_SIMPLE(k_thread_custom_data_get);
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_THREAD_CUSTOM_DATA */

#if defined(CONFIG_THREAD_MONITOR)
/*
 * Remove a thread from the kernel's list of active threads.
 */
void _thread_monitor_exit(struct k_thread *thread)
{
	unsigned int key = irq_lock();

	if (thread == _kernel.threads) {
		_kernel.threads = _kernel.threads->next_thread;
	} else {
		struct k_thread *prev_thread;

		prev_thread = _kernel.threads;
		while (thread != prev_thread->next_thread) {
			prev_thread = prev_thread->next_thread;
		}
		prev_thread->next_thread = thread->next_thread;
	}

	irq_unlock(key);
}
#endif /* CONFIG_THREAD_MONITOR */

#ifdef CONFIG_STACK_SENTINEL
/* Check that the stack sentinel is still present
 *
 * The stack sentinel feature writes a magic value to the lowest 4 bytes of
 * the thread's stack when the thread is initialized. This value gets checked
 * in a few places:
 *
 * 1) In k_yield() if the current thread is not swapped out
 * 2) After servicing a non-nested interrupt
 * 3) In _Swap(), where the outgoing thread's sentinel is checked
 *
 * Item 2 requires support in arch/ code.
 *
 * If the check fails, the thread will be terminated appropriately through
 * the system fatal error handler.
 */
void _check_stack_sentinel(void)
{
	u32_t *stack;

	if (_current->base.thread_state == _THREAD_DUMMY) {
		return;
	}

	stack = (u32_t *)_current->stack_info.start;
	if (*stack != STACK_SENTINEL) {
		/* Restore it so further checks don't trigger this same error */
		*stack = STACK_SENTINEL;
		_k_except_reason(_NANO_ERR_STACK_CHK_FAIL);
	}
}
#endif
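
/*
 * Illustrative sketch of the check above: on a downward-growing stack,
 * an overflow runs past the bottom of the usable region and clobbers
 * the sentinel word first, so the corruption is caught the next time
 * one of the three hooks listed above runs.
 *
 *	stack_info.start -> [ STACK_SENTINEL ]  <- overwritten on overflow
 *	                    [ ... usable stack ... ]
 *	                    [ initial SP ]          (stack grows downward)
 */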

/*
 * Common thread entry point function (used by all threads)
 *
 * This routine invokes the actual thread entry point function and passes
 * it three arguments. It also handles graceful termination of the thread
 * if the entry point function ever returns.
 *
 * This routine does not return, and is marked as such so the compiler won't
 * generate preamble code that is only used by functions that actually return.
 */
FUNC_NORETURN void _thread_entry(k_thread_entry_t entry,
				 void *p1, void *p2, void *p3)
{
	entry(p1, p2, p3);

#ifdef CONFIG_MULTITHREADING
	k_thread_abort(k_current_get());
#else
	for (;;) {
		k_cpu_idle();
	}
#endif

	/*
	 * Compiler can't tell that k_thread_abort() won't return and issues a
	 * warning unless we tell it that control never gets this far.
	 */

	CODE_UNREACHABLE;
}

#ifdef CONFIG_MULTITHREADING
void _impl_k_thread_start(struct k_thread *thread)
{
	int key = irq_lock(); /* protect kernel queues */

	if (_has_thread_started(thread)) {
		irq_unlock(key);
		return;
	}

	_mark_thread_as_started(thread);

	if (_is_thread_ready(thread)) {
		_add_thread_to_ready_q(thread);
		if (_must_switch_threads()) {
			_Swap(key);
			return;
		}
	}

	irq_unlock(key);
}
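
/*
 * Illustrative usage (a sketch; my_thread, my_stack, and my_entry are
 * hypothetical): create a thread with a delay of K_FOREVER so it stays
 * dormant, then release it later via the public k_thread_start()
 * wrapper. Starting an already-started thread is a harmless no-op.
 *
 *	k_tid_t tid = k_thread_create(&my_thread, my_stack,
 *				      K_THREAD_STACK_SIZEOF(my_stack),
 *				      my_entry, NULL, NULL, NULL,
 *				      5, 0, K_FOREVER);
 *	...
 *	k_thread_start(tid);
 */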

#ifdef CONFIG_USERSPACE
_SYSCALL_HANDLER1_SIMPLE_VOID(k_thread_start, K_OBJ_THREAD, struct k_thread *);
#endif
#endif

#ifdef CONFIG_MULTITHREADING
static void schedule_new_thread(struct k_thread *thread, s32_t delay)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
	if (delay == 0) {
		k_thread_start(thread);
	} else {
		s32_t ticks = _TICK_ALIGN + _ms_to_ticks(delay);
		int key = irq_lock();

		_add_thread_timeout(thread, NULL, ticks);
		irq_unlock(key);
	}
#else
	ARG_UNUSED(delay);
	k_thread_start(thread);
#endif
}
#endif

void _setup_new_thread(struct k_thread *new_thread,
		       k_thread_stack_t *stack, size_t stack_size,
		       k_thread_entry_t entry,
		       void *p1, void *p2, void *p3,
		       int prio, u32_t options)
{
	_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
		    prio, options);
#ifdef CONFIG_USERSPACE
	_k_object_init(new_thread);
	_k_object_init(stack);
	new_thread->stack_obj = stack;

	/* Any given thread has access to itself */
	k_object_access_grant(new_thread, new_thread);

#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
	/* _current may be null if the dummy thread is not used */
	if (!_current) {
		return;
	}
#endif
	/* New threads inherit the memory domain membership of the parent */
	if (_current->mem_domain_info.mem_domain) {
		k_mem_domain_add_thread(_current->mem_domain_info.mem_domain,
					new_thread);
	}

	if (options & K_INHERIT_PERMS) {
		_thread_perms_inherit(_current, new_thread);
	}
#endif
}

#ifdef CONFIG_MULTITHREADING
k_tid_t _impl_k_thread_create(struct k_thread *new_thread,
			      k_thread_stack_t *stack,
			      size_t stack_size, k_thread_entry_t entry,
			      void *p1, void *p2, void *p3,
			      int prio, u32_t options, s32_t delay)
{
	__ASSERT(!_is_in_isr(), "Threads may not be created in ISRs");
	_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
			  prio, options);

	if (delay != K_FOREVER) {
		schedule_new_thread(new_thread, delay);
	}
	return new_thread;
}
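
/*
 * Illustrative usage (a sketch; the stack size, priority, and worker_*
 * names are hypothetical): define a stack area at file scope, then
 * spawn a thread on it that starts immediately (delay of K_NO_WAIT).
 *
 *	K_THREAD_STACK_DEFINE(worker_stack, 1024);
 *	struct k_thread worker_thread;
 *
 *	k_thread_create(&worker_thread, worker_stack,
 *			K_THREAD_STACK_SIZEOF(worker_stack),
 *			worker_entry, NULL, NULL, NULL,
 *			K_PRIO_PREEMPT(7), 0, K_NO_WAIT);
 */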

#ifdef CONFIG_USERSPACE
_SYSCALL_HANDLER(k_thread_create,
		 new_thread_p, stack_p, stack_size, entry, p1, more_args)
{
	int prio;
	u32_t options, delay, guard_size, total_size;
	struct _k_object *stack_object;
	struct k_thread *new_thread = (struct k_thread *)new_thread_p;
	volatile struct _syscall_10_args *margs =
		(volatile struct _syscall_10_args *)more_args;
	k_thread_stack_t *stack = (k_thread_stack_t *)stack_p;

	/* The thread and stack objects *must* be in an uninitialized state */
	_SYSCALL_OBJ_NEVER_INIT(new_thread, K_OBJ_THREAD);
	stack_object = _k_object_find(stack);
	_SYSCALL_VERIFY_MSG(!_obj_validation_check(stack_object, stack,
						   K_OBJ__THREAD_STACK_ELEMENT,
						   _OBJ_INIT_FALSE),
			    "bad stack object");

	/* Verify that the stack size passed in is OK by computing the total
	 * size and comparing it with the size value in the object metadata
	 */
	guard_size = (u32_t)K_THREAD_STACK_BUFFER(stack) - (u32_t)stack;
	_SYSCALL_VERIFY_MSG(!__builtin_uadd_overflow(guard_size, stack_size,
						     &total_size),
			    "stack size overflow (%u+%u)", stack_size,
			    guard_size);
	/* They really ought to be equal; make this more strict? */
	_SYSCALL_VERIFY_MSG(total_size <= stack_object->data,
			    "stack size %u is too big, max is %u",
			    total_size, stack_object->data);

	/* Verify the struct containing args 6-10 */
	_SYSCALL_MEMORY_READ(margs, sizeof(*margs));

	/* Stash struct arguments in local variables to prevent switcheroo
	 * attacks
	 */
	prio = margs->arg8;
	options = margs->arg9;
	delay = margs->arg10;
	compiler_barrier();

	/* User threads may only create other user threads, and they can't
	 * be marked as essential
	 */
	_SYSCALL_VERIFY(options & K_USER);
	_SYSCALL_VERIFY(!(options & K_ESSENTIAL));

	/* Check validity of prio argument; must be the same or worse priority
	 * than the caller
	 */
	_SYSCALL_VERIFY(_VALID_PRIO(prio, NULL));
	_SYSCALL_VERIFY(_is_prio_lower_or_equal(prio, _current->base.prio));

	_setup_new_thread((struct k_thread *)new_thread, stack, stack_size,
			  (k_thread_entry_t)entry, (void *)p1,
			  (void *)margs->arg6, (void *)margs->arg7, prio,
			  options);

	if (delay != K_FOREVER) {
		schedule_new_thread(new_thread, delay);
	}

	return new_thread_p;
}
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_MULTITHREADING */

int _impl_k_thread_cancel(k_tid_t tid)
{
	struct k_thread *thread = tid;

	int key = irq_lock();

	if (_has_thread_started(thread) ||
	    !_is_thread_timeout_active(thread)) {
		irq_unlock(key);
		return -EINVAL;
	}

	_abort_thread_timeout(thread);
	_thread_monitor_exit(thread);

	irq_unlock(key);

	return 0;
}
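
/*
 * Illustrative usage (a sketch): cancel a thread that was created with
 * a start delay and has not yet begun executing. Once the thread has
 * started, the public k_thread_cancel() wrapper returns -EINVAL and
 * k_thread_abort() must be used instead.
 *
 *	k_tid_t tid = k_thread_create(..., 5, 0, 1000);
 *
 *	if (k_thread_cancel(tid) != 0) {
 *		k_thread_abort(tid);
 *	}
 */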

#ifdef CONFIG_USERSPACE
_SYSCALL_HANDLER1_SIMPLE(k_thread_cancel, K_OBJ_THREAD, struct k_thread *);
#endif

static inline int is_in_any_group(struct _static_thread_data *thread_data,
				  u32_t groups)
{
	return !!(thread_data->init_groups & groups);
}

void _k_thread_group_op(u32_t groups, void (*func)(struct k_thread *))
{
	unsigned int key;

	__ASSERT(!_is_in_isr(), "");

	_sched_lock();

	/* Invoke func() on each static thread in the specified group set. */

	_FOREACH_STATIC_THREAD(thread_data) {
		if (is_in_any_group(thread_data, groups)) {
			key = irq_lock();
			func(thread_data->init_thread);
			irq_unlock(key);
		}
	}

	/*
	 * If the current thread is still in a ready state, then let the
	 * "unlock scheduler" code determine if any rescheduling is needed.
	 */
	if (_is_thread_ready(_current)) {
		k_sched_unlock();
		return;
	}

	/* The current thread is no longer in a ready state, so reschedule. */
	key = irq_lock();
	_sched_unlock_no_reschedule();
	_Swap(key);
}

void _k_thread_single_start(struct k_thread *thread)
{
	_mark_thread_as_started(thread);

	if (_is_thread_ready(thread)) {
		_add_thread_to_ready_q(thread);
	}
}

void _k_thread_single_suspend(struct k_thread *thread)
{
	if (_is_thread_ready(thread)) {
		_remove_thread_from_ready_q(thread);
	}

	_mark_thread_as_suspended(thread);
}

void _impl_k_thread_suspend(struct k_thread *thread)
{
	unsigned int key = irq_lock();

	_k_thread_single_suspend(thread);

	if (thread == _current) {
		_Swap(key);
	} else {
		irq_unlock(key);
	}
}

#ifdef CONFIG_USERSPACE
_SYSCALL_HANDLER1_SIMPLE_VOID(k_thread_suspend, K_OBJ_THREAD, k_tid_t);
#endif

void _k_thread_single_resume(struct k_thread *thread)
{
	_mark_thread_as_not_suspended(thread);

	if (_is_thread_ready(thread)) {
		_add_thread_to_ready_q(thread);
	}
}

void _impl_k_thread_resume(struct k_thread *thread)
{
	unsigned int key = irq_lock();

	_k_thread_single_resume(thread);

	_reschedule_threads(key);
}
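
/*
 * Illustrative usage of the public wrappers (a sketch; worker_tid is
 * hypothetical): park a worker thread, then let it run again.
 * Suspending _current swaps out immediately; resuming another thread
 * may trigger a reschedule.
 *
 *	k_thread_suspend(worker_tid);
 *	...
 *	k_thread_resume(worker_tid);
 */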

#ifdef CONFIG_USERSPACE
_SYSCALL_HANDLER1_SIMPLE_VOID(k_thread_resume, K_OBJ_THREAD, k_tid_t);
#endif

void _k_thread_single_abort(struct k_thread *thread)
{
	if (thread->fn_abort != NULL) {
		thread->fn_abort();
	}

	if (_is_thread_ready(thread)) {
		_remove_thread_from_ready_q(thread);
	} else {
		if (_is_thread_pending(thread)) {
			_unpend_thread(thread);
		}
		if (_is_thread_timeout_active(thread)) {
			_abort_thread_timeout(thread);
		}
	}

	thread->base.thread_state |= _THREAD_DEAD;
#ifdef CONFIG_KERNEL_EVENT_LOGGER_THREAD
	_sys_k_event_logger_thread_exit(thread);
#endif

#ifdef CONFIG_USERSPACE
	/* Clear initialized state so that this thread object may be re-used,
	 * and so that API calls made on it from user threads trigger errors
	 */
	_k_object_uninit(thread->stack_obj);
	_k_object_uninit(thread);

	/* Revoke permissions on thread's ID so that it may be recycled */
	_thread_perms_all_clear(thread);
#endif
}

#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_USERSPACE
extern char __object_access_start[];
extern char __object_access_end[];

static void grant_static_access(void)
{
	struct _k_object_assignment *pos;

	for (pos = (struct _k_object_assignment *)__object_access_start;
	     pos < (struct _k_object_assignment *)__object_access_end;
	     pos++) {
		for (int i = 0; pos->objects[i] != NULL; i++) {
			k_object_access_grant(pos->objects[i],
					      pos->thread);
		}
	}
}
#endif /* CONFIG_USERSPACE */

void _init_static_threads(void)
{
	unsigned int key;

	_FOREACH_STATIC_THREAD(thread_data) {
		_setup_new_thread(
			thread_data->init_thread,
			thread_data->init_stack,
			thread_data->init_stack_size,
			thread_data->init_entry,
			thread_data->init_p1,
			thread_data->init_p2,
			thread_data->init_p3,
			thread_data->init_prio,
			thread_data->init_options);

		thread_data->init_thread->init_data = thread_data;
	}

#ifdef CONFIG_USERSPACE
	grant_static_access();
#endif
	_sched_lock();

	/*
	 * Non-legacy static threads may be started immediately or after a
	 * previously specified delay. Even though the scheduler is locked,
	 * ticks can still be delivered and processed. Lock interrupts so
	 * that each countdown until execution begins from the same tick.
	 *
	 * Note that static threads defined using the legacy API have a
	 * delay of K_FOREVER.
	 */
	key = irq_lock();
	_FOREACH_STATIC_THREAD(thread_data) {
		if (thread_data->init_delay != K_FOREVER) {
			schedule_new_thread(thread_data->init_thread,
					    thread_data->init_delay);
		}
	}
	irq_unlock(key);
	k_sched_unlock();
}
#endif

void _init_thread_base(struct _thread_base *thread_base, int priority,
		       u32_t initial_state, unsigned int options)
{
	/* k_q_node is initialized upon first insertion in a list */

	thread_base->user_options = (u8_t)options;
	thread_base->thread_state = (u8_t)initial_state;

	thread_base->prio = priority;

	thread_base->sched_locked = 0;

	/* swap_data does not need to be initialized */

	_init_thread_timeout(thread_base);
}

u32_t _k_thread_group_mask_get(struct k_thread *thread)
{
	struct _static_thread_data *thread_data = thread->init_data;

	return thread_data->init_groups;
}

void _k_thread_group_join(u32_t groups, struct k_thread *thread)
{
	struct _static_thread_data *thread_data = thread->init_data;

	thread_data->init_groups |= groups;
}

void _k_thread_group_leave(u32_t groups, struct k_thread *thread)
{
	struct _static_thread_data *thread_data = thread->init_data;

	thread_data->init_groups &= ~groups;
}

void k_thread_access_grant(struct k_thread *thread, ...)
{
#ifdef CONFIG_USERSPACE
	va_list args;

	va_start(args, thread);

	while (1) {
		void *object = va_arg(args, void *);

		if (object == NULL) {
			break;
		}
		k_object_access_grant(object, thread);
	}
	va_end(args);
#else
	ARG_UNUSED(thread);
#endif
}
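
/*
 * Illustrative usage (a sketch; producer_tid and the objects are
 * hypothetical): grant a thread access to several kernel objects in a
 * single call. As the loop above shows, the argument list must be
 * terminated with NULL.
 *
 *	k_thread_access_grant(producer_tid, &my_sem, &my_queue, NULL);
 */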

FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
					    void *p1, void *p2, void *p3)
{
	_current->base.user_options |= K_USER;
	_thread_essential_clear();
#ifdef CONFIG_USERSPACE
	_arch_user_mode_enter(entry, p1, p2, p3);
#else
	/* XXX In this case we do not reset the stack */
	_thread_entry(entry, p1, p2, p3);
#endif
}
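
/*
 * Illustrative usage (a sketch; user_main is a hypothetical entry
 * point): drop the current supervisor thread into user mode once its
 * privileged setup is complete. This call does not return.
 *
 *	void user_main(void *p1, void *p2, void *p3) { ... }
 *
 *	k_thread_user_mode_enter(user_main, NULL, NULL, NULL);
 */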