/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Kernel thread support
 *
 * This module provides general purpose thread support.
 */

#include <kernel.h>

#include <toolchain.h>
#include <linker/sections.h>

#include <spinlock.h>
#include <kernel_structs.h>
#include <misc/printk.h>
#include <misc/math_extras.h>
#include <sys_clock.h>
#include <drivers/system_timer.h>
#include <ksched.h>
#include <wait_q.h>
#include <atomic.h>
#include <syscall_handler.h>
#include <kernel_internal.h>
#include <kswap.h>
#include <init.h>
#include <tracing.h>
#include <stdbool.h>

static struct k_spinlock lock;

#define _FOREACH_STATIC_THREAD(thread_data) \
	Z_STRUCT_SECTION_FOREACH(_static_thread_data, thread_data)

void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
{
#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb cannot be NULL");

	/*
	 * The lock is needed to make sure that the _kernel.threads list is
	 * not modified by user_cb, either directly or indirectly (the
	 * indirect ways are by calling k_thread_create() or
	 * k_thread_abort() from user_cb).
	 */
	key = k_spin_lock(&lock);
	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		user_cb(thread, user_data);
	}
	k_spin_unlock(&lock, key);
#endif
}
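
/*
 * An illustrative usage sketch (not part of this file's logic): counting
 * threads via the iterator above. The names count_threads and num_threads
 * are hypothetical. The callback runs with the list spinlock held, so it
 * must not call k_thread_create() or k_thread_abort(), as noted above.
 *
 *	static void count_threads(const struct k_thread *thread,
 *				  void *user_data)
 *	{
 *		int *count = user_data;
 *
 *		ARG_UNUSED(thread);
 *		(*count)++;
 *	}
 *
 *	int num_threads = 0;
 *	k_thread_foreach(count_threads, &num_threads);
 */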

bool k_is_in_isr(void)
{
	return z_is_in_isr();
}

/*
 * This function tags the current thread as essential to system operation.
 * Exceptions raised by this thread will be treated as a fatal system error.
 */
void z_thread_essential_set(void)
{
	_current->base.user_options |= K_ESSENTIAL;
}

/*
 * This function tags the current thread as not essential to system operation.
 * Exceptions raised by this thread may be recoverable.
 * (This is the default tag for a thread.)
 */
void z_thread_essential_clear(void)
{
	_current->base.user_options &= ~K_ESSENTIAL;
}

/*
 * This routine indicates if the current thread is an essential system thread.
 *
 * Returns true if current thread is essential, false if it is not.
 */
bool z_is_thread_essential(void)
{
	return (_current->base.user_options & K_ESSENTIAL) == K_ESSENTIAL;
}
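
/*
 * Illustrative sketch of how these internal helpers pair up: a thread
 * that must not die marks itself essential around critical work, so a
 * fault in between is escalated to a fatal system error.
 * critical_system_work() is a hypothetical function.
 *
 *	z_thread_essential_set();
 *	critical_system_work();
 *	z_thread_essential_clear();
 */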

#ifdef CONFIG_SYS_CLOCK_EXISTS
void z_impl_k_busy_wait(u32_t usec_to_wait)
{
#if !defined(CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT)
	/* use 64-bit math to prevent overflow when multiplying */
	u32_t cycles_to_wait = (u32_t)(
		(u64_t)usec_to_wait *
		(u64_t)sys_clock_hw_cycles_per_sec() /
		(u64_t)USEC_PER_SEC
	);
	u32_t start_cycles = k_cycle_get_32();

	for (;;) {
		u32_t current_cycles = k_cycle_get_32();

		/* this handles the rollover on an unsigned 32-bit value */
		if ((current_cycles - start_cycles) >= cycles_to_wait) {
			break;
		}
	}
#else
	z_arch_busy_wait(usec_to_wait);
#endif /* CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT */
}
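
/*
 * Worked example of the rollover arithmetic above (illustrative): with
 * start_cycles == 0xFFFFFFF0 and current_cycles == 0x00000010, the
 * unsigned subtraction wraps to 0x20, which is exactly the number of
 * cycles elapsed across the 32-bit counter wrap. A typical call through
 * the public API, accepting that the CPU spins rather than sleeps:
 *
 *	k_busy_wait(1000);	busy-spin for roughly 1000 microseconds
 */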

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_busy_wait, usec_to_wait)
{
	z_impl_k_busy_wait(usec_to_wait);
	return 0;
}
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_SYS_CLOCK_EXISTS */

#ifdef CONFIG_THREAD_CUSTOM_DATA
void z_impl_k_thread_custom_data_set(void *value)
{
	_current->custom_data = value;
}

void *z_impl_k_thread_custom_data_get(void)
{
	return _current->custom_data;
}

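/*
 * Illustrative sketch using the public wrappers for the accessors above
 * as a one-slot thread-local store; struct my_ctx is hypothetical and
 * CONFIG_THREAD_CUSTOM_DATA=y is assumed.
 *
 *	struct my_ctx { int last_error; };
 *	static struct my_ctx ctx;
 *
 *	k_thread_custom_data_set(&ctx);
 *	...
 *	struct my_ctx *cur = k_thread_custom_data_get();
 *	cur->last_error = -EIO;
 */
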
#endif /* CONFIG_THREAD_CUSTOM_DATA */

#if defined(CONFIG_THREAD_MONITOR)
/*
 * Remove a thread from the kernel's list of active threads.
 */
void z_thread_monitor_exit(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&lock);

	if (thread == _kernel.threads) {
		_kernel.threads = _kernel.threads->next_thread;
	} else {
		struct k_thread *prev_thread;

		prev_thread = _kernel.threads;
		while ((prev_thread != NULL) &&
			(thread != prev_thread->next_thread)) {
			prev_thread = prev_thread->next_thread;
		}
		if (prev_thread != NULL) {
			prev_thread->next_thread = thread->next_thread;
		}
	}

	k_spin_unlock(&lock, key);
}
#endif /* CONFIG_THREAD_MONITOR */

#ifdef CONFIG_THREAD_NAME
void z_impl_k_thread_name_set(struct k_thread *thread, const char *value)
{
	if (thread == NULL) {
		_current->name = value;
	} else {
		thread->name = value;
	}
}

const char *z_impl_k_thread_name_get(struct k_thread *thread)
{
	return (const char *)thread->name;
}

#else
void z_impl_k_thread_name_set(k_tid_t thread_id, const char *value)
{
	ARG_UNUSED(thread_id);
	ARG_UNUSED(value);
}

const char *z_impl_k_thread_name_get(k_tid_t thread_id)
{
	ARG_UNUSED(thread_id);
	return NULL;
}
#endif /* CONFIG_THREAD_NAME */

#ifdef CONFIG_USERSPACE

#if defined(CONFIG_THREAD_NAME)
Z_SYSCALL_HANDLER(k_thread_name_set, thread, data)
{
	char *name_copy = NULL;

	name_copy = z_user_string_alloc_copy((char *)data, 64);
	z_impl_k_thread_name_set((struct k_thread *)thread, name_copy);
	return 0;
}

Z_SYSCALL_HANDLER1_SIMPLE(k_thread_name_get, K_OBJ_THREAD, k_tid_t);
#endif /* CONFIG_THREAD_NAME */

#ifdef CONFIG_THREAD_CUSTOM_DATA
Z_SYSCALL_HANDLER(k_thread_custom_data_set, data)
{
	z_impl_k_thread_custom_data_set((void *)data);
	return 0;
}

Z_SYSCALL_HANDLER0_SIMPLE(k_thread_custom_data_get);
#endif /* CONFIG_THREAD_CUSTOM_DATA */

#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_STACK_SENTINEL
/* Check that the stack sentinel is still present
 *
 * The stack sentinel feature writes a magic value to the lowest 4 bytes of
 * the thread's stack when the thread is initialized. This value gets checked
 * in a few places:
 *
 * 1) In k_yield() if the current thread is not swapped out
 * 2) After servicing a non-nested interrupt
 * 3) In z_swap(), check the sentinel in the outgoing thread
 *
 * Item 2 requires support in arch/ code.
 *
 * If the check fails, the thread will be terminated appropriately through
 * the system fatal error handler.
 */
void z_check_stack_sentinel(void)
{
	u32_t *stack;

	if ((_current->base.thread_state & _THREAD_DUMMY) != 0) {
		return;
	}

	stack = (u32_t *)_current->stack_info.start;
	if (*stack != STACK_SENTINEL) {
		/* Restore it so further checks don't trigger this same error */
		*stack = STACK_SENTINEL;
		z_except_reason(_NANO_ERR_STACK_CHK_FAIL);
	}
}
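
/*
 * Configuration sketch (illustrative): the sentinel machinery above is
 * only built when the feature is selected, e.g. in an application's
 * prj.conf:
 *
 *	CONFIG_STACK_SENTINEL=y
 *
 * Note the check is best-effort: an overflow that never writes the
 * lowest word of the stack goes undetected.
 */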
#endif /* CONFIG_STACK_SENTINEL */

#ifdef CONFIG_MULTITHREADING
void z_impl_k_thread_start(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&lock); /* protect kernel queues */

	if (z_has_thread_started(thread)) {
		k_spin_unlock(&lock, key);
		return;
	}

	z_mark_thread_as_started(thread);
	z_ready_thread(thread);
	z_reschedule(&lock, key);
}
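
/*
 * Usage sketch (illustrative): a thread created with a delay of
 * K_FOREVER stays dormant until this routine runs on it. my_thread,
 * my_stack, my_entry and MY_PRIO are assumed to be defined elsewhere.
 *
 *	k_tid_t tid = k_thread_create(&my_thread, my_stack,
 *				      K_THREAD_STACK_SIZEOF(my_stack),
 *				      my_entry, NULL, NULL, NULL,
 *				      MY_PRIO, 0, K_FOREVER);
 *	...
 *	k_thread_start(tid);
 */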

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER1_SIMPLE_VOID(k_thread_start, K_OBJ_THREAD, struct k_thread *);
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_MULTITHREADING */

#ifdef CONFIG_MULTITHREADING
static void schedule_new_thread(struct k_thread *thread, s32_t delay)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
	if (delay == 0) {
		k_thread_start(thread);
	} else {
		s32_t ticks = _TICK_ALIGN + z_ms_to_ticks(delay);

		z_add_thread_timeout(thread, ticks);
	}
#else
	ARG_UNUSED(delay);
	k_thread_start(thread);
#endif /* CONFIG_SYS_CLOCK_EXISTS */
}
#endif /* CONFIG_MULTITHREADING */

#if !CONFIG_STACK_POINTER_RANDOM
static inline size_t adjust_stack_size(size_t stack_size)
{
	return stack_size;
}
#else
int z_stack_adjust_initialized;

static inline size_t adjust_stack_size(size_t stack_size)
{
	size_t random_val;

	if (!z_stack_adjust_initialized) {
		random_val = z_early_boot_rand32_get();
	} else {
		random_val = sys_rand32_get();
	}

	/* Don't need to worry about alignment of the size here, z_new_thread()
	 * is required to do it
	 *
	 * FIXME: Not the best way to get a random number in a range.
	 * See #6493
	 */
	const size_t fuzz = random_val % CONFIG_STACK_POINTER_RANDOM;

	if (unlikely(fuzz * 2 > stack_size)) {
		return stack_size;
	}

	return stack_size - fuzz;
}
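
/*
 * Worked example (illustrative): with CONFIG_STACK_POINTER_RANDOM=100
 * and random_val == 1234, fuzz = 1234 % 100 = 34, so a 1024-byte stack
 * is reported as 990 bytes and the initial stack pointer shifts by 34
 * bytes. The fuzz * 2 > stack_size guard skips the adjustment when the
 * stack is too small to give up that much room.
 */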
#if defined(CONFIG_STACK_GROWS_UP)
	/* This case is rare enough that it is not handled for now */
#error "Stack pointer randomization not implemented for upward growing stacks"
#endif /* CONFIG_STACK_GROWS_UP */

#endif /* CONFIG_STACK_POINTER_RANDOM */

/*
 * Note:
 * The caller must guarantee that the stack_size passed here corresponds
 * to the amount of stack memory available for the thread.
 */
void z_setup_new_thread(struct k_thread *new_thread,
			k_thread_stack_t *stack, size_t stack_size,
			k_thread_entry_t entry,
			void *p1, void *p2, void *p3,
			int prio, u32_t options, const char *name)
{
	stack_size = adjust_stack_size(stack_size);

#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
#ifndef CONFIG_THREAD_USERSPACE_LOCAL_DATA_ARCH_DEFER_SETUP
	/* reserve space on top of stack for local data */
	stack_size = STACK_ROUND_DOWN(stack_size
			- sizeof(*new_thread->userspace_local_data));
#endif
#endif

	z_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
		     prio, options);

#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
#ifndef CONFIG_THREAD_USERSPACE_LOCAL_DATA_ARCH_DEFER_SETUP
	/* don't set again if the arch's own code in z_new_thread() has
	 * already set the pointer.
	 */
	new_thread->userspace_local_data =
		(struct _thread_userspace_local_data *)
		(Z_THREAD_STACK_BUFFER(stack) + stack_size);
#endif
#endif

#ifdef CONFIG_THREAD_MONITOR
	new_thread->entry.pEntry = entry;
	new_thread->entry.parameter1 = p1;
	new_thread->entry.parameter2 = p2;
	new_thread->entry.parameter3 = p3;

	k_spinlock_key_t key = k_spin_lock(&lock);

	new_thread->next_thread = _kernel.threads;
	_kernel.threads = new_thread;
	k_spin_unlock(&lock, key);
#endif
#ifdef CONFIG_THREAD_NAME
	new_thread->name = name;
#endif
#ifdef CONFIG_USERSPACE
	z_object_init(new_thread);
	z_object_init(stack);
	new_thread->stack_obj = stack;

	/* Any given thread has access to itself */
	k_object_access_grant(new_thread, new_thread);
#endif
#ifdef CONFIG_SCHED_CPU_MASK
	new_thread->base.cpu_mask = -1;
#endif
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
	/* _current may be null if the dummy thread is not used */
	if (!_current) {
		new_thread->resource_pool = NULL;
		return;
	}
#endif
#ifdef CONFIG_USERSPACE
	/* New threads inherit any memory domain membership from the parent */
	if (_current->mem_domain_info.mem_domain != NULL) {
		k_mem_domain_add_thread(_current->mem_domain_info.mem_domain,
					new_thread);
	}

	if ((options & K_INHERIT_PERMS) != 0U) {
		z_thread_perms_inherit(_current, new_thread);
	}
#endif
#ifdef CONFIG_SCHED_DEADLINE
	new_thread->base.prio_deadline = 0;
#endif
	new_thread->resource_pool = _current->resource_pool;
	sys_trace_thread_create(new_thread);
}

#ifdef CONFIG_MULTITHREADING
k_tid_t z_impl_k_thread_create(struct k_thread *new_thread,
			       k_thread_stack_t *stack,
			       size_t stack_size, k_thread_entry_t entry,
			       void *p1, void *p2, void *p3,
			       int prio, u32_t options, s32_t delay)
{
	__ASSERT(!z_is_in_isr(), "Threads may not be created in ISRs");

	/* Special case, only for unit tests */
#if defined(CONFIG_TEST) && defined(CONFIG_ARCH_HAS_USERSPACE) && !defined(CONFIG_USERSPACE)
	__ASSERT((options & K_USER) == 0,
		 "Platform is capable of user mode, and test thread created with K_USER option,"
		 " but neither CONFIG_TEST_USERSPACE nor CONFIG_USERSPACE is set\n");
#endif

	z_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
			   prio, options, NULL);

	if (delay != K_FOREVER) {
		schedule_new_thread(new_thread, delay);
	}

	return new_thread;
}
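
/*
 * Usage sketch (illustrative): the common pattern of pairing a
 * statically defined stack and thread object with this call; all names
 * below are hypothetical.
 *
 *	#define MY_STACK_SIZE 1024
 *	#define MY_PRIO 5
 *
 *	K_THREAD_STACK_DEFINE(my_stack_area, MY_STACK_SIZE);
 *	static struct k_thread my_thread_data;
 *
 *	k_tid_t tid = k_thread_create(&my_thread_data, my_stack_area,
 *				      K_THREAD_STACK_SIZEOF(my_stack_area),
 *				      my_entry, NULL, NULL, NULL,
 *				      MY_PRIO, 0, K_NO_WAIT);
 *
 * K_NO_WAIT starts the thread immediately; a positive delay defers it,
 * and K_FOREVER leaves it dormant until k_thread_start().
 */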

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_thread_create,
		  new_thread_p, stack_p, stack_size, entry, p1, more_args)
{
	int prio;
	u32_t options, delay;
	u32_t total_size;

	struct _k_object *stack_object;
	struct k_thread *new_thread = (struct k_thread *)new_thread_p;
	volatile struct _syscall_10_args *margs =
		(volatile struct _syscall_10_args *)more_args;
	k_thread_stack_t *stack = (k_thread_stack_t *)stack_p;

	/* The thread and stack objects *must* be in an uninitialized state */
	Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(new_thread, K_OBJ_THREAD));
	stack_object = z_object_find(stack);
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(z_obj_validation_check(stack_object, stack,
						K_OBJ__THREAD_STACK_ELEMENT,
						_OBJ_INIT_FALSE) == 0,
				    "bad stack object"));

	/* Verify that the stack size passed in is OK by computing the total
	 * size and comparing it with the size value in the object metadata
	 */
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(!u32_add_overflow(K_THREAD_STACK_RESERVED,
						      stack_size, &total_size),
				    "stack size overflow (%u+%u)", stack_size,
				    K_THREAD_STACK_RESERVED));

	/* Testing less-than-or-equal since additional room may have been
	 * allocated for alignment constraints
	 */
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(total_size <= stack_object->data,
				    "stack size %u is too big, max is %u",
				    total_size, stack_object->data));

	/* Verify the struct containing args 6-10 */
	Z_OOPS(Z_SYSCALL_MEMORY_READ(margs, sizeof(*margs)));

	/* Stash struct arguments in local variables to prevent switcheroo
	 * attacks
	 */
	prio = margs->arg8;
	options = margs->arg9;
	delay = margs->arg10;
	compiler_barrier();

	/* User threads may only create other user threads and they can't
	 * be marked as essential
	 */
	Z_OOPS(Z_SYSCALL_VERIFY(options & K_USER));
	Z_OOPS(Z_SYSCALL_VERIFY(!(options & K_ESSENTIAL)));

	/* Check validity of prio argument; must be the same or worse priority
	 * than the caller
	 */
	Z_OOPS(Z_SYSCALL_VERIFY(_is_valid_prio(prio, NULL)));
	Z_OOPS(Z_SYSCALL_VERIFY(z_is_prio_lower_or_equal(prio,
							_current->base.prio)));

	z_setup_new_thread((struct k_thread *)new_thread, stack, stack_size,
			   (k_thread_entry_t)entry, (void *)p1,
			   (void *)margs->arg6, (void *)margs->arg7, prio,
			   options, NULL);

	if (delay != K_FOREVER) {
		schedule_new_thread(new_thread, delay);
	}

	return new_thread_p;
}
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_MULTITHREADING */

void z_thread_single_suspend(struct k_thread *thread)
{
	if (z_is_thread_ready(thread)) {
		z_remove_thread_from_ready_q(thread);
	}

	z_mark_thread_as_suspended(thread);
}

void z_impl_k_thread_suspend(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&lock);

	z_thread_single_suspend(thread);

	sys_trace_thread_suspend(thread);

	if (thread == _current) {
		z_reschedule(&lock, key);
	} else {
		k_spin_unlock(&lock, key);
	}
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER1_SIMPLE_VOID(k_thread_suspend, K_OBJ_THREAD, k_tid_t);
#endif

void z_thread_single_resume(struct k_thread *thread)
{
	z_mark_thread_as_not_suspended(thread);
	z_ready_thread(thread);
}

void z_impl_k_thread_resume(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&lock);

	z_thread_single_resume(thread);

	sys_trace_thread_resume(thread);
	z_reschedule(&lock, key);
}
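
/*
 * Usage sketch (illustrative): suspension is a single state bit, not a
 * counter, so one resume undoes any number of suspends on tid.
 *
 *	k_thread_suspend(tid);
 *	...
 *	k_thread_resume(tid);
 */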

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER1_SIMPLE_VOID(k_thread_resume, K_OBJ_THREAD, k_tid_t);
#endif

void z_thread_single_abort(struct k_thread *thread)
{
	if (thread->fn_abort != NULL) {
		thread->fn_abort();
	}

	if (IS_ENABLED(CONFIG_SMP)) {
		z_sched_abort(thread);
	}

	if (z_is_thread_ready(thread)) {
		z_remove_thread_from_ready_q(thread);
	} else {
		if (z_is_thread_pending(thread)) {
			z_unpend_thread_no_timeout(thread);
		}
		if (z_is_thread_timeout_active(thread)) {
			(void)z_abort_thread_timeout(thread);
		}
	}

	thread->base.thread_state |= _THREAD_DEAD;

	sys_trace_thread_abort(thread);

#ifdef CONFIG_USERSPACE
	/* Clear the initialized state so that this thread object may be
	 * re-used, and so that API calls made on it from user threads will
	 * trigger errors.
	 */
	z_object_uninit(thread->stack_obj);
	z_object_uninit(thread);

	/* Revoke permissions on thread's ID so that it may be recycled */
	z_thread_perms_all_clear(thread);
#endif /* CONFIG_USERSPACE */
}

#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_USERSPACE

static void grant_static_access(void)
{
	Z_STRUCT_SECTION_FOREACH(_k_object_assignment, pos) {
		for (int i = 0; pos->objects[i] != NULL; i++) {
			k_object_access_grant(pos->objects[i],
					      pos->thread);
		}
	}
}
#endif /* CONFIG_USERSPACE */

void z_init_static_threads(void)
{
	_FOREACH_STATIC_THREAD(thread_data) {
		z_setup_new_thread(
			thread_data->init_thread,
			thread_data->init_stack,
			thread_data->init_stack_size,
			thread_data->init_entry,
			thread_data->init_p1,
			thread_data->init_p2,
			thread_data->init_p3,
			thread_data->init_prio,
			thread_data->init_options,
			thread_data->init_name);

		thread_data->init_thread->init_data = thread_data;
	}

#ifdef CONFIG_USERSPACE
	grant_static_access();
#endif

	/*
	 * Non-legacy static threads may be started immediately or
	 * after a previously specified delay. Even though the
	 * scheduler is locked, ticks can still be delivered and
	 * processed. Take a sched lock to prevent them from running
	 * until they are all started.
	 *
	 * Note that static threads defined using the legacy API have a
	 * delay of K_FOREVER.
	 */
	k_sched_lock();
	_FOREACH_STATIC_THREAD(thread_data) {
		if (thread_data->init_delay != K_FOREVER) {
			schedule_new_thread(thread_data->init_thread,
					    thread_data->init_delay);
		}
	}
	k_sched_unlock();
}
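
/*
 * A static thread that this routine would pick up at boot looks like
 * the following (illustrative; my_tid and my_entry are hypothetical):
 *
 *	K_THREAD_DEFINE(my_tid, 1024, my_entry, NULL, NULL, NULL,
 *			5, 0, 0);
 *
 * The final argument is the start delay in milliseconds; K_FOREVER
 * keeps the thread dormant until k_thread_start() is called on it.
 */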
#endif /* CONFIG_MULTITHREADING */

void z_init_thread_base(struct _thread_base *thread_base, int priority,
			u32_t initial_state, unsigned int options)
{
	/* k_q_node is initialized upon first insertion in a list */

	thread_base->user_options = (u8_t)options;
	thread_base->thread_state = (u8_t)initial_state;

	thread_base->prio = priority;

	thread_base->sched_locked = 0U;

	/* swap_data does not need to be initialized */

	z_init_thread_timeout(thread_base);
}

FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
					    void *p1, void *p2, void *p3)
{
	_current->base.user_options |= K_USER;
	z_thread_essential_clear();
#ifdef CONFIG_THREAD_MONITOR
	_current->entry.pEntry = entry;
	_current->entry.parameter1 = p1;
	_current->entry.parameter2 = p2;
	_current->entry.parameter3 = p3;
#endif
#ifdef CONFIG_USERSPACE
	z_arch_user_mode_enter(entry, p1, p2, p3);
#else
	/* XXX In this case we do not reset the stack */
	z_thread_entry(entry, p1, p2, p3);
#endif
}
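
/*
 * Usage sketch (illustrative): a thread performing privileged setup and
 * then dropping to user mode permanently. do_privileged_setup() and
 * user_entry are hypothetical; the transition is one-way and the call
 * does not return.
 *
 *	static void thread_entry(void *p1, void *p2, void *p3)
 *	{
 *		do_privileged_setup();
 *		k_thread_user_mode_enter(user_entry, p1, p2, p3);
 *	}
 */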

/* These spinlock assertion predicates are defined here because having
 * them in spinlock.h is a giant header ordering headache.
 */
#ifdef SPIN_VALIDATE
bool z_spin_lock_valid(struct k_spinlock *l)
{
	if (l->thread_cpu) {
		if ((l->thread_cpu & 3) == _current_cpu->id) {
			return false;
		}
	}
	return true;
}

bool z_spin_unlock_valid(struct k_spinlock *l)
{
	if (l->thread_cpu != (_current_cpu->id | (uintptr_t)_current)) {
		return false;
	}
	l->thread_cpu = 0;
	return true;
}

void z_spin_lock_set_owner(struct k_spinlock *l)
{
	l->thread_cpu = _current_cpu->id | (uintptr_t)_current;
}
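
/*
 * Illustrative layout of the thread_cpu field used above: since struct
 * k_thread objects are word-aligned, the low 2 bits of the owner pointer
 * are free to hold the CPU id. For example, owner 0x20001000 locking on
 * CPU 2 stores 0x20001002; z_spin_lock_valid() masks off all but the
 * low bits to detect a CPU re-taking a lock it already holds.
 */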
#endif /* SPIN_VALIDATE */

int z_impl_k_float_disable(struct k_thread *thread)
{
#if defined(CONFIG_FLOAT) && defined(CONFIG_FP_SHARING)
	return z_arch_float_disable(thread);
#else
	return -ENOSYS;
#endif /* CONFIG_FLOAT && CONFIG_FP_SHARING */
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_float_disable, thread_p)
{
	struct k_thread *thread = (struct k_thread *)thread_p;

	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));

	return z_impl_k_float_disable(thread);
}
#endif /* CONFIG_USERSPACE */