/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Kernel thread support
 *
 * This module provides general purpose thread support.
 */

#include <kernel.h>

#include <toolchain.h>
#include <linker/sections.h>

#include <kernel_structs.h>
#include <misc/printk.h>
#include <sys_clock.h>
#include <drivers/system_timer.h>
#include <ksched.h>
#include <wait_q.h>
#include <atomic.h>
#include <syscall_handler.h>

extern struct _static_thread_data _static_thread_data_list_start[];
extern struct _static_thread_data _static_thread_data_list_end[];

#define _FOREACH_STATIC_THREAD(thread_data)		\
	for (struct _static_thread_data *thread_data =	\
	     _static_thread_data_list_start;		\
	     thread_data < _static_thread_data_list_end; \
	     thread_data++)

int k_is_in_isr(void)
{
	return _is_in_isr();
}
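
/*
 * Example usage (illustrative sketch, not part of this file's build):
 * code shared between thread and ISR context can use k_is_in_isr() to
 * avoid blocking when called from an interrupt. The semaphore and the
 * 100 ms timeout below are hypothetical.
 *
 *	int put_item(struct k_sem *sem)
 *	{
 *		s32_t timeout = k_is_in_isr() ? K_NO_WAIT : K_MSEC(100);
 *
 *		return k_sem_take(sem, timeout);
 *	}
 */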

/*
 * This function tags the current thread as essential to system operation.
 * Exceptions raised by this thread will be treated as a fatal system error.
 */
void _thread_essential_set(void)
{
	_current->base.user_options |= K_ESSENTIAL;
}

/*
 * This function tags the current thread as not essential to system operation.
 * Exceptions raised by this thread may be recoverable.
 * (This is the default tag for a thread.)
 */
void _thread_essential_clear(void)
{
	_current->base.user_options &= ~K_ESSENTIAL;
}

/*
 * This routine indicates if the current thread is an essential system thread.
 *
 * Returns non-zero if current thread is essential, zero if it is not.
 */
int _is_thread_essential(void)
{
	return _current->base.user_options & K_ESSENTIAL;
}

void k_busy_wait(u32_t usec_to_wait)
{
#if defined(CONFIG_TICKLESS_KERNEL) && \
	!defined(CONFIG_BUSY_WAIT_USES_ALTERNATE_CLOCK)
	int saved_always_on = k_enable_sys_clock_always_on();
#endif
	/* use 64-bit math to prevent overflow when multiplying */
	u32_t cycles_to_wait = (u32_t)(
		(u64_t)usec_to_wait *
		(u64_t)sys_clock_hw_cycles_per_sec /
		(u64_t)USEC_PER_SEC
	);
	u32_t start_cycles = k_cycle_get_32();

	for (;;) {
		u32_t current_cycles = k_cycle_get_32();

		/* this handles the rollover on an unsigned 32-bit value */
		if ((current_cycles - start_cycles) >= cycles_to_wait) {
			break;
		}
	}
#if defined(CONFIG_TICKLESS_KERNEL) && \
	!defined(CONFIG_BUSY_WAIT_USES_ALTERNATE_CLOCK)
	_sys_clock_always_on = saved_always_on;
#endif
}
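
/*
 * Worked example (illustrative): with sys_clock_hw_cycles_per_sec at
 * 84000000 (84 MHz), k_busy_wait(100) spins for
 * 100 * 84000000 / 1000000 = 8400 cycles. A hypothetical driver needing a
 * short settling delay without sleeping might do ("gpio_dev" and
 * "POWER_PIN" are assumed names):
 *
 *	gpio_pin_write(gpio_dev, POWER_PIN, 1);
 *	k_busy_wait(100);
 */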

#ifdef CONFIG_THREAD_CUSTOM_DATA
void _impl_k_thread_custom_data_set(void *value)
{
	_current->custom_data = value;
}

#ifdef CONFIG_USERSPACE
_SYSCALL_HANDLER(k_thread_custom_data_set, data)
{
	_impl_k_thread_custom_data_set((void *)data);
	return 0;
}
#endif

void *_impl_k_thread_custom_data_get(void)
{
	return _current->custom_data;
}

#ifdef CONFIG_USERSPACE
_SYSCALL_HANDLER0_SIMPLE(k_thread_custom_data_get);
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_THREAD_CUSTOM_DATA */
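
/*
 * Example usage (illustrative sketch, assuming CONFIG_THREAD_CUSTOM_DATA=y):
 * a thread can stash one word of per-thread state and read it back later.
 * The transaction-id scheme is hypothetical application code.
 *
 *	k_thread_custom_data_set((void *)(long)transaction_id);
 *	...
 *	long id = (long)k_thread_custom_data_get();
 */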

#if defined(CONFIG_THREAD_MONITOR)
/*
 * Remove a thread from the kernel's list of active threads.
 */
void _thread_monitor_exit(struct k_thread *thread)
{
	unsigned int key = irq_lock();

	if (thread == _kernel.threads) {
		_kernel.threads = _kernel.threads->next_thread;
	} else {
		struct k_thread *prev_thread;

		prev_thread = _kernel.threads;
		while (thread != prev_thread->next_thread) {
			prev_thread = prev_thread->next_thread;
		}
		prev_thread->next_thread = thread->next_thread;
	}

	irq_unlock(key);
}
#endif /* CONFIG_THREAD_MONITOR */

#ifdef CONFIG_STACK_SENTINEL
/* Check that the stack sentinel is still present
 *
 * The stack sentinel feature writes a magic value to the lowest 4 bytes of
 * the thread's stack when the thread is initialized. This value gets checked
 * in a few places:
 *
 * 1) In k_yield() if the current thread is not swapped out
 * 2) After servicing a non-nested interrupt
 * 3) In _Swap(), check the sentinel in the outgoing thread
 *
 * Item 2 requires support in arch/ code.
 *
 * If the check fails, the thread will be terminated appropriately through
 * the system fatal error handler.
 */
void _check_stack_sentinel(void)
{
	u32_t *stack;

	if (_current->base.thread_state == _THREAD_DUMMY) {
		return;
	}

	stack = (u32_t *)_current->stack_info.start;
	if (*stack != STACK_SENTINEL) {
		/* Restore it so further checks don't trigger this same error */
		*stack = STACK_SENTINEL;
		_k_except_reason(_NANO_ERR_STACK_CHK_FAIL);
	}
}
#endif

/*
 * Common thread entry point function (used by all threads)
 *
 * This routine invokes the actual thread entry point function and passes
 * it three arguments. It also handles graceful termination of the thread
 * if the entry point function ever returns.
 *
 * This routine does not return, and is marked as such so the compiler won't
 * generate preamble code that is only used by functions that actually return.
 */
FUNC_NORETURN void _thread_entry(k_thread_entry_t entry,
				 void *p1, void *p2, void *p3)
{
	entry(p1, p2, p3);

#ifdef CONFIG_MULTITHREADING
	k_thread_abort(k_current_get());
#else
	for (;;) {
		k_cpu_idle();
	}
#endif

	/*
	 * Compiler can't tell that k_thread_abort() won't return and issues a
	 * warning unless we tell it that control never gets this far.
	 */
	CODE_UNREACHABLE;
}

#ifdef CONFIG_MULTITHREADING
void _impl_k_thread_start(struct k_thread *thread)
{
	int key = irq_lock(); /* protect kernel queues */

	if (_has_thread_started(thread)) {
		irq_unlock(key);
		return;
	}

	_mark_thread_as_started(thread);

	if (_is_thread_ready(thread)) {
		_add_thread_to_ready_q(thread);
		if (_must_switch_threads()) {
			_Swap(key);
			return;
		}
	}

	irq_unlock(key);
}

#ifdef CONFIG_USERSPACE
_SYSCALL_HANDLER1_SIMPLE_VOID(k_thread_start, K_OBJ_THREAD, struct k_thread *);
#endif
#endif
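
/*
 * Example usage (illustrative sketch): a statically defined thread with a
 * delay of K_FOREVER stays dormant until explicitly started. The worker
 * thread, its 512-byte stack and priority 7 are hypothetical.
 *
 *	K_THREAD_DEFINE(worker, 512, worker_entry, NULL, NULL, NULL,
 *			7, 0, K_FOREVER);
 *	...
 *	k_thread_start(worker);
 */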

#ifdef CONFIG_MULTITHREADING
static void schedule_new_thread(struct k_thread *thread, s32_t delay)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
	if (delay == 0) {
		k_thread_start(thread);
	} else {
		s32_t ticks = _TICK_ALIGN + _ms_to_ticks(delay);
		int key = irq_lock();

		_add_thread_timeout(thread, NULL, ticks);
		irq_unlock(key);
	}
#else
	ARG_UNUSED(delay);
	k_thread_start(thread);
#endif
}
#endif

void _setup_new_thread(struct k_thread *new_thread,
		       k_thread_stack_t *stack, size_t stack_size,
		       k_thread_entry_t entry,
		       void *p1, void *p2, void *p3,
		       int prio, u32_t options)
{
	_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
		    prio, options);
#ifdef CONFIG_USERSPACE
	_k_object_init(new_thread);
	_k_object_init(stack);
	new_thread->stack_obj = stack;

	/* Any given thread has access to itself */
	k_object_access_grant(new_thread, new_thread);

#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
	/* _current may be null if the dummy thread is not used */
	if (!_current) {
		return;
	}
#endif
	/* New threads inherit any memory domain membership from the parent */
	if (_current->mem_domain_info.mem_domain) {
		k_mem_domain_add_thread(_current->mem_domain_info.mem_domain,
					new_thread);
	}

	if (options & K_INHERIT_PERMS) {
		_thread_perms_inherit(_current, new_thread);
	}
#endif
}

#ifdef CONFIG_MULTITHREADING
k_tid_t _impl_k_thread_create(struct k_thread *new_thread,
			      k_thread_stack_t *stack,
			      size_t stack_size, k_thread_entry_t entry,
			      void *p1, void *p2, void *p3,
			      int prio, u32_t options, s32_t delay)
{
	__ASSERT(!_is_in_isr(), "Threads may not be created in ISRs");
	_setup_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
			  prio, options);

	if (delay != K_FOREVER) {
		schedule_new_thread(new_thread, delay);
	}
	return new_thread;
}

#ifdef CONFIG_USERSPACE
_SYSCALL_HANDLER(k_thread_create,
		 new_thread_p, stack_p, stack_size, entry, p1, more_args)
{
	int prio;
	u32_t options, delay, guard_size, total_size;
	struct _k_object *stack_object;
	struct k_thread *new_thread = (struct k_thread *)new_thread_p;
	volatile struct _syscall_10_args *margs =
		(volatile struct _syscall_10_args *)more_args;
	k_thread_stack_t *stack = (k_thread_stack_t *)stack_p;

	/* The thread and stack objects *must* be in an uninitialized state */
	_SYSCALL_OBJ_NEVER_INIT(new_thread, K_OBJ_THREAD);
	stack_object = _k_object_find(stack);
	_SYSCALL_VERIFY_MSG(!_obj_validation_check(stack_object, stack,
						   K_OBJ__THREAD_STACK_ELEMENT,
						   _OBJ_INIT_FALSE),
			    "bad stack object");

	/* Verify that the stack size passed in is OK by computing the total
	 * size and comparing it with the size value in the object metadata
	 */
	guard_size = (u32_t)K_THREAD_STACK_BUFFER(stack) - (u32_t)stack;
	_SYSCALL_VERIFY_MSG(!__builtin_uadd_overflow(guard_size, stack_size,
						     &total_size),
			    "stack size overflow (%u+%u)", stack_size,
			    guard_size);
	/* They really ought to be equal, make this more strict? */
	_SYSCALL_VERIFY_MSG(total_size <= stack_object->data,
			    "stack size %u is too big, max is %u",
			    total_size, stack_object->data);

	/* Verify the struct containing args 6-10 */
	_SYSCALL_MEMORY_READ(margs, sizeof(*margs));

	/* Stash struct arguments in local variables to prevent switcheroo
	 * attacks
	 */
	prio = margs->arg8;
	options = margs->arg9;
	delay = margs->arg10;
	compiler_barrier();

	/* User threads may only create other user threads and they can't
	 * be marked as essential
	 */
	_SYSCALL_VERIFY(options & K_USER);
	_SYSCALL_VERIFY(!(options & K_ESSENTIAL));

	/* Check validity of prio argument; must be the same or worse priority
	 * than the caller
	 */
	_SYSCALL_VERIFY(_VALID_PRIO(prio, NULL));
	_SYSCALL_VERIFY(_is_prio_lower_or_equal(prio, _current->base.prio));

	_setup_new_thread((struct k_thread *)new_thread, stack, stack_size,
			  (k_thread_entry_t)entry, (void *)p1,
			  (void *)margs->arg6, (void *)margs->arg7, prio,
			  options);

	if (delay != K_FOREVER) {
		schedule_new_thread(new_thread, delay);
	}

	return new_thread_p;
}
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_MULTITHREADING */
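
/*
 * Example usage (illustrative sketch): creating and starting a dynamic
 * thread. The stack object must come from K_THREAD_STACK_DEFINE() so the
 * kernel can validate it; names, sizes and the priority are hypothetical.
 *
 *	#define MY_STACK_SIZE 512
 *	#define MY_PRIORITY 5
 *
 *	K_THREAD_STACK_DEFINE(my_stack, MY_STACK_SIZE);
 *	struct k_thread my_thread_data;
 *
 *	k_tid_t tid = k_thread_create(&my_thread_data, my_stack,
 *				      K_THREAD_STACK_SIZEOF(my_stack),
 *				      my_entry, NULL, NULL, NULL,
 *				      MY_PRIORITY, 0, K_NO_WAIT);
 */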

int _impl_k_thread_cancel(k_tid_t tid)
{
	struct k_thread *thread = tid;

	int key = irq_lock();

	if (_has_thread_started(thread) ||
	    !_is_thread_timeout_active(thread)) {
		irq_unlock(key);
		return -EINVAL;
	}

	_abort_thread_timeout(thread);
	_thread_monitor_exit(thread);

	irq_unlock(key);

	return 0;
}

#ifdef CONFIG_USERSPACE
_SYSCALL_HANDLER1_SIMPLE(k_thread_cancel, K_OBJ_THREAD, struct k_thread *);
#endif

static inline int is_in_any_group(struct _static_thread_data *thread_data,
				  u32_t groups)
{
	return !!(thread_data->init_groups & groups);
}

void _k_thread_group_op(u32_t groups, void (*func)(struct k_thread *))
{
	unsigned int key;

	__ASSERT(!_is_in_isr(), "");

	_sched_lock();

	/* Invoke func() on each static thread in the specified group set. */
	_FOREACH_STATIC_THREAD(thread_data) {
		if (is_in_any_group(thread_data, groups)) {
			key = irq_lock();
			func(thread_data->init_thread);
			irq_unlock(key);
		}
	}

	/*
	 * If the current thread is still in a ready state, then let the
	 * "unlock scheduler" code determine if any rescheduling is needed.
	 */
	if (_is_thread_ready(_current)) {
		k_sched_unlock();
		return;
	}

	/* The current thread is no longer in a ready state; reschedule. */
	key = irq_lock();
	_sched_unlock_no_reschedule();
	_Swap(key);
}

void _k_thread_single_start(struct k_thread *thread)
{
	_mark_thread_as_started(thread);

	if (_is_thread_ready(thread)) {
		_add_thread_to_ready_q(thread);
	}
}

void _k_thread_single_suspend(struct k_thread *thread)
{
	if (_is_thread_ready(thread)) {
		_remove_thread_from_ready_q(thread);
	}

	_mark_thread_as_suspended(thread);
}

void _impl_k_thread_suspend(struct k_thread *thread)
{
	unsigned int key = irq_lock();

	_k_thread_single_suspend(thread);

	if (thread == _current) {
		_Swap(key);
	} else {
		irq_unlock(key);
	}
}

#ifdef CONFIG_USERSPACE
_SYSCALL_HANDLER1_SIMPLE_VOID(k_thread_suspend, K_OBJ_THREAD, k_tid_t);
#endif

void _k_thread_single_resume(struct k_thread *thread)
{
	_mark_thread_as_not_suspended(thread);

	if (_is_thread_ready(thread)) {
		_add_thread_to_ready_q(thread);
	}
}

void _impl_k_thread_resume(struct k_thread *thread)
{
	unsigned int key = irq_lock();

	_k_thread_single_resume(thread);

	_reschedule_threads(key);
}

#ifdef CONFIG_USERSPACE
_SYSCALL_HANDLER1_SIMPLE_VOID(k_thread_resume, K_OBJ_THREAD, k_tid_t);
#endif
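
/*
 * Example usage (illustrative sketch): temporarily parking a worker thread
 * from a control thread. "worker_tid" is a hypothetical k_tid_t obtained
 * from k_thread_create() or K_THREAD_DEFINE().
 *
 *	k_thread_suspend(worker_tid);
 *	do_exclusive_work();
 *	k_thread_resume(worker_tid);
 */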

void _k_thread_single_abort(struct k_thread *thread)
{
	if (thread->fn_abort != NULL) {
		thread->fn_abort();
	}

	if (_is_thread_ready(thread)) {
		_remove_thread_from_ready_q(thread);
	} else {
		if (_is_thread_pending(thread)) {
			_unpend_thread(thread);
		}
		if (_is_thread_timeout_active(thread)) {
			_abort_thread_timeout(thread);
		}
	}

	thread->base.thread_state |= _THREAD_DEAD;
#ifdef CONFIG_KERNEL_EVENT_LOGGER_THREAD
	_sys_k_event_logger_thread_exit(thread);
#endif

#ifdef CONFIG_USERSPACE
	/* Clear initialized state so that this thread object may be re-used
	 * and so that API calls made on it from user threads trigger errors
	 */
	_k_object_uninit(thread->stack_obj);
	_k_object_uninit(thread);

	/* Revoke permissions on thread's ID so that it may be recycled */
	_thread_perms_all_clear(thread);
#endif
}

#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_USERSPACE
extern char __object_access_start[];
extern char __object_access_end[];

static void grant_static_access(void)
{
	struct _k_object_assignment *pos;

	for (pos = (struct _k_object_assignment *)__object_access_start;
	     pos < (struct _k_object_assignment *)__object_access_end;
	     pos++) {
		for (int i = 0; pos->objects[i] != NULL; i++) {
			k_object_access_grant(pos->objects[i],
					      pos->thread);
		}
	}
}
#endif /* CONFIG_USERSPACE */

void _init_static_threads(void)
{
	unsigned int key;

	_FOREACH_STATIC_THREAD(thread_data) {
		_setup_new_thread(
			thread_data->init_thread,
			thread_data->init_stack,
			thread_data->init_stack_size,
			thread_data->init_entry,
			thread_data->init_p1,
			thread_data->init_p2,
			thread_data->init_p3,
			thread_data->init_prio,
			thread_data->init_options);

		thread_data->init_thread->init_data = thread_data;
	}

#ifdef CONFIG_USERSPACE
	grant_static_access();
#endif
	_sched_lock();

	/*
	 * Non-legacy static threads may be started immediately or after a
	 * previously specified delay. Even though the scheduler is locked,
	 * ticks can still be delivered and processed. Lock interrupts so
	 * that the countdown until execution begins from the same tick.
	 *
	 * Note that static threads defined using the legacy API have a
	 * delay of K_FOREVER.
	 */
	key = irq_lock();
	_FOREACH_STATIC_THREAD(thread_data) {
		if (thread_data->init_delay != K_FOREVER) {
			schedule_new_thread(thread_data->init_thread,
					    thread_data->init_delay);
		}
	}
	irq_unlock(key);
	k_sched_unlock();
}
#endif

void _init_thread_base(struct _thread_base *thread_base, int priority,
		       u32_t initial_state, unsigned int options)
{
	/* k_q_node is initialized upon first insertion in a list */

	thread_base->user_options = (u8_t)options;
	thread_base->thread_state = (u8_t)initial_state;

	thread_base->prio = priority;

	thread_base->sched_locked = 0;

	/* swap_data does not need to be initialized */

	_init_thread_timeout(thread_base);
}

u32_t _k_thread_group_mask_get(struct k_thread *thread)
{
	struct _static_thread_data *thread_data = thread->init_data;

	return thread_data->init_groups;
}

void _k_thread_group_join(u32_t groups, struct k_thread *thread)
{
	struct _static_thread_data *thread_data = thread->init_data;

	thread_data->init_groups |= groups;
}

void _k_thread_group_leave(u32_t groups, struct k_thread *thread)
{
	struct _static_thread_data *thread_data = thread->init_data;

	thread_data->init_groups &= ~groups;
}

void k_thread_access_grant(struct k_thread *thread, ...)
{
#ifdef CONFIG_USERSPACE
	va_list args;

	va_start(args, thread);

	while (1) {
		void *object = va_arg(args, void *);

		if (object == NULL) {
			break;
		}
		k_object_access_grant(object, thread);
	}
	va_end(args);
#else
	ARG_UNUSED(thread);
#endif
}
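
/*
 * Example usage (illustrative sketch): granting a user thread access to the
 * kernel objects it will use before it starts. The argument list must end
 * with NULL, since the loop above stops at a NULL sentinel. "tid", "my_sem"
 * and "my_pipe" are hypothetical.
 *
 *	k_thread_access_grant(tid, &my_sem, &my_pipe, NULL);
 */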

FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
					    void *p1, void *p2, void *p3)
{
	_current->base.user_options |= K_USER;
	_thread_essential_clear();
#ifdef CONFIG_USERSPACE
	_arch_user_mode_enter(entry, p1, p2, p3);
#else
	/* XXX In this case we do not reset the stack */
	_thread_entry(entry, p1, p2, p3);
#endif
}
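
/*
 * Example usage (illustrative sketch): a thread created in supervisor mode
 * can perform privileged setup and then drop to user mode permanently; the
 * call does not return. "user_entry" and "do_privileged_setup" are
 * hypothetical.
 *
 *	void supervisor_entry(void *p1, void *p2, void *p3)
 *	{
 *		do_privileged_setup();
 *		k_thread_user_mode_enter(user_entry, p1, p2, p3);
 *	}
 */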