/*
 * Copyright (c) 2010-2012, 2014-2015 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Architecture-independent private kernel APIs
 *
 * This file contains private kernel APIs that are not architecture-specific.
 */

#ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_

#include <kernel.h>
#include <kernel_arch_interface.h>
#include <string.h>

#ifndef _ASMLANGUAGE

#ifdef __cplusplus
extern "C" {
#endif

/* Early boot functions */

void z_bss_zero(void);
#ifdef CONFIG_XIP
void z_data_copy(void);
#else
static inline void z_data_copy(void)
{
	/* Do nothing */
}
#endif
FUNC_NORETURN void z_cstart(void);

void z_device_state_init(void);

extern FUNC_NORETURN void z_thread_entry(k_thread_entry_t entry,
			  void *p1, void *p2, void *p3);

extern char *z_setup_new_thread(struct k_thread *new_thread,
				k_thread_stack_t *stack, size_t stack_size,
				k_thread_entry_t entry,
				void *p1, void *p2, void *p3,
				int prio, uint32_t options, const char *name);

/**
 * @brief Allocate aligned memory from the current thread's resource pool
 *
 * Threads may be assigned a resource pool, which will be used to allocate
 * memory on behalf of certain kernel and driver APIs. Memory reserved
 * in this way should be freed with k_free().
 *
 * If called from an ISR, the k_malloc() system heap will be used if it exists.
 *
 * @param align Required memory alignment
 * @param size Memory allocation size
 * @return A pointer to the allocated memory, or NULL if there is insufficient
 *         RAM in the pool or there is no pool to draw memory from
 */
void *z_thread_aligned_alloc(size_t align, size_t size);

/**
 * @brief Allocate some memory from the current thread's resource pool
 *
 * Threads may be assigned a resource pool, which will be used to allocate
 * memory on behalf of certain kernel and driver APIs. Memory reserved
 * in this way should be freed with k_free().
 *
 * If called from an ISR, the k_malloc() system heap will be used if it exists.
 *
 * @param size Memory allocation size
 * @return A pointer to the allocated memory, or NULL if there is insufficient
 *         RAM in the pool or there is no pool to draw memory from
 */
static inline void *z_thread_malloc(size_t size)
{
	return z_thread_aligned_alloc(0, size);
}
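
/* Usage sketch (illustrative only; the request struct and error handling
 * below are hypothetical): a kernel API allocating a per-request record
 * from the calling thread's resource pool.
 *
 *	struct k_foo_req *req = z_thread_malloc(sizeof(*req));
 *
 *	if (req == NULL) {
 *		return -ENOMEM;    -- no pool assigned, or pool exhausted
 *	}
 *	...
 *	k_free(req);               -- pool memory is released with k_free()
 */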

/* set and clear essential thread flag */

extern void z_thread_essential_set(void);
extern void z_thread_essential_clear(void);

/* clean up when a thread is aborted */

#if defined(CONFIG_THREAD_MONITOR)
extern void z_thread_monitor_exit(struct k_thread *thread);
#else
#define z_thread_monitor_exit(thread) \
	do {/* nothing */ \
	} while (false)
#endif /* CONFIG_THREAD_MONITOR */

#ifdef CONFIG_USE_SWITCH
/* This is traditionally an arch function, but when the switch-based
 * z_swap() is in use it's a simple inline provided by the kernel.
 */
static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{
	thread->swap_retval = value;
}
#endif

static ALWAYS_INLINE void
z_thread_return_value_set_with_data(struct k_thread *thread,
				   unsigned int value,
				   void *data)
{
	arch_thread_return_value_set(thread, value);
	thread->base.swap_data = data;
}
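
/* Sketch of intended use (hypothetical wait-queue code): when a kernel
 * object completes an operation on behalf of a pended thread, it can hand
 * back both a swap return value and a data pointer before waking it:
 *
 *	z_thread_return_value_set_with_data(waiter, 0, msg);
 *	z_ready_thread(waiter);
 *
 * so the waiter's z_swap() returns 0 and the payload can be read from
 * waiter->base.swap_data. z_ready_thread() is shown only for context.
 */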

extern void z_smp_init(void);

extern void smp_timer_init(void);

extern void z_early_boot_rand_get(uint8_t *buf, size_t length);

#if CONFIG_STACK_POINTER_RANDOM
extern int z_stack_adjust_initialized;
#endif

#ifdef CONFIG_BOOT_TIME_MEASUREMENT
extern uint32_t z_timestamp_main; /* timestamp when main task starts */
extern uint32_t z_timestamp_idle; /* timestamp when CPU goes idle */
#endif

extern struct k_thread z_main_thread;

#ifdef CONFIG_MULTITHREADING
extern struct k_thread z_idle_threads[CONFIG_MP_NUM_CPUS];
#endif
extern K_KERNEL_STACK_ARRAY_DEFINE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
				   CONFIG_ISR_STACK_SIZE);

#ifdef CONFIG_GEN_PRIV_STACKS
extern uint8_t *z_priv_stack_find(k_thread_stack_t *stack);
#endif

#ifdef CONFIG_USERSPACE
bool z_stack_is_user_capable(k_thread_stack_t *stack);

/* Memory domain setup hook, called from z_setup_new_thread() */
void z_mem_domain_init_thread(struct k_thread *thread);

/* Memory domain teardown hook, called from z_thread_abort() */
void z_mem_domain_exit_thread(struct k_thread *thread);

/* This spinlock:
 *
 * - Protects the full set of active k_mem_domain objects and their contents
 * - Serializes calls to arch_mem_domain_* APIs
 *
 * If architecture code needs to access k_mem_domain structures or the
 * partitions they contain at any other point, this spinlock should be held.
 * Uniprocessor systems can get away with just locking interrupts, but this
 * is not recommended.
 */
extern struct k_spinlock z_mem_domain_lock;
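
/* For instance, arch code inspecting a domain's partitions outside the
 * arch_mem_domain_* callbacks would be expected to do (sketch):
 *
 *	k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);
 *	... read or update k_mem_domain contents ...
 *	k_spin_unlock(&z_mem_domain_lock, key);
 */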
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_GDBSTUB
struct gdb_ctx;

/* Should be called by the arch layer. This is the gdbstub main loop;
 * it synchronously communicates with gdb on the host.
 */
extern int z_gdb_main_loop(struct gdb_ctx *ctx, bool start);
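
/* A minimal sketch of the expected arch-side call, e.g. from a debug
 * exception handler (the handler name and how ctx is obtained are
 * hypothetical; the start semantics are an assumption):
 *
 *	void arch_debug_exception(struct gdb_ctx *ctx)
 *	{
 *		(void)z_gdb_main_loop(ctx, false);
 *	}
 *
 * with start=true used once at init to stop and wait for gdb to attach.
 */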
#endif

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
void z_thread_mark_switched_in(void);
void z_thread_mark_switched_out(void);
#else

/**
 * @brief Called after a thread has been selected to run
 */
#define z_thread_mark_switched_in()

/**
 * @brief Called before the current thread is switched out
 */
#define z_thread_mark_switched_out()

#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */
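
/* These hooks are expected to bracket a context switch in the arch or
 * scheduler code, roughly (sketch, placement is an assumption):
 *
 *	z_thread_mark_switched_out();
 *	... arch-specific switch to the incoming thread ...
 *	z_thread_mark_switched_in();
 *
 * When CONFIG_INSTRUMENT_THREAD_SWITCHING is disabled they compile away.
 */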

/* Init hook for page frame management, invoked immediately upon entry of
 * main thread, before POST_KERNEL tasks
 */
void z_mem_manage_init(void);

/* Workaround for build-time page table mapping of the kernel */
void z_kernel_map_fixup(void);

#define LOCKED(lck) for (k_spinlock_key_t __i = {}, \
			 __key = k_spin_lock(lck); \
			 !__i.key; \
			 k_spin_unlock(lck, __key), __i.key = 1)
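
/* LOCKED() runs the statement or block that follows it exactly once with
 * lck held: __key holds the real spinlock key, and the dummy __i.key flag
 * terminates the for() after one pass, unlocking in the increment clause.
 * Usage sketch:
 *
 *	LOCKED(&z_mem_domain_lock) {
 *		... critical section ...
 *	}
 *
 * Do not return or break out of the body; that would skip the unlock in
 * the increment clause.
 */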
209
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400210#ifdef __cplusplus
211}
212#endif
213
214#endif /* _ASMLANGUAGE */
215
Flavio Ceolina7fffa92018-09-13 15:06:35 -0700216#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_ */