/*
 * Copyright (c) 2010-2012, 2014-2015 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Architecture-independent private kernel APIs
 *
 * This file contains private kernel APIs that are not architecture-specific.
 */

#ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_

#include <zephyr/kernel.h>
#include <kernel_arch_interface.h>
#include <string.h>

#ifndef _ASMLANGUAGE

#ifdef __cplusplus
extern "C" {
#endif

/* Early boot functions */

void z_early_memset(void *dst, int c, size_t n);
void z_early_memcpy(void *dst, const void *src, size_t n);

void z_bss_zero(void);
#ifdef CONFIG_XIP
void z_data_copy(void);
#else
static inline void z_data_copy(void)
{
	/* Do nothing */
}
#endif

#ifdef CONFIG_LINKER_USE_BOOT_SECTION
void z_bss_zero_boot(void);
#else
static inline void z_bss_zero_boot(void)
{
	/* Do nothing */
}
#endif

#ifdef CONFIG_LINKER_USE_PINNED_SECTION
void z_bss_zero_pinned(void);
#else
static inline void z_bss_zero_pinned(void)
{
	/* Do nothing */
}
#endif

FUNC_NORETURN void z_cstart(void);

void z_device_state_init(void);

extern FUNC_NORETURN void z_thread_entry(k_thread_entry_t entry,
					 void *p1, void *p2, void *p3);

extern char *z_setup_new_thread(struct k_thread *new_thread,
				k_thread_stack_t *stack, size_t stack_size,
				k_thread_entry_t entry,
				void *p1, void *p2, void *p3,
				int prio, uint32_t options, const char *name);

/**
 * @brief Allocate aligned memory from the current thread's resource pool
 *
 * Threads may be assigned a resource pool, which will be used to allocate
 * memory on behalf of certain kernel and driver APIs. Memory reserved
 * in this way should be freed with k_free().
 *
 * If called from an ISR, the k_malloc() system heap will be used if it exists.
 *
 * @param align Required memory alignment
 * @param size Memory allocation size
 * @return A pointer to the allocated memory, or NULL if there is insufficient
 * RAM in the pool or there is no pool to draw memory from
 */
void *z_thread_aligned_alloc(size_t align, size_t size);

/**
 * @brief Allocate some memory from the current thread's resource pool
 *
 * Threads may be assigned a resource pool, which will be used to allocate
 * memory on behalf of certain kernel and driver APIs. Memory reserved
 * in this way should be freed with k_free().
 *
 * If called from an ISR, the k_malloc() system heap will be used if it exists.
 *
 * @param size Memory allocation size
 * @return A pointer to the allocated memory, or NULL if there is insufficient
 * RAM in the pool or there is no pool to draw memory from
 */
static inline void *z_thread_malloc(size_t size)
{
	return z_thread_aligned_alloc(0, size);
}

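/* Illustrative usage sketch (not part of this header's API surface): a kernel
 * or driver path allocating a per-request record from the calling thread's
 * resource pool might, assuming a hypothetical struct foo_req, look like:
 *
 *	struct foo_req *req = z_thread_malloc(sizeof(*req));
 *
 *	if (req == NULL) {
 *		return -ENOMEM;
 *	}
 *	...
 *	k_free(req);
 *
 * Alignment-sensitive allocations would use z_thread_aligned_alloc() with the
 * required alignment instead of z_thread_malloc().
 */
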
/* set and clear essential thread flag */

extern void z_thread_essential_set(void);
extern void z_thread_essential_clear(void);

/* clean up when a thread is aborted */

#if defined(CONFIG_THREAD_MONITOR)
extern void z_thread_monitor_exit(struct k_thread *thread);
#else
#define z_thread_monitor_exit(thread) \
	do {/* nothing */ \
	} while (false)
#endif /* CONFIG_THREAD_MONITOR */

#ifdef CONFIG_USE_SWITCH
/* This is traditionally an arch function, but when the switch-based
 * z_swap() is in use it's a simple inline provided by the kernel.
 */
static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{
	thread->swap_retval = value;
}
#endif

static ALWAYS_INLINE void
z_thread_return_value_set_with_data(struct k_thread *thread,
				    unsigned int value,
				    void *data)
{
	arch_thread_return_value_set(thread, value);
	thread->base.swap_data = data;
}

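/* Illustrative sketch (assumption, not a declaration in this header): a kernel
 * object waking a pended thread and handing it a payload might pair the swap
 * return value with the data pointer, e.g.
 *
 *	z_thread_return_value_set_with_data(waiter, 0, payload);
 *
 * so the waiter sees 0 returned from its blocking swap and can retrieve the
 * payload from waiter->base.swap_data.
 */
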
#ifdef CONFIG_SMP
extern void z_smp_init(void);
extern void smp_timer_init(void);
#endif

extern void z_early_boot_rand_get(uint8_t *buf, size_t length);

#if CONFIG_STACK_POINTER_RANDOM
extern int z_stack_adjust_initialized;
#endif

extern struct k_thread z_main_thread;


#ifdef CONFIG_MULTITHREADING
extern struct k_thread z_idle_threads[CONFIG_MP_NUM_CPUS];
#endif
K_KERNEL_PINNED_STACK_ARRAY_EXTERN(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
				   CONFIG_ISR_STACK_SIZE);

#ifdef CONFIG_GEN_PRIV_STACKS
extern uint8_t *z_priv_stack_find(k_thread_stack_t *stack);
#endif

/* Calculate stack usage. */
int z_stack_space_get(const uint8_t *stack_start, size_t size, size_t *unused_ptr);

#ifdef CONFIG_USERSPACE
bool z_stack_is_user_capable(k_thread_stack_t *stack);

/* Memory domain setup hook, called from z_setup_new_thread() */
void z_mem_domain_init_thread(struct k_thread *thread);

/* Memory domain teardown hook, called from z_thread_abort() */
void z_mem_domain_exit_thread(struct k_thread *thread);

/* This spinlock:
 *
 * - Protects the full set of active k_mem_domain objects and their contents
 * - Serializes calls to arch_mem_domain_* APIs
 *
 * If architecture code needs to access k_mem_domain structures or the
 * partitions they contain at any other point, this spinlock should be held.
 * Uniprocessor systems can get away with just locking interrupts but this is
 * not recommended.
 */
extern struct k_spinlock z_mem_domain_lock;
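
/* Illustrative sketch (assumption): arch code touching a domain's partitions
 * outside the arch_mem_domain_* callbacks would guard that access with the
 * lock above, e.g.
 *
 *	k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);
 *
 *	...inspect or update the k_mem_domain and its partitions...
 *
 *	k_spin_unlock(&z_mem_domain_lock, key);
 */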
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_GDBSTUB
struct gdb_ctx;

/* Should be called by the arch layer. This is the gdbstub main loop,
 * which communicates synchronously with gdb on the host.
 */
extern int z_gdb_main_loop(struct gdb_ctx *ctx);
#endif

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
void z_thread_mark_switched_in(void);
void z_thread_mark_switched_out(void);
#else

/**
 * @brief Called after a thread has been selected to run
 */
#define z_thread_mark_switched_in()

/**
 * @brief Called before a thread has been selected to run
 */

#define z_thread_mark_switched_out()

#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */

/* Init hook for page frame management, invoked immediately upon entry to the
 * main thread, before POST_KERNEL tasks
 */
void z_mem_manage_init(void);

/**
 * @brief Finalize page frame management at the end of the boot process.
 */
void z_mem_manage_boot_finish(void);

#define LOCKED(lck) for (k_spinlock_key_t __i = {}, \
			 __key = k_spin_lock(lck); \
			 !__i.key; \
			 k_spin_unlock(lck, __key), __i.key = 1)

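/* Illustrative sketch (assumption): LOCKED() scopes a critical section to the
 * body of a for statement, taking the spinlock on entry and releasing it when
 * the body falls off the end. With a hypothetical lock:
 *
 *	static struct k_spinlock my_lock;
 *
 *	LOCKED(&my_lock) {
 *		...touch data guarded by my_lock...
 *	}
 *
 * The body runs exactly once; leaving it early with break, return, or goto
 * would skip the k_spin_unlock() step, so the body must exit normally.
 */
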
#ifdef CONFIG_PM

/* When the kernel is about to go idle, it calls this function to notify the
 * power management subsystem that the kernel is ready to enter the idle state.
 *
 * At this point, the kernel has disabled interrupts and computed the maximum
 * time the system can remain idle, which is passed in as the number of ticks.
 * The SoC interface performs the power operations that can be completed in
 * the available time. The power management operations must halt execution of
 * the CPU.
 *
 * This function assumes that a wake-up event has already been set up by the
 * application.
 *
 * This function is entered with interrupts disabled. It should re-enable
 * interrupts if it entered a power state.
 *
 * @return True if the system suspended, otherwise false
 */
bool pm_system_suspend(int32_t ticks);

/**
 * Notify exit from kernel idling after PM operations
 *
 * This function notifies exit from kernel idling if a corresponding
 * pm_system_suspend() notification was handled and did not return
 * PM_STATE_ACTIVE.
 *
 * It is called from the ISR context of the event that caused the exit from
 * kernel idling, immediately after interrupts are re-enabled, to give a
 * chance to perform any operations before the kernel switches tasks or
 * processes nested interrupts. This is required for CPU low-power states
 * that need interrupts enabled while the state is entered, e.g. C1 on x86:
 * there, the ISR is invoked immediately after the event wakes up the CPU,
 * before the code following the CPU wait gets a chance to execute. This
 * notification can be ignored if no operation needs to be done at the wake
 * event.
 */
void pm_system_resume(void);

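/* Illustrative sketch (assumption, not the actual idle loop): the kernel's
 * idle path might drive this pair roughly as follows, with ticks being the
 * maximum idle time it computed; if no power state was entered it falls back
 * to a plain CPU wait:
 *
 *	(void)arch_irq_lock();
 *	if (!pm_system_suspend(ticks)) {
 *		k_cpu_idle();
 *	}
 *
 * pm_system_resume() is then expected to run from the ISR of the wake-up
 * event rather than from this path.
 */
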
#endif

#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
/**
 * Initialize the timing histograms for demand paging.
 */
void z_paging_histogram_init(void);

/**
 * Increment the counter in the timing histogram.
 *
 * @param hist The timing histogram to be updated.
 * @param cycles Time spent in measured operation.
 */
void z_paging_histogram_inc(struct k_mem_paging_histogram_t *hist,
			    uint32_t cycles);
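
/* Illustrative sketch (assumption): a demand-paging path might time itself
 * with the cycle counter and feed the result into its histogram, e.g.
 *
 *	uint32_t start = k_cycle_get_32();
 *
 *	do_eviction_work();
 *	z_paging_histogram_inc(&my_eviction_histogram,
 *			       k_cycle_get_32() - start);
 *
 * where do_eviction_work() and my_eviction_histogram are placeholders for the
 * real operation and its histogram instance.
 */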
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_ */