/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Kernel initialization module
 *
 * This module contains routines that are used to initialize the kernel.
 */

#include <offsets_short.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>
#include <zephyr/debug/stack.h>
#include <zephyr/random/rand32.h>
#include <zephyr/linker/sections.h>
#include <zephyr/toolchain.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/linker/linker-defs.h>
#include <ksched.h>
#include <string.h>
#include <zephyr/sys/dlist.h>
#include <kernel_internal.h>
#include <zephyr/drivers/entropy.h>
#include <zephyr/logging/log_ctrl.h>
#include <zephyr/tracing/tracing.h>
#include <stdbool.h>
#include <zephyr/debug/gcov.h>
#include <kswap.h>
#include <zephyr/timing/timing.h>
#include <zephyr/logging/log.h>
#include <zephyr/pm/device_runtime.h>
LOG_MODULE_REGISTER(os, CONFIG_KERNEL_LOG_LEVEL);

BUILD_ASSERT(CONFIG_MP_NUM_CPUS == CONFIG_MP_MAX_NUM_CPUS,
	     "CONFIG_MP_NUM_CPUS and CONFIG_MP_MAX_NUM_CPUS must be set to the same value");

/* the only struct z_kernel instance */
__pinned_bss
struct z_kernel _kernel;

__pinned_bss
atomic_t _cpus_active;

/* init/main and idle threads */
K_THREAD_PINNED_STACK_DEFINE(z_main_stack, CONFIG_MAIN_STACK_SIZE);
struct k_thread z_main_thread;

#ifdef CONFIG_MULTITHREADING
__pinned_bss
struct k_thread z_idle_threads[CONFIG_MP_MAX_NUM_CPUS];

static K_KERNEL_PINNED_STACK_ARRAY_DEFINE(z_idle_stacks,
					  CONFIG_MP_MAX_NUM_CPUS,
					  CONFIG_IDLE_STACK_SIZE);
#endif /* CONFIG_MULTITHREADING */

extern const struct init_entry __init_start[];
extern const struct init_entry __init_EARLY_start[];
extern const struct init_entry __init_PRE_KERNEL_1_start[];
extern const struct init_entry __init_PRE_KERNEL_2_start[];
extern const struct init_entry __init_POST_KERNEL_start[];
extern const struct init_entry __init_APPLICATION_start[];
extern const struct init_entry __init_end[];

enum init_level {
	INIT_LEVEL_EARLY = 0,
	INIT_LEVEL_PRE_KERNEL_1,
	INIT_LEVEL_PRE_KERNEL_2,
	INIT_LEVEL_POST_KERNEL,
	INIT_LEVEL_APPLICATION,
#ifdef CONFIG_SMP
	INIT_LEVEL_SMP,
#endif
};

#ifdef CONFIG_SMP
extern const struct init_entry __init_SMP_start[];
#endif

/*
 * storage space for the interrupt stack
 *
 * Note: This area is used as the system stack during kernel initialization,
 * since the kernel hasn't yet set up its own stack areas. The dual purposing
 * of this area is safe since interrupts are disabled until the kernel context
 * switches to the init thread.
 */
K_KERNEL_PINNED_STACK_ARRAY_DEFINE(z_interrupt_stacks,
				   CONFIG_MP_MAX_NUM_CPUS,
				   CONFIG_ISR_STACK_SIZE);

extern void idle(void *unused1, void *unused2, void *unused3);

/* LCOV_EXCL_START
 *
 * This code is called so early in the boot process that code coverage
 * doesn't work properly. In addition, not all arches call this code;
 * some, such as x86, do it in optimized assembly instead.
 */

/**
 * @brief Equivalent of memset() for early boot usage
 *
 * Architectures that can't safely use the regular (optimized) memset very
 * early during boot, because e.g. hardware isn't yet sufficiently
 * initialized, may override this with their own safe implementation.
 */
__boot_func
void __weak z_early_memset(void *dst, int c, size_t n)
{
	(void) memset(dst, c, n);
}

/**
 * @brief Equivalent of memcpy() for early boot usage
 *
 * Architectures that can't safely use the regular (optimized) memcpy very
 * early during boot, because e.g. hardware isn't yet sufficiently
 * initialized, may override this with their own safe implementation.
 */
__boot_func
void __weak z_early_memcpy(void *dst, const void *src, size_t n)
{
	(void) memcpy(dst, src, n);
}
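
/* Illustrative sketch, not part of the upstream file: an architecture
 * whose optimized string routines depend on e.g. caches or an MPU being
 * configured could override the weak defaults above with a plain byte
 * loop, along these lines:
 *
 *	__boot_func
 *	void z_early_memset(void *dst, int c, size_t n)
 *	{
 *		unsigned char *d = dst;
 *
 *		while (n > 0) {
 *			*d++ = (unsigned char)c;
 *			n--;
 *		}
 *	}
 */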

/**
 * @brief Clear BSS
 *
 * This routine clears the BSS region, so all bytes are 0.
 */
__boot_func
void z_bss_zero(void)
{
	if (IS_ENABLED(CONFIG_ARCH_POSIX)) {
		/* native_posix gets its memory cleared on entry by
		 * the host OS, and in any case the host clang/lld
		 * doesn't emit the __bss_end symbol this code expects
		 * to see
		 */
		return;
	}

	z_early_memset(__bss_start, 0, __bss_end - __bss_start);
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_ccm), okay)
	z_early_memset(&__ccm_bss_start, 0,
		       (uintptr_t) &__ccm_bss_end
		       - (uintptr_t) &__ccm_bss_start);
#endif
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay)
	z_early_memset(&__dtcm_bss_start, 0,
		       (uintptr_t) &__dtcm_bss_end
		       - (uintptr_t) &__dtcm_bss_start);
#endif
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_ocm), okay)
	z_early_memset(&__ocm_bss_start, 0,
		       (uintptr_t) &__ocm_bss_end
		       - (uintptr_t) &__ocm_bss_start);
#endif
#ifdef CONFIG_CODE_DATA_RELOCATION
	extern void bss_zeroing_relocation(void);

	bss_zeroing_relocation();
#endif /* CONFIG_CODE_DATA_RELOCATION */
#ifdef CONFIG_COVERAGE_GCOV
	z_early_memset(&__gcov_bss_start, 0,
		       ((uintptr_t) &__gcov_bss_end - (uintptr_t) &__gcov_bss_start));
#endif
}
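
/* The CCM/DTCM/OCM clears in z_bss_zero() above cover BSS-like sections
 * that the linker places in device-specific RAM (core-coupled,
 * tightly-coupled and on-chip memory); each branch is compiled in only
 * when the corresponding devicetree chosen node (e.g. zephyr,ccm) is
 * present and enabled.
 */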

#ifdef CONFIG_LINKER_USE_BOOT_SECTION
/**
 * @brief Clear BSS within the boot region
 *
 * This routine clears the BSS within the boot region.
 * This is separate from z_bss_zero() as the boot region may
 * contain symbols required for the boot process before
 * paging is initialized.
 */
__boot_func
void z_bss_zero_boot(void)
{
	z_early_memset(&lnkr_boot_bss_start, 0,
		       (uintptr_t)&lnkr_boot_bss_end
		       - (uintptr_t)&lnkr_boot_bss_start);
}
#endif /* CONFIG_LINKER_USE_BOOT_SECTION */

#ifdef CONFIG_LINKER_USE_PINNED_SECTION
/**
 * @brief Clear BSS within the pinned region
 *
 * This routine clears the BSS within the pinned region.
 * This is separate from z_bss_zero() as the pinned region may
 * contain symbols required for the boot process before
 * paging is initialized.
 */
#ifdef CONFIG_LINKER_USE_BOOT_SECTION
__boot_func
#else
__pinned_func
#endif
void z_bss_zero_pinned(void)
{
	z_early_memset(&lnkr_pinned_bss_start, 0,
		       (uintptr_t)&lnkr_pinned_bss_end
		       - (uintptr_t)&lnkr_pinned_bss_start);
}
#endif /* CONFIG_LINKER_USE_PINNED_SECTION */

#ifdef CONFIG_STACK_CANARIES
#ifdef CONFIG_STACK_CANARIES_TLS
extern __thread volatile uintptr_t __stack_chk_guard;
#else
extern volatile uintptr_t __stack_chk_guard;
#endif
#endif /* CONFIG_STACK_CANARIES */
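
/* __stack_chk_guard is the canary cell referenced by compiler-generated
 * stack checks. It is seeded from early-boot entropy in z_cstart() below,
 * which is itself built with FUNC_NO_STACK_PROTECTOR because the guard
 * value is not yet valid when that function starts running.
 */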

/* LCOV_EXCL_STOP */

__pinned_bss
bool z_sys_post_kernel;

/**
 * @brief Execute all the init entry initialization functions at a given level
 *
 * @details Invokes the initialization routine for each init entry object
 * created by the INIT_ENTRY_DEFINE() macro using the specified level.
 * The linker script places the init entry objects in memory in the order
 * they need to be invoked, with symbols indicating where one level leaves
 * off and the next one begins.
 *
 * @param level init level to run.
 */
static void z_sys_init_run_level(enum init_level level)
{
	static const struct init_entry *levels[] = {
		__init_EARLY_start,
		__init_PRE_KERNEL_1_start,
		__init_PRE_KERNEL_2_start,
		__init_POST_KERNEL_start,
		__init_APPLICATION_start,
#ifdef CONFIG_SMP
		__init_SMP_start,
#endif
		/* End marker */
		__init_end,
	};
	const struct init_entry *entry;

	for (entry = levels[level]; entry < levels[level+1]; entry++) {
		const struct device *dev = entry->dev;

		if (dev != NULL) {
			int rc = 0;

			if (entry->init_fn.dev != NULL) {
				rc = entry->init_fn.dev(dev);
				/* Mark device initialized. If initialization
				 * failed, record the error condition.
				 */
				if (rc != 0) {
					if (rc < 0) {
						rc = -rc;
					}
					if (rc > UINT8_MAX) {
						rc = UINT8_MAX;
					}
					dev->state->init_res = rc;
				}
			}

			dev->state->initialized = true;

			if (rc == 0) {
				/* Run automatic device runtime enablement */
				(void)pm_device_runtime_auto_enable(dev);
			}
		} else {
			(void)entry->init_fn.sys();
		}
	}
}
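
/* Illustrative sketch, names hypothetical: a plain (non-device) init
 * entry of the kind this routine invokes. SYS_INIT() emits an init_entry
 * that the linker sorts into the POST_KERNEL span walked above:
 *
 *	static int my_subsys_init(void)
 *	{
 *		// set up the subsystem; return 0 on success
 *		return 0;
 *	}
 *
 *	SYS_INIT(my_subsys_init, POST_KERNEL,
 *		 CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
 */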

extern void boot_banner(void);

/**
 * @brief Mainline for kernel's background thread
 *
 * This routine completes kernel initialization by invoking the remaining
 * init functions, then invokes the application's main() routine.
 */
__boot_func
static void bg_thread_main(void *unused1, void *unused2, void *unused3)
{
	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);
	ARG_UNUSED(unused3);

#ifdef CONFIG_MMU
	/* Invoked here such that backing store or eviction algorithms may
	 * initialize kernel objects, and so that all POST_KERNEL and later
	 * tasks may perform memory management tasks (except for z_phys_map(),
	 * which is allowed at any time)
	 */
	z_mem_manage_init();
#endif /* CONFIG_MMU */
	z_sys_post_kernel = true;

	z_sys_init_run_level(INIT_LEVEL_POST_KERNEL);
#if CONFIG_STACK_POINTER_RANDOM
	z_stack_adjust_initialized = 1;
#endif
	boot_banner();

#if defined(CONFIG_CPP)
	void z_cpp_init_static(void);
	z_cpp_init_static();
#endif

	/* Final init level before app starts */
	z_sys_init_run_level(INIT_LEVEL_APPLICATION);

	z_init_static_threads();

#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(&_kernel));
#endif

#ifdef CONFIG_SMP
	if (!IS_ENABLED(CONFIG_SMP_BOOT_DELAY)) {
		z_smp_init();
	}
	z_sys_init_run_level(INIT_LEVEL_SMP);
#endif

#ifdef CONFIG_MMU
	z_mem_manage_boot_finish();
#endif /* CONFIG_MMU */

	extern int main(void);

	(void)main();

	/* Mark nonessential since main() has no more work to do */
	z_main_thread.base.user_options &= ~K_ESSENTIAL;

#ifdef CONFIG_COVERAGE_DUMP
	/* Dump coverage data once main() has exited. */
	gcov_coverage_dump();
#endif
} /* LCOV_EXCL_LINE ... because we just dumped final coverage data */

#if defined(CONFIG_MULTITHREADING)
__boot_func
static void init_idle_thread(int i)
{
	struct k_thread *thread = &z_idle_threads[i];
	k_thread_stack_t *stack = z_idle_stacks[i];

#ifdef CONFIG_THREAD_NAME

#if CONFIG_MP_MAX_NUM_CPUS > 1
	char tname[8];
	snprintk(tname, 8, "idle %02d", i);
#else
	char *tname = "idle";
#endif

#else
	char *tname = NULL;
#endif /* CONFIG_THREAD_NAME */

	z_setup_new_thread(thread, stack,
			   CONFIG_IDLE_STACK_SIZE, idle, &_kernel.cpus[i],
			   NULL, NULL, K_IDLE_PRIO, K_ESSENTIAL,
			   tname);
	z_mark_thread_as_started(thread);

#ifdef CONFIG_SMP
	thread->base.is_idle = 1U;
#endif
}

void z_init_cpu(int id)
{
	init_idle_thread(id);
	_kernel.cpus[id].idle_thread = &z_idle_threads[id];
	_kernel.cpus[id].id = id;
	_kernel.cpus[id].irq_stack =
		(Z_KERNEL_STACK_BUFFER(z_interrupt_stacks[id]) +
		 K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[id]));
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	_kernel.cpus[id].usage.track_usage =
		CONFIG_SCHED_THREAD_USAGE_AUTO_ENABLE;
#endif

	/*
	 * Increment number of CPUs active. The pm subsystem
	 * will keep track of this from here.
	 */
	atomic_inc(&_cpus_active);
}

/**
 *
 * @brief Initializes kernel data structures
 *
 * This routine initializes various kernel data structures, including
 * the init and idle threads and any architecture-specific initialization.
 *
 * Note that all fields of "_kernel" are set to zero on entry, which may
 * be all the initialization many of them require.
 *
 * @return initial stack pointer for the main thread
 */
__boot_func
static char *prepare_multithreading(void)
{
	char *stack_ptr;

	/* _kernel.ready_q is all zeroes */
	z_sched_init();

#ifndef CONFIG_SMP
	/*
	 * prime the cache with the main thread since:
	 *
	 * - the cache can never be NULL
	 * - the main thread will be the one to run first
	 * - no other thread is initialized yet and thus their priority fields
	 *   contain garbage, which would prevent the cache loading algorithm
	 *   from working as intended
	 */
	_kernel.ready_q.cache = &z_main_thread;
#endif
	stack_ptr = z_setup_new_thread(&z_main_thread, z_main_stack,
				       CONFIG_MAIN_STACK_SIZE, bg_thread_main,
				       NULL, NULL, NULL,
				       CONFIG_MAIN_THREAD_PRIORITY,
				       K_ESSENTIAL, "main");
	z_mark_thread_as_started(&z_main_thread);
	z_ready_thread(&z_main_thread);

	z_init_cpu(0);

	return stack_ptr;
}

__boot_func
static FUNC_NORETURN void switch_to_main_thread(char *stack_ptr)
{
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
	arch_switch_to_main_thread(&z_main_thread, stack_ptr, bg_thread_main);
#else
	ARG_UNUSED(stack_ptr);
	/*
	 * Context switch to the main thread (entry function is
	 * bg_thread_main()): the current dummy thread is not on a wait queue
	 * or ready queue, so it will never be rescheduled in.
	 */
	z_swap_unlocked();
#endif
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
#endif /* CONFIG_MULTITHREADING */

#if defined(CONFIG_ENTROPY_HAS_DRIVER) || defined(CONFIG_TEST_RANDOM_GENERATOR)
__boot_func
void z_early_boot_rand_get(uint8_t *buf, size_t length)
{
#ifdef CONFIG_ENTROPY_HAS_DRIVER
	const struct device *const entropy = DEVICE_DT_GET_OR_NULL(DT_CHOSEN(zephyr_entropy));
	int rc;

	if (!device_is_ready(entropy)) {
		goto sys_rand_fallback;
	}

	/* Try to see if driver provides an ISR-specific API */
	rc = entropy_get_entropy_isr(entropy, buf, length, ENTROPY_BUSYWAIT);
	if (rc == -ENOTSUP) {
		/* Driver does not provide an ISR-specific API, assume it can
		 * be called from ISR context
		 */
		rc = entropy_get_entropy(entropy, buf, length);
	}

	if (rc >= 0) {
		return;
	}

	/* Fall through to fallback */

sys_rand_fallback:
#endif

	/* FIXME: this assumes sys_rand32_get() won't use any synchronization
	 * primitive, like semaphores or mutexes. It's too early in the boot
	 * process to use any of them. Ideally, only the path where entropy
	 * devices are available should be built; this is only a fallback for
	 * those devices without a HWRNG entropy driver.
	 */
	sys_rand_get(buf, length);
}
/* defined(CONFIG_ENTROPY_HAS_DRIVER) || defined(CONFIG_TEST_RANDOM_GENERATOR) */
#endif

/**
 *
 * @brief Initialize kernel
 *
 * This routine is invoked when the system is ready to run C code. The
 * processor must be running in 32-bit mode, and the BSS must have been
 * cleared/zeroed.
 *
 * @return Does not return
 */
__boot_func
FUNC_NO_STACK_PROTECTOR
FUNC_NORETURN void z_cstart(void)
{
	/* gcov hook needed to get the coverage report. */
	gcov_static_init();

	/* initialize early init calls */
	z_sys_init_run_level(INIT_LEVEL_EARLY);

	/* perform any architecture-specific initialization */
	arch_kernel_init();

	LOG_CORE_INIT();

#if defined(CONFIG_MULTITHREADING)
	/* Note: The z_ready_thread() call in prepare_multithreading() requires
	 * a dummy thread even if CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN=y
	 */
	struct k_thread dummy_thread;

	z_dummy_thread_init(&dummy_thread);
#endif
	/* do any necessary initialization of static devices */
	z_device_state_init();

	/* perform basic hardware initialization */
	z_sys_init_run_level(INIT_LEVEL_PRE_KERNEL_1);
	z_sys_init_run_level(INIT_LEVEL_PRE_KERNEL_2);

#ifdef CONFIG_STACK_CANARIES
	uintptr_t stack_guard;

	z_early_boot_rand_get((uint8_t *)&stack_guard, sizeof(stack_guard));
	__stack_chk_guard = stack_guard;
	__stack_chk_guard <<= 8;
#endif /* CONFIG_STACK_CANARIES */

#ifdef CONFIG_TIMING_FUNCTIONS_NEED_AT_BOOT
	timing_init();
	timing_start();
#endif

#ifdef CONFIG_MULTITHREADING
	switch_to_main_thread(prepare_multithreading());
#else
#ifdef ARCH_SWITCH_TO_MAIN_NO_MULTITHREADING
	/* Custom ARCH-specific routine to switch to main()
	 * in the case of no multi-threading.
	 */
	ARCH_SWITCH_TO_MAIN_NO_MULTITHREADING(bg_thread_main,
					      NULL, NULL, NULL);
#else
	bg_thread_main(NULL, NULL, NULL);

	/* LCOV_EXCL_START
	 * We've already dumped coverage data at this point.
	 */
	irq_lock();
	while (true) {
	}
	/* LCOV_EXCL_STOP */
#endif
#endif /* CONFIG_MULTITHREADING */

	/*
	 * Compiler can't tell that the above routines won't return and issues
	 * a warning unless we explicitly tell it that control never gets this
	 * far.
	 */

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}