blob: 79c25398caabb5f61c856532c0446ee245902ac3 [file] [log] [blame]
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001/*
2 * Copyright (c) 2010-2014 Wind River Systems, Inc.
3 *
David B. Kinderac74d8b2017-01-18 17:01:01 -08004 * SPDX-License-Identifier: Apache-2.0
Benjamin Walsh456c6da2016-09-02 18:55:39 -04005 */
6
7/**
8 * @file
Anas Nashifcb888e62016-12-18 09:42:55 -05009 * @brief Kernel initialization module
Benjamin Walsh456c6da2016-09-02 18:55:39 -040010 *
Anas Nashifdc3d73b2016-12-19 20:25:56 -050011 * This module contains routines that are used to initialize the kernel.
Benjamin Walsh456c6da2016-09-02 18:55:39 -040012 */
13
Benjamin Walshf6ca7de2016-11-08 10:36:50 -050014#include <offsets_short.h>
Gerard Marull-Paretascffefc82022-05-06 11:04:23 +020015#include <zephyr/kernel.h>
16#include <zephyr/sys/printk.h>
17#include <zephyr/debug/stack.h>
18#include <zephyr/random/rand32.h>
19#include <zephyr/linker/sections.h>
20#include <zephyr/toolchain.h>
21#include <zephyr/kernel_structs.h>
22#include <zephyr/device.h>
23#include <zephyr/init.h>
24#include <zephyr/linker/linker-defs.h>
Benjamin Walshb4b108d2016-10-13 10:31:48 -040025#include <ksched.h>
Mahavir Jainacea2412016-12-02 21:48:39 +053026#include <string.h>
Gerard Marull-Paretascffefc82022-05-06 11:04:23 +020027#include <zephyr/sys/dlist.h>
Andy Ross245b54e2018-02-08 09:10:46 -080028#include <kernel_internal.h>
Gerard Marull-Paretascffefc82022-05-06 11:04:23 +020029#include <zephyr/drivers/entropy.h>
30#include <zephyr/logging/log_ctrl.h>
31#include <zephyr/tracing/tracing.h>
Flavio Ceolinb3d92022018-09-17 15:56:06 -070032#include <stdbool.h>
Gerard Marull-Paretascffefc82022-05-06 11:04:23 +020033#include <zephyr/debug/gcov.h>
Andrew Boie468efad2020-05-12 16:20:14 -070034#include <kswap.h>
Gerard Marull-Paretascffefc82022-05-06 11:04:23 +020035#include <zephyr/timing/timing.h>
36#include <zephyr/logging/log.h>
Krzysztof Chruscinski3ed80832020-11-26 19:32:34 +010037LOG_MODULE_REGISTER(os, CONFIG_KERNEL_LOG_LEVEL);
Anas Nashif57554052018-03-03 02:31:05 -060038
Krzysztof Chruscinski7dcff6e2021-04-16 15:16:00 +020039/* the only struct z_kernel instance */
40struct z_kernel _kernel;
41
Benjamin Walsh456c6da2016-09-02 18:55:39 -040042/* init/main and idle threads */
Daniel Leungebbfde92021-07-12 13:47:48 -070043K_THREAD_PINNED_STACK_DEFINE(z_main_stack, CONFIG_MAIN_STACK_SIZE);
Andrew Boiefe031612019-09-21 17:54:37 -070044struct k_thread z_main_thread;
Andrew Boie80a0d9d2020-03-12 15:37:29 -070045
46#ifdef CONFIG_MULTITHREADING
Daniel Leung660d1472021-03-25 16:05:15 -070047__pinned_bss
Andrew Boie80a0d9d2020-03-12 15:37:29 -070048struct k_thread z_idle_threads[CONFIG_MP_NUM_CPUS];
Daniel Leung660d1472021-03-25 16:05:15 -070049
50static K_KERNEL_PINNED_STACK_ARRAY_DEFINE(z_idle_stacks,
51 CONFIG_MP_NUM_CPUS,
52 CONFIG_IDLE_STACK_SIZE);
Andrew Boie80a0d9d2020-03-12 15:37:29 -070053#endif /* CONFIG_MULTITHREADING */
Benjamin Walsh456c6da2016-09-02 18:55:39 -040054
55/*
56 * storage space for the interrupt stack
57 *
Anas Nashifdc3d73b2016-12-19 20:25:56 -050058 * Note: This area is used as the system stack during kernel initialization,
59 * since the kernel hasn't yet set up its own stack areas. The dual purposing
60 * of this area is safe since interrupts are disabled until the kernel context
61 * switches to the init thread.
Benjamin Walsh456c6da2016-09-02 18:55:39 -040062 */
Daniel Leung660d1472021-03-25 16:05:15 -070063K_KERNEL_PINNED_STACK_ARRAY_DEFINE(z_interrupt_stacks,
64 CONFIG_MP_NUM_CPUS,
65 CONFIG_ISR_STACK_SIZE);
Andy Ross780ba232018-01-29 09:20:18 -080066
Peter Mitsis96cb05c2016-09-15 12:37:58 -040067extern void idle(void *unused1, void *unused2, void *unused3);
68
Carles Cuficb0cf9f2017-01-10 10:57:38 +010069
Andrew Boiefe228a82019-06-11 12:49:32 -070070/* LCOV_EXCL_START
71 *
72 * This code is called so early in the boot process that code coverage
73 * doesn't work properly. In addition, not all arches call this code,
74 * some like x86 do this with optimized assembly
75 */
76
Benjamin Walsh456c6da2016-09-02 18:55:39 -040077/**
Nicolas Pitre678b76e2022-02-10 13:54:49 -050078 * @brief equivalent of memset() for early boot usage
79 *
80 * Architectures that can't safely use the regular (optimized) memset very
81 * early during boot because e.g. hardware isn't yet sufficiently initialized
82 * may override this with their own safe implementation.
83 */
84__boot_func
85void __weak z_early_memset(void *dst, int c, size_t n)
86{
87 (void) memset(dst, c, n);
88}
89
90/**
91 * @brief equivalent of memcpy() for early boot usage
92 *
93 * Architectures that can't safely use the regular (optimized) memcpy very
94 * early during boot because e.g. hardware isn't yet sufficiently initialized
95 * may override this with their own safe implementation.
96 */
97__boot_func
98void __weak z_early_memcpy(void *dst, const void *src, size_t n)
99{
100 (void) memcpy(dst, src, n);
101}
102
103/**
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400104 * @brief Clear BSS
105 *
106 * This routine clears the BSS region, so all bytes are 0.
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400107 */
Daniel Leung660d1472021-03-25 16:05:15 -0700108__boot_func
Patrik Flykt4344e272019-03-08 14:19:05 -0700109void z_bss_zero(void)
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400110{
Andy Ross5722da72022-08-09 17:55:14 -0700111 if (IS_ENABLED(CONFIG_ARCH_POSIX)) {
112 /* native_posix gets its memory cleared on entry by
113 * the host OS, and in any case the host clang/lld
114 * doesn't emit the __bss_end symbol this code expects
115 * to see
116 */
117 return;
118 }
119
Nicolas Pitre678b76e2022-02-10 13:54:49 -0500120 z_early_memset(__bss_start, 0, __bss_end - __bss_start);
Martí Bolívar6e8775f2020-05-11 11:56:08 -0700121#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_ccm), okay)
Nicolas Pitre678b76e2022-02-10 13:54:49 -0500122 z_early_memset(&__ccm_bss_start, 0,
123 (uintptr_t) &__ccm_bss_end
124 - (uintptr_t) &__ccm_bss_start);
Erwin Rol1dc41d12017-10-05 01:22:32 +0200125#endif
Martí Bolívar6e8775f2020-05-11 11:56:08 -0700126#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay)
Nicolas Pitre678b76e2022-02-10 13:54:49 -0500127 z_early_memset(&__dtcm_bss_start, 0,
128 (uintptr_t) &__dtcm_bss_end
129 - (uintptr_t) &__dtcm_bss_start);
Alexander Wachterb4c5f4b2019-07-03 14:19:29 +0200130#endif
Immo Birnbaumda288292022-01-21 12:38:30 +0100131#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_ocm), okay)
Nicolas Pitre36173982022-02-22 00:00:52 -0500132 z_early_memset(&__ocm_bss_start, 0,
133 (uintptr_t) &__ocm_bss_end
134 - (uintptr_t) &__ocm_bss_start);
Immo Birnbaumda288292022-01-21 12:38:30 +0100135#endif
Adithya Baglody91c5b842018-11-13 16:57:45 +0530136#ifdef CONFIG_CODE_DATA_RELOCATION
137 extern void bss_zeroing_relocation(void);
138
139 bss_zeroing_relocation();
140#endif /* CONFIG_CODE_DATA_RELOCATION */
Adithya Baglody71e90f92018-08-29 16:44:16 +0530141#ifdef CONFIG_COVERAGE_GCOV
Nicolas Pitre678b76e2022-02-10 13:54:49 -0500142 z_early_memset(&__gcov_bss_start, 0,
143 ((uintptr_t) &__gcov_bss_end - (uintptr_t) &__gcov_bss_start));
Adithya Baglody71e90f92018-08-29 16:44:16 +0530144#endif
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400145}
146
#ifdef CONFIG_LINKER_USE_BOOT_SECTION
/**
 * @brief Clear BSS within the boot region
 *
 * This routine clears the BSS within the boot region.
 * This is separate from z_bss_zero() as the boot region may
 * contain symbols required for the boot process before
 * paging is initialized.
 */
__boot_func
void z_bss_zero_boot(void)
{
	z_early_memset(&lnkr_boot_bss_start, 0,
		       (uintptr_t)&lnkr_boot_bss_end
		       - (uintptr_t)&lnkr_boot_bss_start);
}
#endif /* CONFIG_LINKER_USE_BOOT_SECTION */
164
#ifdef CONFIG_LINKER_USE_PINNED_SECTION
/**
 * @brief Clear BSS within the pinned region
 *
 * This routine clears the BSS within the pinned region.
 * This is separate from z_bss_zero() as the pinned region may
 * contain symbols required for the boot process before
 * paging is initialized.
 */
#ifdef CONFIG_LINKER_USE_BOOT_SECTION
__boot_func
#else
__pinned_func
#endif
void z_bss_zero_pinned(void)
{
	z_early_memset(&lnkr_pinned_bss_start, 0,
		       (uintptr_t)&lnkr_pinned_bss_end
		       - (uintptr_t)&lnkr_pinned_bss_start);
}
#endif /* CONFIG_LINKER_USE_PINNED_SECTION */
186
Andrew Boie01100ea2019-02-21 15:02:22 -0800187#ifdef CONFIG_STACK_CANARIES
188extern volatile uintptr_t __stack_chk_guard;
189#endif /* CONFIG_STACK_CANARIES */
190
Andrew Boiefe228a82019-06-11 12:49:32 -0700191/* LCOV_EXCL_STOP */
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400192
Daniel Leung660d1472021-03-25 16:05:15 -0700193__pinned_bss
Peter Bigot74ef3952019-12-23 11:48:43 -0600194bool z_sys_post_kernel;
Daniel Leung660d1472021-03-25 16:05:15 -0700195
Anas Nashif4b593122020-08-27 09:08:40 -0400196extern void boot_banner(void);
Peter Bigot74ef3952019-12-23 11:48:43 -0600197
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400198/**
Leandro Pereiraa1ae8452018-03-06 15:08:55 -0800199 * @brief Mainline for kernel's background thread
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400200 *
201 * This routine completes kernel initialization by invoking the remaining
202 * init functions, then invokes application's main() routine.
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400203 */
Daniel Leung660d1472021-03-25 16:05:15 -0700204__boot_func
Leandro Pereiraa1ae8452018-03-06 15:08:55 -0800205static void bg_thread_main(void *unused1, void *unused2, void *unused3)
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400206{
207 ARG_UNUSED(unused1);
208 ARG_UNUSED(unused2);
209 ARG_UNUSED(unused3);
210
Andrew Boiee35f1792020-12-09 12:18:40 -0800211#ifdef CONFIG_MMU
212 /* Invoked here such that backing store or eviction algorithms may
213 * initialize kernel objects, and that all POST_KERNEL and later tasks
214 * may perform memory management tasks (except for z_phys_map() which
215 * is allowed at any time)
216 */
217 z_mem_manage_init();
218#endif /* CONFIG_MMU */
Peter Bigot74ef3952019-12-23 11:48:43 -0600219 z_sys_post_kernel = true;
220
Tomasz Bursztyka8d7bb8f2020-03-09 11:02:20 +0100221 z_sys_init_run_level(_SYS_INIT_LEVEL_POST_KERNEL);
Andrew Boie538754c2018-05-23 15:25:23 -0700222#if CONFIG_STACK_POINTER_RANDOM
223 z_stack_adjust_initialized = 1;
224#endif
Anas Nashif4b593122020-08-27 09:08:40 -0400225 boot_banner();
Andrew Boie0b474ee2016-11-08 11:06:55 -0800226
David Palchakb4a7f0f2022-06-07 15:25:37 -0700227#if defined(CONFIG_CPLUSPLUS)
Evgeniy Paltsev497cb2e2021-04-26 16:15:49 +0300228 void z_cpp_init_static(void);
229 z_cpp_init_static();
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400230#endif
231
Peter Bigotc308f802020-05-08 09:28:44 -0500232 /* Final init level before app starts */
233 z_sys_init_run_level(_SYS_INIT_LEVEL_APPLICATION);
234
Patrik Flykt4344e272019-03-08 14:19:05 -0700235 z_init_static_threads();
Anas Nashif83088a22017-08-24 04:27:51 -0400236
Anas Nashif39f632e2020-12-07 13:15:42 -0500237#ifdef CONFIG_KERNEL_COHERENCE
Daniel Leung079bc642021-02-02 16:12:15 -0800238 __ASSERT_NO_MSG(arch_mem_coherent(&_kernel));
Andy Rossf6d32ab2020-05-13 15:34:04 +0000239#endif
240
Andy Rosseb258702018-04-12 12:10:10 -0700241#ifdef CONFIG_SMP
Andy Ross2b210cb2022-01-17 11:56:54 -0800242 if (!IS_ENABLED(CONFIG_SMP_BOOT_DELAY)) {
243 z_smp_init();
244 }
Tomasz Bursztyka8d7bb8f2020-03-09 11:02:20 +0100245 z_sys_init_run_level(_SYS_INIT_LEVEL_SMP);
Andy Rosseb258702018-04-12 12:10:10 -0700246#endif
Inaky Perez-Gonzalezc51f73f2017-06-20 17:01:09 -0700247
Daniel Leunge88afd22021-07-15 13:15:29 -0700248#ifdef CONFIG_MMU
249 z_mem_manage_boot_finish();
250#endif /* CONFIG_MMU */
251
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400252 extern void main(void);
Andrew Boief1c373c2016-10-28 12:45:05 -0700253
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400254 main();
Allan Stephens073442e2016-11-09 07:46:56 -0600255
Nazar Kazakov9713f0d2022-02-24 12:00:55 +0000256 /* Mark nonessential since main() has no more work to do */
Andrew Boiefe031612019-09-21 17:54:37 -0700257 z_main_thread.base.user_options &= ~K_ESSENTIAL;
Andrew Boie8e053332019-06-11 12:58:16 -0700258
Anas Nashif471ffbe2020-01-30 08:44:10 -0500259#ifdef CONFIG_COVERAGE_DUMP
Adithya Baglody71e90f92018-08-29 16:44:16 +0530260 /* Dump coverage data once the main() has exited. */
261 gcov_coverage_dump();
Anas Nashif471ffbe2020-01-30 08:44:10 -0500262#endif
Andrew Boie8e053332019-06-11 12:58:16 -0700263} /* LCOV_EXCL_LINE ... because we just dumped final coverage data */
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400264
Andy Ross780ba232018-01-29 09:20:18 -0800265#if defined(CONFIG_MULTITHREADING)
Daniel Leung660d1472021-03-25 16:05:15 -0700266__boot_func
Andrew Boiec24673e2020-03-16 09:44:28 -0700267static void init_idle_thread(int i)
Andy Ross780ba232018-01-29 09:20:18 -0800268{
Andrew Boiec24673e2020-03-16 09:44:28 -0700269 struct k_thread *thread = &z_idle_threads[i];
270 k_thread_stack_t *stack = z_idle_stacks[i];
271
272#ifdef CONFIG_THREAD_NAME
Andrew Boiec24673e2020-03-16 09:44:28 -0700273
Trond Einar Snekvik6224ecb2022-03-25 12:46:32 +0100274#if CONFIG_MP_NUM_CPUS > 1
275 char tname[8];
Andrew Boiec24673e2020-03-16 09:44:28 -0700276 snprintk(tname, 8, "idle %02d", i);
277#else
Trond Einar Snekvik6224ecb2022-03-25 12:46:32 +0100278 char *tname = "idle";
279#endif
280
281#else
Andrew Boiec24673e2020-03-16 09:44:28 -0700282 char *tname = NULL;
283#endif /* CONFIG_THREAD_NAME */
284
Anas Nashif9e3e7f62019-12-19 08:19:45 -0500285 z_setup_new_thread(thread, stack,
Andrew Boief5a7e1a2020-09-02 09:20:38 -0700286 CONFIG_IDLE_STACK_SIZE, idle, &_kernel.cpus[i],
Andy Ross851d14a2021-05-13 15:46:43 -0700287 NULL, NULL, K_IDLE_PRIO, K_ESSENTIAL,
Andrew Boief5a7e1a2020-09-02 09:20:38 -0700288 tname);
Anas Nashif9e3e7f62019-12-19 08:19:45 -0500289 z_mark_thread_as_started(thread);
Andy Ross6c283ca2019-08-16 22:09:30 -0700290
291#ifdef CONFIG_SMP
Anas Nashif9e3e7f62019-12-19 08:19:45 -0500292 thread->base.is_idle = 1U;
Andy Ross6c283ca2019-08-16 22:09:30 -0700293#endif
Andy Ross780ba232018-01-29 09:20:18 -0800294}
Andy Ross780ba232018-01-29 09:20:18 -0800295
Andy Ross2b210cb2022-01-17 11:56:54 -0800296void z_init_cpu(int id)
Andy Rossc6d077e2021-08-18 06:28:11 -0700297{
Andy Ross2b210cb2022-01-17 11:56:54 -0800298 init_idle_thread(id);
299 _kernel.cpus[id].idle_thread = &z_idle_threads[id];
300 _kernel.cpus[id].id = id;
301 _kernel.cpus[id].irq_stack =
302 (Z_KERNEL_STACK_BUFFER(z_interrupt_stacks[id]) +
303 K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[id]));
304#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
305 _kernel.cpus[id].usage.track_usage =
306 CONFIG_SCHED_THREAD_USAGE_AUTO_ENABLE;
307#endif
Andy Rossc6d077e2021-08-18 06:28:11 -0700308}
309
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400310/**
311 *
Anas Nashifdc3d73b2016-12-19 20:25:56 -0500312 * @brief Initializes kernel data structures
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400313 *
Anas Nashifdc3d73b2016-12-19 20:25:56 -0500314 * This routine initializes various kernel data structures, including
315 * the init and idle threads and any architecture-specific initialization.
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400316 *
Benjamin Walshf6ca7de2016-11-08 10:36:50 -0500317 * Note that all fields of "_kernel" are set to zero on entry, which may
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400318 * be all the initialization many of them require.
319 *
Andrew Boiee4cc84a2020-04-24 11:29:47 -0700320 * @return initial stack pointer for the main thread
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400321 */
Daniel Leung660d1472021-03-25 16:05:15 -0700322__boot_func
Andrew Boiee4cc84a2020-04-24 11:29:47 -0700323static char *prepare_multithreading(void)
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400324{
Andrew Boiee4cc84a2020-04-24 11:29:47 -0700325 char *stack_ptr;
326
Benjamin Walshf6ca7de2016-11-08 10:36:50 -0500327 /* _kernel.ready_q is all zeroes */
Patrik Flykt4344e272019-03-08 14:19:05 -0700328 z_sched_init();
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400329
Andy Ross2724fd12018-01-29 14:55:20 -0800330#ifndef CONFIG_SMP
Benjamin Walsh88b36912016-12-02 10:37:27 -0500331 /*
332 * prime the cache with the main thread since:
333 *
334 * - the cache can never be NULL
335 * - the main thread will be the one to run first
336 * - no other thread is initialized yet and thus their priority fields
337 * contain garbage, which would prevent the cache loading algorithm
338 * to work as intended
339 */
Andrew Boiefe031612019-09-21 17:54:37 -0700340 _kernel.ready_q.cache = &z_main_thread;
Andy Ross2724fd12018-01-29 14:55:20 -0800341#endif
Andrew Boiee4cc84a2020-04-24 11:29:47 -0700342 stack_ptr = z_setup_new_thread(&z_main_thread, z_main_stack,
343 CONFIG_MAIN_STACK_SIZE, bg_thread_main,
344 NULL, NULL, NULL,
345 CONFIG_MAIN_THREAD_PRIORITY,
Ioannis Glaropoulos40aab322021-01-28 21:46:28 +0100346 K_ESSENTIAL, "main");
Andrew Boiefe031612019-09-21 17:54:37 -0700347 z_mark_thread_as_started(&z_main_thread);
348 z_ready_thread(&z_main_thread);
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400349
Andy Ross2b210cb2022-01-17 11:56:54 -0800350 z_init_cpu(0);
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400351
Andrew Boiee4cc84a2020-04-24 11:29:47 -0700352 return stack_ptr;
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400353}
354
Daniel Leung660d1472021-03-25 16:05:15 -0700355__boot_func
Andrew Boiee4cc84a2020-04-24 11:29:47 -0700356static FUNC_NORETURN void switch_to_main_thread(char *stack_ptr)
Benjamin Walshc742d7e2016-10-05 17:50:54 -0400357{
Benjamin Walsh296a2342016-11-20 11:04:31 -0500358#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
Andrew Boiee4cc84a2020-04-24 11:29:47 -0700359 arch_switch_to_main_thread(&z_main_thread, stack_ptr, bg_thread_main);
Benjamin Walsh296a2342016-11-20 11:04:31 -0500360#else
Andrew Boiee4cc84a2020-04-24 11:29:47 -0700361 ARG_UNUSED(stack_ptr);
Benjamin Walshc742d7e2016-10-05 17:50:54 -0400362 /*
363 * Context switch to main task (entry function is _main()): the
364 * current fake thread is not on a wait queue or ready queue, so it
365 * will never be rescheduled in.
366 */
Patrik Flykt4344e272019-03-08 14:19:05 -0700367 z_swap_unlocked();
Benjamin Walsh296a2342016-11-20 11:04:31 -0500368#endif
Andrew Boiec5164f32019-06-11 13:33:32 -0700369 CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
Benjamin Walshc742d7e2016-10-05 17:50:54 -0400370}
Anas Nashifc0ea5052019-01-30 09:58:41 -0500371#endif /* CONFIG_MULTITHREADING */
Benjamin Walshc742d7e2016-10-05 17:50:54 -0400372
#if defined(CONFIG_ENTROPY_HAS_DRIVER) || defined(CONFIG_TEST_RANDOM_GENERATOR)
/* Fill @a buf with @a length random bytes, preferring the HW entropy
 * driver when one is available and ready, falling back to sys_rand_get().
 */
__boot_func
void z_early_boot_rand_get(uint8_t *buf, size_t length)
{
#ifdef CONFIG_ENTROPY_HAS_DRIVER
	const struct device *entropy = DEVICE_DT_GET_OR_NULL(DT_CHOSEN(zephyr_entropy));
	int rc;

	if (!device_is_ready(entropy)) {
		goto sys_rand_fallback;
	}

	/* Try to see if driver provides an ISR-specific API */
	rc = entropy_get_entropy_isr(entropy, buf, length, ENTROPY_BUSYWAIT);
	if (rc == -ENOTSUP) {
		/* Driver does not provide an ISR-specific API, assume it can
		 * be called from ISR context
		 */
		rc = entropy_get_entropy(entropy, buf, length);
	}

	if (rc >= 0) {
		return;
	}

	/* Fall through to fallback */

sys_rand_fallback:
#endif

	/* FIXME: this assumes sys_rand32_get() won't use any synchronization
	 * primitive, like semaphores or mutexes.  It's too early in the boot
	 * process to use any of them.  Ideally, only the path where entropy
	 * devices are available should be built, this is only a fallback for
	 * those devices without a HWRNG entropy driver.
	 */
	sys_rand_get(buf, length);
}
/* defined(CONFIG_ENTROPY_HAS_DRIVER) || defined(CONFIG_TEST_RANDOM_GENERATOR) */
#endif
Flavio Ceolin394f66b2019-08-09 16:31:33 -0700413
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400414/**
415 *
Anas Nashifdc3d73b2016-12-19 20:25:56 -0500416 * @brief Initialize kernel
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400417 *
418 * This routine is invoked when the system is ready to run C code. The
419 * processor must be running in 32-bit mode, and the BSS must have been
420 * cleared/zeroed.
421 *
422 * @return Does not return
423 */
Daniel Leung660d1472021-03-25 16:05:15 -0700424__boot_func
Patrik Flykt4344e272019-03-08 14:19:05 -0700425FUNC_NORETURN void z_cstart(void)
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400426{
Adithya Baglody71e90f92018-08-29 16:44:16 +0530427 /* gcov hook needed to get the coverage report.*/
428 gcov_static_init();
429
Andrew Boie982d5c82018-05-23 13:30:34 -0700430 /* perform any architecture-specific initialization */
Andrew Boie4f77c2a2019-11-07 12:43:29 -0800431 arch_kernel_init();
Andrew Boie982d5c82018-05-23 13:30:34 -0700432
Ederson de Souza75fd4522022-01-25 10:47:05 -0800433 LOG_CORE_INIT();
434
Andrew Boie468efad2020-05-12 16:20:14 -0700435#if defined(CONFIG_MULTITHREADING)
436 /* Note: The z_ready_thread() call in prepare_multithreading() requires
437 * a dummy thread even if CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN=y
438 */
439 struct k_thread dummy_thread;
Andy Ross6d9106f2019-02-01 14:42:28 -0800440
Andrew Boie468efad2020-05-12 16:20:14 -0700441 z_dummy_thread_init(&dummy_thread);
Andy Ross6d9106f2019-02-01 14:42:28 -0800442#endif
Peter Bigot1cadd8b2021-02-02 10:07:18 -0600443 /* do any necessary initialization of static devices */
444 z_device_state_init();
445
Andrew Boie0b474ee2016-11-08 11:06:55 -0800446 /* perform basic hardware initialization */
Tomasz Bursztyka8d7bb8f2020-03-09 11:02:20 +0100447 z_sys_init_run_level(_SYS_INIT_LEVEL_PRE_KERNEL_1);
448 z_sys_init_run_level(_SYS_INIT_LEVEL_PRE_KERNEL_2);
Andrew Boie0b474ee2016-11-08 11:06:55 -0800449
Mazen NEIFERe2bbad92017-02-07 10:01:12 +0100450#ifdef CONFIG_STACK_CANARIES
Andrew Boie468efad2020-05-12 16:20:14 -0700451 uintptr_t stack_guard;
452
Kumar Galaa1b77fd2020-05-27 11:26:57 -0500453 z_early_boot_rand_get((uint8_t *)&stack_guard, sizeof(stack_guard));
Flavio Ceolin394f66b2019-08-09 16:31:33 -0700454 __stack_chk_guard = stack_guard;
455 __stack_chk_guard <<= 8;
456#endif /* CONFIG_STACK_CANARIES */
Leandro Pereira389c3642018-05-23 13:38:52 -0700457
Daniel Leung15597122021-03-31 13:40:01 -0700458#ifdef CONFIG_TIMING_FUNCTIONS_NEED_AT_BOOT
Daniel Leungfd7a68d2020-10-14 12:17:12 -0700459 timing_init();
460 timing_start();
461#endif
462
Andy Ross3d146152018-06-13 10:51:42 -0700463#ifdef CONFIG_MULTITHREADING
Andrew Boiee4cc84a2020-04-24 11:29:47 -0700464 switch_to_main_thread(prepare_multithreading());
Andy Ross3d146152018-06-13 10:51:42 -0700465#else
Ioannis Glaropoulos60bd51a2020-08-03 11:11:19 +0200466#ifdef ARCH_SWITCH_TO_MAIN_NO_MULTITHREADING
467 /* Custom ARCH-specific routine to switch to main()
468 * in the case of no multi-threading.
469 */
470 ARCH_SWITCH_TO_MAIN_NO_MULTITHREADING(bg_thread_main,
471 NULL, NULL, NULL);
472#else
Andy Ross3d146152018-06-13 10:51:42 -0700473 bg_thread_main(NULL, NULL, NULL);
474
Andrew Boiec5164f32019-06-11 13:33:32 -0700475 /* LCOV_EXCL_START
476 * We've already dumped coverage data at this point.
477 */
Andy Ross8daafd42018-08-30 09:45:12 -0700478 irq_lock();
Flavio Ceolinb3d92022018-09-17 15:56:06 -0700479 while (true) {
Andy Ross3d146152018-06-13 10:51:42 -0700480 }
Andrew Boiec5164f32019-06-11 13:33:32 -0700481 /* LCOV_EXCL_STOP */
Andy Ross3d146152018-06-13 10:51:42 -0700482#endif
Ioannis Glaropoulos60bd51a2020-08-03 11:11:19 +0200483#endif /* CONFIG_MULTITHREADING */
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400484
485 /*
486 * Compiler can't tell that the above routines won't return and issues
487 * a warning unless we explicitly tell it that control never gets this
488 * far.
489 */
490
Andrew Boiec5164f32019-06-11 13:33:32 -0700491 CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400492}