blob: d1697ecf276d42d1c53c5e37d0e645d00078820a [file] [log] [blame]
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001/*
2 * Copyright (c) 2010-2014 Wind River Systems, Inc.
3 *
David B. Kinderac74d8b2017-01-18 17:01:01 -08004 * SPDX-License-Identifier: Apache-2.0
Benjamin Walsh456c6da2016-09-02 18:55:39 -04005 */
6
7/**
8 * @file
Anas Nashifcb888e62016-12-18 09:42:55 -05009 * @brief Kernel initialization module
Benjamin Walsh456c6da2016-09-02 18:55:39 -040010 *
Anas Nashifdc3d73b2016-12-19 20:25:56 -050011 * This module contains routines that are used to initialize the kernel.
Benjamin Walsh456c6da2016-09-02 18:55:39 -040012 */
13
Andrew Boief1c373c2016-10-28 12:45:05 -070014#include <zephyr.h>
Benjamin Walshf6ca7de2016-11-08 10:36:50 -050015#include <offsets_short.h>
Benjamin Walsh456c6da2016-09-02 18:55:39 -040016#include <kernel.h>
17#include <misc/printk.h>
Carles Cuficb0cf9f2017-01-10 10:57:38 +010018#include <misc/stack.h>
Leandro Pereiraadce1d12017-10-13 15:45:02 -070019#include <random/rand32.h>
Anas Nashif397d29d2017-06-17 11:30:47 -040020#include <linker/sections.h>
Benjamin Walsh456c6da2016-09-02 18:55:39 -040021#include <toolchain.h>
Benjamin Walshf6ca7de2016-11-08 10:36:50 -050022#include <kernel_structs.h>
Benjamin Walsh456c6da2016-09-02 18:55:39 -040023#include <device.h>
24#include <init.h>
Anas Nashif397d29d2017-06-17 11:30:47 -040025#include <linker/linker-defs.h>
Benjamin Walshb4b108d2016-10-13 10:31:48 -040026#include <ksched.h>
Mahavir Jaina6366042016-12-02 21:45:49 +053027#include <version.h>
Mahavir Jainacea2412016-12-02 21:48:39 +053028#include <string.h>
Anas Nashif8920cf12017-08-31 11:02:47 -040029#include <misc/dlist.h>
Andy Ross245b54e2018-02-08 09:10:46 -080030#include <kernel_internal.h>
Andy Ross9c62cc62018-01-25 15:24:15 -080031#include <kswap.h>
Andrew Boie538754c2018-05-23 15:25:23 -070032#include <entropy.h>
Krzysztof Chruscinski6b01c892018-07-11 12:26:31 +020033#include <logging/log_ctrl.h>
Anas Nashifb6304e62018-07-04 08:03:03 -050034#include <tracing.h>
Flavio Ceolinb3d92022018-09-17 15:56:06 -070035#include <stdbool.h>
Adithya Baglody71e90f92018-08-29 16:44:16 +053036#include <misc/gcov.h>
Benjamin Walsh456c6da2016-09-02 18:55:39 -040037
Anas Nashif57554052018-03-03 02:31:05 -060038#define IDLE_THREAD_NAME "idle"
Anas Nashif0a0c8c82018-09-17 06:58:09 -050039#define LOG_LEVEL CONFIG_KERNEL_LOG_LEVEL
40#include <logging/log.h>
Anas Nashif6d359df2019-06-04 13:42:17 -040041LOG_MODULE_REGISTER(os);
Anas Nashif57554052018-03-03 02:31:05 -060042
Benjamin Walsh456c6da2016-09-02 18:55:39 -040043/* boot banner items */
Anas Nashif42f45382019-03-26 12:01:44 -040044#if defined(CONFIG_MULTITHREADING) && defined(CONFIG_BOOT_DELAY) \
45 && CONFIG_BOOT_DELAY > 0
Inaky Perez-Gonzalezc51f73f2017-06-20 17:01:09 -070046#define BOOT_DELAY_BANNER " (delayed boot " \
47 STRINGIFY(CONFIG_BOOT_DELAY) "ms)"
Inaky Perez-Gonzalezc51f73f2017-06-20 17:01:09 -070048#else
49#define BOOT_DELAY_BANNER ""
Inaky Perez-Gonzalezc51f73f2017-06-20 17:01:09 -070050#endif
Anas Nashifdaf77162018-04-09 21:53:26 -050051
52#ifdef BUILD_VERSION
Marc Herbertd4df6ba2019-06-17 11:11:19 -070053#define BOOT_BANNER "Booting Zephyr OS build " \
Anas Nashifdaf77162018-04-09 21:53:26 -050054 STRINGIFY(BUILD_VERSION) BOOT_DELAY_BANNER
55#else
Marc Herbertd4df6ba2019-06-17 11:11:19 -070056#define BOOT_BANNER "Booting Zephyr OS version " \
Anas Nashifdaf77162018-04-09 21:53:26 -050057 KERNEL_VERSION_STRING BOOT_DELAY_BANNER
58#endif
Benjamin Walsh456c6da2016-09-02 18:55:39 -040059
60#if !defined(CONFIG_BOOT_BANNER)
Flavio Ceolinb3d92022018-09-17 15:56:06 -070061#define PRINT_BOOT_BANNER() do { } while (false)
Benjamin Walsh456c6da2016-09-02 18:55:39 -040062#else
Anas Nashifdaf77162018-04-09 21:53:26 -050063#define PRINT_BOOT_BANNER() printk("***** " BOOT_BANNER " *****\n")
Benjamin Walsh456c6da2016-09-02 18:55:39 -040064#endif
65
66/* boot time measurement items */
67
68#ifdef CONFIG_BOOT_TIME_MEASUREMENT
Adithya Baglodybe1cb962017-06-14 15:15:49 +053069u64_t __noinit __start_time_stamp; /* timestamp when kernel starts */
70u64_t __noinit __main_time_stamp; /* timestamp when main task starts */
71u64_t __noinit __idle_time_stamp; /* timestamp when CPU goes idle */
Benjamin Walsh456c6da2016-09-02 18:55:39 -040072#endif
73
Benjamin Walsh456c6da2016-09-02 18:55:39 -040074/* init/main and idle threads */
75
Benjamin Walshc9dd56e2016-10-24 12:57:46 -040076#define IDLE_STACK_SIZE CONFIG_IDLE_STACK_SIZE
Andrew Boief1c373c2016-10-28 12:45:05 -070077#define MAIN_STACK_SIZE CONFIG_MAIN_STACK_SIZE
Andrew Boief1c373c2016-10-28 12:45:05 -070078
Andrew Boiedc5d9352017-06-02 12:56:47 -070079K_THREAD_STACK_DEFINE(_main_stack, MAIN_STACK_SIZE);
80K_THREAD_STACK_DEFINE(_idle_stack, IDLE_STACK_SIZE);
Benjamin Walsh456c6da2016-09-02 18:55:39 -040081
Andrew Boied26cf2d2017-03-30 13:07:02 -070082static struct k_thread _main_thread_s;
83static struct k_thread _idle_thread_s;
84
85k_tid_t const _main_thread = (k_tid_t)&_main_thread_s;
86k_tid_t const _idle_thread = (k_tid_t)&_idle_thread_s;
Benjamin Walsh456c6da2016-09-02 18:55:39 -040087
88/*
89 * storage space for the interrupt stack
90 *
Anas Nashifdc3d73b2016-12-19 20:25:56 -050091 * Note: This area is used as the system stack during kernel initialization,
92 * since the kernel hasn't yet set up its own stack areas. The dual purposing
93 * of this area is safe since interrupts are disabled until the kernel context
94 * switches to the init thread.
Benjamin Walsh456c6da2016-09-02 18:55:39 -040095 */
Andrew Boiedc5d9352017-06-02 12:56:47 -070096K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
Benjamin Walsh456c6da2016-09-02 18:55:39 -040097
Andy Ross780ba232018-01-29 09:20:18 -080098/*
99 * Similar idle thread & interrupt stack definitions for the
100 * auxiliary CPUs. The declaration macros aren't set up to define an
101 * array, so do it with a simple test for up to 4 processors. Should
102 * clean this up in the future.
103 */
104#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 1
105K_THREAD_STACK_DEFINE(_idle_stack1, IDLE_STACK_SIZE);
106static struct k_thread _idle_thread1_s;
107k_tid_t const _idle_thread1 = (k_tid_t)&_idle_thread1_s;
108K_THREAD_STACK_DEFINE(_interrupt_stack1, CONFIG_ISR_STACK_SIZE);
109#endif
110
111#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 2
112K_THREAD_STACK_DEFINE(_idle_stack2, IDLE_STACK_SIZE);
113static struct k_thread _idle_thread2_s;
114k_tid_t const _idle_thread2 = (k_tid_t)&_idle_thread2_s;
115K_THREAD_STACK_DEFINE(_interrupt_stack2, CONFIG_ISR_STACK_SIZE);
116#endif
117
118#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 3
119K_THREAD_STACK_DEFINE(_idle_stack3, IDLE_STACK_SIZE);
120static struct k_thread _idle_thread3_s;
121k_tid_t const _idle_thread3 = (k_tid_t)&_idle_thread3_s;
122K_THREAD_STACK_DEFINE(_interrupt_stack3, CONFIG_ISR_STACK_SIZE);
123#endif
124
#ifdef CONFIG_SYS_CLOCK_EXISTS
	/* Set up the kernel timeout queue; only needed when a system
	 * clock exists. Multi-statement macro uses the standard
	 * do { } while (false) wrapper.
	 */
	#define initialize_timeouts() do { \
		sys_dlist_init(&_timeout_q); \
	} while (false)
#else
	/* No system clock: nothing to initialize. Use while (false) to
	 * match the convention used elsewhere in this file (the file
	 * already includes <stdbool.h>).
	 */
	#define initialize_timeouts() do { } while (false)
#endif
132
Peter Mitsis96cb05c2016-09-15 12:37:58 -0400133extern void idle(void *unused1, void *unused2, void *unused3);
134
Carles Cuficb0cf9f2017-01-10 10:57:38 +0100135
Andrew Boiefe228a82019-06-11 12:49:32 -0700136/* LCOV_EXCL_START
137 *
138 * This code is called so early in the boot process that code coverage
139 * doesn't work properly. In addition, not all arches call this code,
140 * some like x86 do this with optimized assembly
141 */
142
/**
 *
 * @brief Clear BSS
 *
 * This routine clears the BSS region, so all bytes are 0.
 * Called very early in boot, before the C runtime is fully set up;
 * it must not rely on any initialized data itself.
 *
 * @return N/A
 */
void z_bss_zero(void)
{
	/* Main BSS: bounds come from linker-defined symbols */
	(void)memset(__bss_start, 0, __bss_end - __bss_start);
#ifdef DT_CCM_BASE_ADDRESS
	/* Zero the BSS portion placed in core-coupled memory (CCM) */
	(void)memset(&__ccm_bss_start, 0,
		     ((u32_t) &__ccm_bss_end - (u32_t) &__ccm_bss_start));
#endif
#ifdef CONFIG_CODE_DATA_RELOCATION
	/* Relocated sections have their own BSS regions to clear */
	extern void bss_zeroing_relocation(void);

	bss_zeroing_relocation();
#endif /* CONFIG_CODE_DATA_RELOCATION */
#ifdef CONFIG_COVERAGE_GCOV
	/* Coverage counters live in a dedicated BSS section */
	(void)memset(&__gcov_bss_start, 0,
		     ((u32_t) &__gcov_bss_end - (u32_t) &__gcov_bss_start));
#endif
}
168
Andrew Boie01100ea2019-02-21 15:02:22 -0800169#ifdef CONFIG_STACK_CANARIES
170extern volatile uintptr_t __stack_chk_guard;
171#endif /* CONFIG_STACK_CANARIES */
172
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400173
#ifdef CONFIG_XIP
/**
 *
 * @brief Copy the data section from ROM to RAM
 *
 * This routine copies the data section from ROM to RAM. Only built for
 * XIP (execute-in-place) configurations, where initialized data must be
 * loaded from flash into RAM before C code can rely on it.
 *
 * @return N/A
 */
void z_data_copy(void)
{
	/* Main initialized-data section */
	(void)memcpy(&__data_ram_start, &__data_rom_start,
		     __data_ram_end - __data_ram_start);
#ifdef CONFIG_ARCH_HAS_RAMFUNC_SUPPORT
	/* Functions that must execute from RAM */
	(void)memcpy(&_ramfunc_ram_start, &_ramfunc_rom_start,
		     (uintptr_t) &_ramfunc_ram_size);
#endif /* CONFIG_ARCH_HAS_RAMFUNC_SUPPORT */
#ifdef DT_CCM_BASE_ADDRESS
	/* Initialized data placed in core-coupled memory (CCM) */
	(void)memcpy(&__ccm_data_start, &__ccm_data_rom_start,
		     __ccm_data_end - __ccm_data_start);
#endif
#ifdef CONFIG_CODE_DATA_RELOCATION
	/* Relocated sections carry their own data-copy routine */
	extern void data_copy_xip_relocation(void);

	data_copy_xip_relocation();
#endif /* CONFIG_CODE_DATA_RELOCATION */
#ifdef CONFIG_USERSPACE
#ifdef CONFIG_STACK_CANARIES
	/* stack canary checking is active for all C functions.
	 * __stack_chk_guard is some uninitialized value living in the
	 * app shared memory sections. Preserve it, and don't make any
	 * function calls to perform the memory copy. The true canary
	 * value gets set later in z_cstart().
	 */
	uintptr_t guard_copy = __stack_chk_guard;
	u8_t *src = (u8_t *)&_app_smem_rom_start;
	u8_t *dst = (u8_t *)&_app_smem_start;
	u32_t count = _app_smem_end - _app_smem_start;

	/* Byte-wise copy: no memcpy() call, so the canary the compiler
	 * inserted for *this* function frame is never re-checked against
	 * a clobbered guard. (A redundant second read of
	 * __stack_chk_guard was removed; the initializer above already
	 * captured it.)
	 */
	while (count > 0) {
		*(dst++) = *(src++);
		count--;
	}
	__stack_chk_guard = guard_copy;
#else
	(void)memcpy(&_app_smem_start, &_app_smem_rom_start,
		     _app_smem_end - _app_smem_start);
#endif /* CONFIG_STACK_CANARIES */
#endif /* CONFIG_USERSPACE */
}
#endif /* CONFIG_XIP */
226
227/* LCOV_EXCL_STOP */
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400228
/**
 *
 * @brief Mainline for kernel's background thread
 *
 * This routine completes kernel initialization by invoking the remaining
 * init functions, then invokes application's main() routine.
 *
 * @param unused1 unused
 * @param unused2 unused
 * @param unused3 unused
 *
 * @return N/A
 */
static void bg_thread_main(void *unused1, void *unused2, void *unused3)
{
	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);
	ARG_UNUSED(unused3);

	/* boot_delay is 0 unless a positive CONFIG_BOOT_DELAY is set */
#if defined(CONFIG_BOOT_DELAY) && CONFIG_BOOT_DELAY > 0
	static const unsigned int boot_delay = CONFIG_BOOT_DELAY;
#else
	static const unsigned int boot_delay;
#endif

	/* Run POST_KERNEL-level init hooks: kernel services are now up */
	z_sys_device_do_config_level(_SYS_INIT_LEVEL_POST_KERNEL);
#if CONFIG_STACK_POINTER_RANDOM
	z_stack_adjust_initialized = 1;
#endif
	/* Optional boot delay (multithreading only; busy-wait, no sleep) */
	if (boot_delay > 0 && IS_ENABLED(CONFIG_MULTITHREADING)) {
		printk("***** delaying boot " STRINGIFY(CONFIG_BOOT_DELAY)
		       "ms (per build configuration) *****\n");
		k_busy_wait(CONFIG_BOOT_DELAY * USEC_PER_MSEC);
	}
	PRINT_BOOT_BANNER();

	/* Final init level before app starts */
	z_sys_device_do_config_level(_SYS_INIT_LEVEL_APPLICATION);

#ifdef CONFIG_CPLUSPLUS
	/* Process the .ctors and .init_array sections */
	extern void __do_global_ctors_aux(void);
	extern void __do_init_array_aux(void);
	__do_global_ctors_aux();
	__do_init_array_aux();
#endif

	/* Start threads declared with K_THREAD_DEFINE() */
	z_init_static_threads();

#ifdef CONFIG_SMP
	z_smp_init();
#endif

#ifdef CONFIG_BOOT_TIME_MEASUREMENT
	/* record timestamp for kernel's _main() function */
	extern u64_t __main_time_stamp;

	__main_time_stamp = (u64_t)k_cycle_get_32();
#endif

	extern void main(void);

	main();

	/* Mark nonessential since main() has no more work to do */
	_main_thread->base.user_options &= ~K_ESSENTIAL;

	/* Dump coverage data once the main() has exited. */
	gcov_coverage_dump();
} /* LCOV_EXCL_LINE ... because we just dumped final coverage data */
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400295
/* LCOV_EXCL_START */

/* Weak default main(): overridden by any main() the application defines. */
void __weak main(void)
{
	/* NOP default main() if the application does not provide one. */
	arch_nop();
}

/* LCOV_EXCL_STOP */
305
#if defined(CONFIG_MULTITHREADING)
/* Set up and start one idle thread on the given stack at the lowest
 * priority; marked K_ESSENTIAL so its death aborts the system.
 */
static void init_idle_thread(struct k_thread *thr, k_thread_stack_t *stack)
{
#ifdef CONFIG_SMP
	/* Flag lets the SMP scheduler treat this thread specially */
	thr->base.is_idle = 1U;
#endif

	z_setup_new_thread(thr, stack,
			   IDLE_STACK_SIZE, idle, NULL, NULL, NULL,
			   K_LOWEST_THREAD_PRIO, K_ESSENTIAL, IDLE_THREAD_NAME);
	z_mark_thread_as_started(thr);
}
#endif /* CONFIG_MULTITHREADING */
Andy Ross780ba232018-01-29 09:20:18 -0800319
/**
 *
 * @brief Initializes kernel data structures
 *
 * This routine initializes various kernel data structures, including
 * the init and idle threads and any architecture-specific initialization.
 *
 * Note that all fields of "_kernel" are set to zero on entry, which may
 * be all the initialization many of them require.
 *
 * @param dummy_thread placeholder "current thread" used until the real
 *                     main thread is swapped in (unused on arches with a
 *                     custom swap-to-main)
 *
 * @return N/A
 */
#ifdef CONFIG_MULTITHREADING
static void prepare_multithreading(struct k_thread *dummy_thread)
{
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
	ARG_UNUSED(dummy_thread);
#else

	/*
	 * Initialize the current execution thread to permit a level of
	 * debugging output if an exception should happen during kernel
	 * initialization. However, don't waste effort initializing the
	 * fields of the dummy thread beyond those needed to identify it as a
	 * dummy thread.
	 */
	dummy_thread->base.user_options = K_ESSENTIAL;
	dummy_thread->base.thread_state = _THREAD_DUMMY;
#ifdef CONFIG_THREAD_STACK_INFO
	dummy_thread->stack_info.start = 0U;
	dummy_thread->stack_info.size = 0U;
#endif
#ifdef CONFIG_USERSPACE
	dummy_thread->mem_domain_info.mem_domain = 0;
#endif
#endif /* CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN */

	/* _kernel.ready_q is all zeroes */
	z_sched_init();

#ifndef CONFIG_SMP
	/*
	 * prime the cache with the main thread since:
	 *
	 * - the cache can never be NULL
	 * - the main thread will be the one to run first
	 * - no other thread is initialized yet and thus their priority fields
	 *   contain garbage, which would prevent the cache loading algorithm
	 *   to work as intended
	 */
	_kernel.ready_q.cache = _main_thread;
#endif

	/* Create (but don't yet run) the main thread that executes
	 * bg_thread_main(); K_ESSENTIAL until main() returns.
	 */
	z_setup_new_thread(_main_thread, _main_stack,
			   MAIN_STACK_SIZE, bg_thread_main,
			   NULL, NULL, NULL,
			   CONFIG_MAIN_THREAD_PRIORITY, K_ESSENTIAL, "main");
	sys_trace_thread_create(_main_thread);

	z_mark_thread_as_started(_main_thread);
	z_ready_thread(_main_thread);

	/* CPU 0's idle thread */
	init_idle_thread(_idle_thread, _idle_stack);
	_kernel.cpus[0].idle_thread = _idle_thread;
	sys_trace_thread_create(_idle_thread);

	/* Per-CPU idle threads and IRQ stacks for up to 4 SMP CPUs; the
	 * stack macros can't define arrays, hence the copy-paste blocks.
	 */
#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 1
	init_idle_thread(_idle_thread1, _idle_stack1);
	_kernel.cpus[1].idle_thread = _idle_thread1;
	_kernel.cpus[1].id = 1;
	_kernel.cpus[1].irq_stack = Z_THREAD_STACK_BUFFER(_interrupt_stack1)
		+ CONFIG_ISR_STACK_SIZE;
#endif

#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 2
	init_idle_thread(_idle_thread2, _idle_stack2);
	_kernel.cpus[2].idle_thread = _idle_thread2;
	_kernel.cpus[2].id = 2;
	_kernel.cpus[2].irq_stack = Z_THREAD_STACK_BUFFER(_interrupt_stack2)
		+ CONFIG_ISR_STACK_SIZE;
#endif

#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 3
	init_idle_thread(_idle_thread3, _idle_stack3);
	_kernel.cpus[3].idle_thread = _idle_thread3;
	_kernel.cpus[3].id = 3;
	_kernel.cpus[3].irq_stack = Z_THREAD_STACK_BUFFER(_interrupt_stack3)
		+ CONFIG_ISR_STACK_SIZE;
#endif

	initialize_timeouts();

}
413
/* Hand control from the early-boot context to the main thread; never
 * returns. On arches with a custom swap-to-main this starts the thread
 * directly, otherwise a normal context switch is used.
 */
static FUNC_NORETURN void switch_to_main_thread(void)
{
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
	z_arch_switch_to_main_thread(_main_thread, _main_stack,
				     K_THREAD_STACK_SIZEOF(_main_stack),
				     bg_thread_main);
#else
	/*
	 * Context switch to main task (entry function is _main()): the
	 * current fake thread is not on a wait queue or ready queue, so it
	 * will never be rescheduled in.
	 */
	z_swap_unlocked();
#endif
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
#endif /* CONFIG_MULTITHREADING */
Benjamin Walshc742d7e2016-10-05 17:50:54 -0400431
Andrew Boie538754c2018-05-23 15:25:23 -0700432u32_t z_early_boot_rand32_get(void)
Leandro Pereira389c3642018-05-23 13:38:52 -0700433{
434#ifdef CONFIG_ENTROPY_HAS_DRIVER
435 struct device *entropy = device_get_binding(CONFIG_ENTROPY_NAME);
436 int rc;
Andrew Boie538754c2018-05-23 15:25:23 -0700437 u32_t retval;
Leandro Pereira389c3642018-05-23 13:38:52 -0700438
439 if (entropy == NULL) {
440 goto sys_rand32_fallback;
441 }
442
Carles Cufib5464492018-05-24 20:12:23 +0200443 /* Try to see if driver provides an ISR-specific API */
444 rc = entropy_get_entropy_isr(entropy, (u8_t *)&retval,
445 sizeof(retval), ENTROPY_BUSYWAIT);
446 if (rc == -ENOTSUP) {
447 /* Driver does not provide an ISR-specific API, assume it can
448 * be called from ISR context
449 */
450 rc = entropy_get_entropy(entropy, (u8_t *)&retval,
451 sizeof(retval));
452 }
453
454 if (rc >= 0) {
Andrew Boie538754c2018-05-23 15:25:23 -0700455 return retval;
Leandro Pereira389c3642018-05-23 13:38:52 -0700456 }
457
Carles Cufib5464492018-05-24 20:12:23 +0200458 /* Fall through to fallback */
Leandro Pereira389c3642018-05-23 13:38:52 -0700459
460sys_rand32_fallback:
Mazen NEIFERe2bbad92017-02-07 10:01:12 +0100461#endif
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400462
Leandro Pereira389c3642018-05-23 13:38:52 -0700463 /* FIXME: this assumes sys_rand32_get() won't use any synchronization
464 * primitive, like semaphores or mutexes. It's too early in the boot
465 * process to use any of them. Ideally, only the path where entropy
466 * devices are available should be built, this is only a fallback for
467 * those devices without a HWRNG entropy driver.
468 */
Andrew Boie538754c2018-05-23 15:25:23 -0700469 return sys_rand32_get();
Leandro Pereira389c3642018-05-23 13:38:52 -0700470}
Andrew Boie538754c2018-05-23 15:25:23 -0700471
/**
 *
 * @brief Initialize kernel
 *
 * This routine is invoked when the system is ready to run C code. The
 * processor must be running in 32-bit mode, and the BSS must have been
 * cleared/zeroed.
 *
 * @return Does not return
 */
FUNC_NORETURN void z_cstart(void)
{
	/* gcov hook needed to get the coverage report.*/
	gcov_static_init();

	/* Bring up logging first so later init stages can log */
	if (IS_ENABLED(CONFIG_LOG)) {
		log_core_init();
	}

	/* perform any architecture-specific initialization */
	kernel_arch_init();

#ifdef CONFIG_MULTITHREADING
	/* Stack-allocated placeholder thread so _current is valid (e.g.
	 * for exception reporting) until the real main thread runs.
	 */
	struct k_thread dummy_thread = {
		.base.thread_state = _THREAD_DUMMY,
# ifdef CONFIG_SCHED_CPU_MASK
		.base.cpu_mask = -1,
# endif
	};

	_current = &dummy_thread;
#endif

#ifdef CONFIG_USERSPACE
	/* Zero app shared-memory BSS (kept separate from kernel BSS) */
	z_app_shmem_bss_zero();
#endif

	/* perform basic hardware initialization */
	z_sys_device_do_config_level(_SYS_INIT_LEVEL_PRE_KERNEL_1);
	z_sys_device_do_config_level(_SYS_INIT_LEVEL_PRE_KERNEL_2);

#ifdef CONFIG_STACK_CANARIES
	/* Seed the real canary now that entropy may be available */
	__stack_chk_guard = z_early_boot_rand32_get();
#endif

#ifdef CONFIG_MULTITHREADING
	prepare_multithreading(&dummy_thread);
	switch_to_main_thread();
#else
	/* Single-threaded build: run init/main inline, then park the CPU */
	bg_thread_main(NULL, NULL, NULL);

	/* LCOV_EXCL_START
	 * We've already dumped coverage data at this point.
	 */
	irq_lock();
	while (true) {
	}
	/* LCOV_EXCL_STOP */
#endif

	/*
	 * Compiler can't tell that the above routines won't return and issues
	 * a warning unless we explicitly tell it that control never gets this
	 * far.
	 */

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400539}