/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Kernel initialization module
 *
 * This module contains routines that are used to initialize the kernel.
 */

#include <zephyr.h>
#include <offsets_short.h>
#include <kernel.h>
#include <misc/printk.h>
#include <misc/stack.h>
#include <random/rand32.h>
#include <linker/sections.h>
#include <toolchain.h>
#include <kernel_structs.h>
#include <device.h>
#include <init.h>
#include <linker/linker-defs.h>
#include <ksched.h>
#include <version.h>
#include <string.h>
#include <misc/dlist.h>
#include <kernel_internal.h>
#include <kswap.h>
#include <entropy.h>
#include <logging/log_ctrl.h>
#include <tracing.h>
#include <stdbool.h>
#include <misc/gcov.h>

#define IDLE_THREAD_NAME "idle"
#define LOG_LEVEL CONFIG_KERNEL_LOG_LEVEL
#include <logging/log.h>
LOG_MODULE_REGISTER(os);

/* boot banner items */
#if defined(CONFIG_MULTITHREADING) && defined(CONFIG_BOOT_DELAY) \
	&& CONFIG_BOOT_DELAY > 0
#define BOOT_DELAY_BANNER " (delayed boot " \
	STRINGIFY(CONFIG_BOOT_DELAY) "ms)"
#else
#define BOOT_DELAY_BANNER ""
#endif

#ifdef BUILD_VERSION
#define BOOT_BANNER "Booting Zephyr OS build " \
		STRINGIFY(BUILD_VERSION) BOOT_DELAY_BANNER
#else
#define BOOT_BANNER "Booting Zephyr OS version " \
		KERNEL_VERSION_STRING BOOT_DELAY_BANNER
#endif

#if !defined(CONFIG_BOOT_BANNER)
#define PRINT_BOOT_BANNER() do { } while (false)
#else
#define PRINT_BOOT_BANNER() printk("***** " BOOT_BANNER " *****\n")
#endif

/* boot time measurement items */

#ifdef CONFIG_BOOT_TIME_MEASUREMENT
u64_t __noinit __start_time_stamp; /* timestamp when kernel starts */
u64_t __noinit __main_time_stamp;  /* timestamp when main task starts */
u64_t __noinit __idle_time_stamp;  /* timestamp when CPU goes idle */
#endif

/* init/main and idle threads */

#define IDLE_STACK_SIZE CONFIG_IDLE_STACK_SIZE
#define MAIN_STACK_SIZE CONFIG_MAIN_STACK_SIZE

K_THREAD_STACK_DEFINE(_main_stack, MAIN_STACK_SIZE);
K_THREAD_STACK_DEFINE(_idle_stack, IDLE_STACK_SIZE);

static struct k_thread _main_thread_s;
static struct k_thread _idle_thread_s;

k_tid_t const _main_thread = (k_tid_t)&_main_thread_s;
k_tid_t const _idle_thread = (k_tid_t)&_idle_thread_s;

/*
 * storage space for the interrupt stack
 *
 * Note: This area is used as the system stack during kernel initialization,
 * since the kernel hasn't yet set up its own stack areas. The dual purposing
 * of this area is safe since interrupts are disabled until the kernel context
 * switches to the init thread.
 */
K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);

/*
 * Similar idle thread & interrupt stack definitions for the
 * auxiliary CPUs. The declaration macros aren't set up to define an
 * array, so do it with a simple test for up to 4 processors. Should
 * clean this up in the future.
 */
#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 1
K_THREAD_STACK_DEFINE(_idle_stack1, IDLE_STACK_SIZE);
static struct k_thread _idle_thread1_s;
k_tid_t const _idle_thread1 = (k_tid_t)&_idle_thread1_s;
K_THREAD_STACK_DEFINE(_interrupt_stack1, CONFIG_ISR_STACK_SIZE);
#endif

#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 2
K_THREAD_STACK_DEFINE(_idle_stack2, IDLE_STACK_SIZE);
static struct k_thread _idle_thread2_s;
k_tid_t const _idle_thread2 = (k_tid_t)&_idle_thread2_s;
K_THREAD_STACK_DEFINE(_interrupt_stack2, CONFIG_ISR_STACK_SIZE);
#endif

#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 3
K_THREAD_STACK_DEFINE(_idle_stack3, IDLE_STACK_SIZE);
static struct k_thread _idle_thread3_s;
k_tid_t const _idle_thread3 = (k_tid_t)&_idle_thread3_s;
K_THREAD_STACK_DEFINE(_interrupt_stack3, CONFIG_ISR_STACK_SIZE);
#endif

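/*
 * With a system clock present, kernel initialization must prime the timeout
 * queue; otherwise the hook below compiles away to nothing.
 */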
#ifdef CONFIG_SYS_CLOCK_EXISTS
#define initialize_timeouts() do { \
		sys_dlist_init(&_timeout_q); \
	} while (false)
#else
#define initialize_timeouts() do { } while (false)
#endif

extern void idle(void *unused1, void *unused2, void *unused3);

/* LCOV_EXCL_START
 *
 * This code is called so early in the boot process that code coverage
 * doesn't work properly. In addition, not all arches call this code;
 * some, like x86, do this with optimized assembly instead.
 */

/**
 *
 * @brief Clear BSS
 *
 * This routine clears the BSS region, so all bytes are 0.
 *
 * @return N/A
 */
void z_bss_zero(void)
{
	(void)memset(__bss_start, 0, __bss_end - __bss_start);
#ifdef DT_CCM_BASE_ADDRESS
	(void)memset(&__ccm_bss_start, 0,
		     ((u32_t) &__ccm_bss_end - (u32_t) &__ccm_bss_start));
#endif
#ifdef CONFIG_CODE_DATA_RELOCATION
	extern void bss_zeroing_relocation(void);

	bss_zeroing_relocation();
#endif /* CONFIG_CODE_DATA_RELOCATION */
#ifdef CONFIG_COVERAGE_GCOV
	(void)memset(&__gcov_bss_start, 0,
		     ((u32_t) &__gcov_bss_end - (u32_t) &__gcov_bss_start));
#endif
}

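/*
 * Canary used by the compiler's stack-protector instrumentation; it is seeded
 * with a random value in z_cstart().
 */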
#ifdef CONFIG_STACK_CANARIES
extern volatile uintptr_t __stack_chk_guard;
#endif /* CONFIG_STACK_CANARIES */

#ifdef CONFIG_XIP
/**
 *
 * @brief Copy the data section from ROM to RAM
 *
 * This routine copies the data section from ROM to RAM.
 *
 * @return N/A
 */
void z_data_copy(void)
{
	(void)memcpy(&__data_ram_start, &__data_rom_start,
		 __data_ram_end - __data_ram_start);
#ifdef CONFIG_ARCH_HAS_RAMFUNC_SUPPORT
	(void)memcpy(&_ramfunc_ram_start, &_ramfunc_rom_start,
		 (uintptr_t) &_ramfunc_ram_size);
#endif /* CONFIG_ARCH_HAS_RAMFUNC_SUPPORT */
#ifdef DT_CCM_BASE_ADDRESS
	(void)memcpy(&__ccm_data_start, &__ccm_data_rom_start,
		 __ccm_data_end - __ccm_data_start);
#endif
#ifdef CONFIG_CODE_DATA_RELOCATION
	extern void data_copy_xip_relocation(void);

	data_copy_xip_relocation();
#endif /* CONFIG_CODE_DATA_RELOCATION */
#ifdef CONFIG_USERSPACE
#ifdef CONFIG_STACK_CANARIES
	/* stack canary checking is active for all C functions.
	 * __stack_chk_guard is some uninitialized value living in the
	 * app shared memory sections. Preserve it, and don't make any
	 * function calls to perform the memory copy. The true canary
	 * value gets set later in z_cstart().
	 */
	uintptr_t guard_copy = __stack_chk_guard;
	u8_t *src = (u8_t *)&_app_smem_rom_start;
	u8_t *dst = (u8_t *)&_app_smem_start;
	u32_t count = _app_smem_end - _app_smem_start;

	guard_copy = __stack_chk_guard;
	while (count > 0) {
		*(dst++) = *(src++);
		count--;
	}
	__stack_chk_guard = guard_copy;
#else
	(void)memcpy(&_app_smem_start, &_app_smem_rom_start,
		 _app_smem_end - _app_smem_start);
#endif /* CONFIG_STACK_CANARIES */
#endif /* CONFIG_USERSPACE */
}
#endif /* CONFIG_XIP */

/* LCOV_EXCL_STOP */

/**
 *
 * @brief Mainline for kernel's background thread
 *
 * This routine completes kernel initialization by invoking the remaining
 * init functions, then invokes the application's main() routine.
 *
 * @return N/A
 */
static void bg_thread_main(void *unused1, void *unused2, void *unused3)
{
	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);
	ARG_UNUSED(unused3);

#if defined(CONFIG_BOOT_DELAY) && CONFIG_BOOT_DELAY > 0
	static const unsigned int boot_delay = CONFIG_BOOT_DELAY;
#else
	static const unsigned int boot_delay;
#endif

	z_sys_device_do_config_level(_SYS_INIT_LEVEL_POST_KERNEL);
#if CONFIG_STACK_POINTER_RANDOM
	z_stack_adjust_initialized = 1;
#endif
	if (boot_delay > 0 && IS_ENABLED(CONFIG_MULTITHREADING)) {
		printk("***** delaying boot " STRINGIFY(CONFIG_BOOT_DELAY)
		       "ms (per build configuration) *****\n");
		k_busy_wait(CONFIG_BOOT_DELAY * USEC_PER_MSEC);
	}
	PRINT_BOOT_BANNER();

	/* Final init level before app starts */
	z_sys_device_do_config_level(_SYS_INIT_LEVEL_APPLICATION);

#ifdef CONFIG_CPLUSPLUS
	/* Process the .ctors and .init_array sections */
	extern void __do_global_ctors_aux(void);
	extern void __do_init_array_aux(void);
	__do_global_ctors_aux();
	__do_init_array_aux();
#endif

	z_init_static_threads();

#ifdef CONFIG_SMP
	z_smp_init();
#endif

#ifdef CONFIG_BOOT_TIME_MEASUREMENT
	/* record timestamp for kernel's _main() function */
	extern u64_t __main_time_stamp;

	__main_time_stamp = (u64_t)k_cycle_get_32();
#endif

	extern void main(void);

	main();

	/* Mark nonessential since main() has no more work to do */
	_main_thread->base.user_options &= ~K_ESSENTIAL;

	/* Dump coverage data once main() has exited. */
	gcov_coverage_dump();
} /* LCOV_EXCL_LINE ... because we just dumped final coverage data */

/* LCOV_EXCL_START */

void __weak main(void)
{
	/* NOP default main() if the application does not provide one. */
	arch_nop();
}

/* LCOV_EXCL_STOP */

#if defined(CONFIG_MULTITHREADING)
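/* Initialize an idle thread, on its dedicated stack, at the lowest thread priority. */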
static void init_idle_thread(struct k_thread *thr, k_thread_stack_t *stack)
{
#ifdef CONFIG_SMP
	thr->base.is_idle = 1U;
#endif

	z_setup_new_thread(thr, stack,
			  IDLE_STACK_SIZE, idle, NULL, NULL, NULL,
			  K_LOWEST_THREAD_PRIO, K_ESSENTIAL, IDLE_THREAD_NAME);
	z_mark_thread_as_started(thr);
}
#endif /* CONFIG_MULTITHREADING */

/**
 *
 * @brief Initializes kernel data structures
 *
 * This routine initializes various kernel data structures, including
 * the init and idle threads and any architecture-specific initialization.
 *
 * Note that all fields of "_kernel" are set to zero on entry, which may
 * be all the initialization many of them require.
 *
 * @return N/A
 */
#ifdef CONFIG_MULTITHREADING
static void prepare_multithreading(struct k_thread *dummy_thread)
{
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
	ARG_UNUSED(dummy_thread);
#else

	/*
	 * Initialize the current execution thread to permit a level of
	 * debugging output if an exception should happen during kernel
	 * initialization. However, don't waste effort initializing the
	 * fields of the dummy thread beyond those needed to identify it as a
	 * dummy thread.
	 */
	dummy_thread->base.user_options = K_ESSENTIAL;
	dummy_thread->base.thread_state = _THREAD_DUMMY;
#ifdef CONFIG_THREAD_STACK_INFO
	dummy_thread->stack_info.start = 0U;
	dummy_thread->stack_info.size = 0U;
#endif
#ifdef CONFIG_USERSPACE
	dummy_thread->mem_domain_info.mem_domain = 0;
#endif
#endif /* CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN */

	/* _kernel.ready_q is all zeroes */
	z_sched_init();

#ifndef CONFIG_SMP
	/*
	 * prime the cache with the main thread since:
	 *
	 * - the cache can never be NULL
	 * - the main thread will be the one to run first
	 * - no other thread is initialized yet and thus their priority fields
	 *   contain garbage, which would prevent the cache loading algorithm
	 *   from working as intended
	 */
	_kernel.ready_q.cache = _main_thread;
#endif

	z_setup_new_thread(_main_thread, _main_stack,
			   MAIN_STACK_SIZE, bg_thread_main,
			   NULL, NULL, NULL,
			   CONFIG_MAIN_THREAD_PRIORITY, K_ESSENTIAL, "main");
	sys_trace_thread_create(_main_thread);

	z_mark_thread_as_started(_main_thread);
	z_ready_thread(_main_thread);

	init_idle_thread(_idle_thread, _idle_stack);
	_kernel.cpus[0].idle_thread = _idle_thread;
	sys_trace_thread_create(_idle_thread);

#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 1
	init_idle_thread(_idle_thread1, _idle_stack1);
	_kernel.cpus[1].idle_thread = _idle_thread1;
	_kernel.cpus[1].id = 1;
	_kernel.cpus[1].irq_stack = Z_THREAD_STACK_BUFFER(_interrupt_stack1)
		+ CONFIG_ISR_STACK_SIZE;
#endif

#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 2
	init_idle_thread(_idle_thread2, _idle_stack2);
	_kernel.cpus[2].idle_thread = _idle_thread2;
	_kernel.cpus[2].id = 2;
	_kernel.cpus[2].irq_stack = Z_THREAD_STACK_BUFFER(_interrupt_stack2)
		+ CONFIG_ISR_STACK_SIZE;
#endif

#if defined(CONFIG_SMP) && CONFIG_MP_NUM_CPUS > 3
	init_idle_thread(_idle_thread3, _idle_stack3);
	_kernel.cpus[3].idle_thread = _idle_thread3;
	_kernel.cpus[3].id = 3;
	_kernel.cpus[3].irq_stack = Z_THREAD_STACK_BUFFER(_interrupt_stack3)
		+ CONFIG_ISR_STACK_SIZE;
#endif

	initialize_timeouts();
}

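/*
 * Hand control from the early boot context over to the main thread; this
 * routine does not return.
 */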
static FUNC_NORETURN void switch_to_main_thread(void)
{
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
	z_arch_switch_to_main_thread(_main_thread, _main_stack,
				    K_THREAD_STACK_SIZEOF(_main_stack),
				    bg_thread_main);
#else
	/*
	 * Context switch to the main thread (entry function is
	 * bg_thread_main()): the current fake thread is not on a wait queue
	 * or ready queue, so it will never be rescheduled in.
	 */
	z_swap_unlocked();
#endif
	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
#endif /* CONFIG_MULTITHREADING */

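/**
 *
 * @brief Get a 32-bit random value during early boot
 *
 * Prefers a hardware entropy driver when one is present, using its
 * ISR-specific API if the driver provides one, and falls back to
 * sys_rand32_get() otherwise.
 *
 * @return A 32-bit random value
 */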
u32_t z_early_boot_rand32_get(void)
{
#ifdef CONFIG_ENTROPY_HAS_DRIVER
	struct device *entropy = device_get_binding(CONFIG_ENTROPY_NAME);
	int rc;
	u32_t retval;

	if (entropy == NULL) {
		goto sys_rand32_fallback;
	}

	/* Try to see if the driver provides an ISR-specific API */
	rc = entropy_get_entropy_isr(entropy, (u8_t *)&retval,
				     sizeof(retval), ENTROPY_BUSYWAIT);
	if (rc == -ENOTSUP) {
		/* Driver does not provide an ISR-specific API, assume it can
		 * be called from ISR context
		 */
		rc = entropy_get_entropy(entropy, (u8_t *)&retval,
					 sizeof(retval));
	}

	if (rc >= 0) {
		return retval;
	}

	/* Fall through to fallback */

sys_rand32_fallback:
#endif

	/* FIXME: this assumes sys_rand32_get() won't use any synchronization
	 * primitive, like semaphores or mutexes. It's too early in the boot
	 * process to use any of them. Ideally, only the path where entropy
	 * devices are available should be built; this is only a fallback for
	 * those devices without a HWRNG entropy driver.
	 */
	return sys_rand32_get();
}

/**
 *
 * @brief Initialize kernel
 *
 * This routine is invoked when the system is ready to run C code. The
 * processor must be running in 32-bit mode, and the BSS must have been
 * cleared/zeroed.
 *
 * @return Does not return
 */
FUNC_NORETURN void z_cstart(void)
{
	/* gcov hook needed to get the coverage report. */
	gcov_static_init();

	if (IS_ENABLED(CONFIG_LOG)) {
		log_core_init();
	}

	/* perform any architecture-specific initialization */
	kernel_arch_init();

#ifdef CONFIG_MULTITHREADING
	struct k_thread dummy_thread = {
		 .base.thread_state = _THREAD_DUMMY,
# ifdef CONFIG_SCHED_CPU_MASK
		 .base.cpu_mask = -1,
# endif
	};

	_current = &dummy_thread;
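	/*
	 * _current now refers to the dummy thread, so code that runs before
	 * the real threads exist has a valid current-thread pointer.
	 */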
#endif

#ifdef CONFIG_USERSPACE
	z_app_shmem_bss_zero();
#endif

	/* perform basic hardware initialization */
	z_sys_device_do_config_level(_SYS_INIT_LEVEL_PRE_KERNEL_1);
	z_sys_device_do_config_level(_SYS_INIT_LEVEL_PRE_KERNEL_2);

#ifdef CONFIG_STACK_CANARIES
	__stack_chk_guard = z_early_boot_rand32_get();
#endif

#ifdef CONFIG_MULTITHREADING
	prepare_multithreading(&dummy_thread);
	switch_to_main_thread();
#else
	bg_thread_main(NULL, NULL, NULL);

	/* LCOV_EXCL_START
	 * We've already dumped coverage data at this point.
	 */
	irq_lock();
	while (true) {
	}
	/* LCOV_EXCL_STOP */
#endif

	/*
	 * Compiler can't tell that the above routines won't return and issues
	 * a warning unless we explicitly tell it that control never gets this
	 * far.
	 */

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}