/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
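
/*
 * Kernel idle thread and the hook used to resume normal operation when a
 * CPU exits a low power idle state.
 */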

#include <zephyr/kernel.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/wait_q.h>
#include <zephyr/pm/pm.h>
#include <stdbool.h>
#include <zephyr/logging/log.h>
#include <ksched.h>
#include <kswap.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

void z_pm_save_idle_exit(void)
{
#ifdef CONFIG_PM
	/* Some CPU low power states require notification at the ISR
	 * to allow any operations that need to be done before the
	 * kernel switches tasks or processes nested interrupts.
	 * This can simply be ignored if not required.
	 */
	pm_system_resume();
#endif /* CONFIG_PM */
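	/* Notify the system timer driver that the kernel is leaving idle so
	 * it can restore normal timer operation and account for any ticks
	 * that elapsed while idle.
	 */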
	sys_clock_idle_exit();
}

void idle(void *unused1, void *unused2, void *unused3)
{
	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);
	ARG_UNUSED(unused3);

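	/* The idle thread must run at a preemptible (non-negative) priority
	 * so that any other ready thread can take the CPU from it.
	 */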
	__ASSERT_NO_MSG(_current->base.prio >= 0);

	while (true) {
		/* SMP systems without a working IPI can't actually
		 * enter an idle state, because they can't be notified
		 * of scheduler changes (i.e. threads they should
		 * run).  They just spin instead, with a minimal
		 * relaxation loop to prevent hammering the scheduler
		 * lock and/or timer driver.  This is intended as a
		 * fallback configuration for new platform bringup.
		 */
		if (IS_ENABLED(CONFIG_SMP) && !IS_ENABLED(CONFIG_SCHED_IPI_SUPPORTED)) {
			for (volatile int i = 0; i < 100000; i++) {
				/* Empty loop */
			}
			z_swap_unlocked();
		}

		/* Note weird API: k_cpu_idle() is called with local
		 * CPU interrupts masked, and returns with them
		 * unmasked.  It does not take a spinlock or other
		 * higher level construct.
		 */
		(void) arch_irq_lock();

#ifdef CONFIG_PM
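		/* Cache the number of ticks until the next timeout so the
		 * power management subsystem knows how long the system is
		 * allowed to stay in a low power state.
		 */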
		_kernel.idle = z_get_next_timeout_expiry();

		/*
		 * Call the suspend hook function of the SoC interface
		 * to allow entry into a low power state. The function
		 * returns false if a low power state was not entered, in
		 * which case the kernel does normal idle processing.
		 *
		 * This function is entered with interrupts disabled.
		 * If a low power state was entered, then the hook
		 * function should enable interrupts before exiting.
		 * This is because the kernel does not do its own idle
		 * processing in those cases, i.e. it skips k_cpu_idle().
		 * The kernel's idle processing re-enables interrupts,
		 * which is essential for the kernel's scheduling
		 * logic.
		 */
		if (k_is_pre_kernel() || !pm_system_suspend(_kernel.idle)) {
			k_cpu_idle();
		}
#else
		k_cpu_idle();
#endif

#if !defined(CONFIG_PREEMPT_ENABLED)
# if !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC)
		/* A legacy mess: the idle thread is by definition
		 * preemptible as far as the modern scheduler is
		 * concerned, but older platforms use
		 * CONFIG_PREEMPT_ENABLED=n as an optimization hint
		 * that interrupt exit always returns to the
		 * interrupted context.  So in that setup we need to
		 * explicitly yield in the idle thread otherwise
		 * nothing else will run once it starts.
		 */
		if (_kernel.ready_q.cache != _current) {
			z_swap_unlocked();
		}
# endif
#endif
	}
}