/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <drivers/timer/system_timer.h>
#include <wait_q.h>
#include <power/power.h>
#include <stdbool.h>
#include <logging/log.h>
#include <ksched.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#ifdef CONFIG_TICKLESS_IDLE_THRESH
#define IDLE_THRESH CONFIG_TICKLESS_IDLE_THRESH
#else
#define IDLE_THRESH 1
#endif

/* Fallback idle spin loop for SMP platforms without a working IPI */
#if (defined(CONFIG_SMP) && !defined(CONFIG_SCHED_IPI_SUPPORTED))
#define SMP_FALLBACK 1
#else
#define SMP_FALLBACK 0
#endif
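/*
 * Without CONFIG_SCHED_IPI_SUPPORTED there is no way for one CPU to wake
 * another CPU out of a low power idle state when a thread becomes
 * runnable, so the SMP_FALLBACK path in idle() spins with a short
 * busy-wait and a yield instead of calling k_cpu_idle().
 */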

#ifdef CONFIG_SYS_POWER_MANAGEMENT
/*
 * Used to allow the _sys_suspend() implementation to control notification
 * of the event that caused exit from kernel idling after PM operations.
 */
unsigned char sys_pm_idle_exit_notify;


/* LCOV_EXCL_START
 * These are almost certainly overridden and in any event do nothing
 */
#if defined(CONFIG_SYS_POWER_SLEEP_STATES)
void __attribute__((weak)) _sys_resume(void)
{
}
#endif

#if defined(CONFIG_SYS_POWER_DEEP_SLEEP_STATES)
void __attribute__((weak)) _sys_resume_from_deep_sleep(void)
{
}
#endif
/* LCOV_EXCL_STOP */
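
/*
 * A SoC power-management layer normally provides strong definitions of
 * these hooks. Illustrative sketch only; the body and helper name are
 * hypothetical and SoC-specific:
 *
 *	void _sys_resume(void)
 *	{
 *		soc_restore_clocks_and_peripherals();
 *	}
 */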

#endif /* CONFIG_SYS_POWER_MANAGEMENT */

/**
 *
 * @brief Indicate that the kernel is idling in tickless mode
 *
 * Sets the kernel data structure idle field to either a positive value or
 * K_FOREVER.
 *
 * @param ticks the number of ticks to idle
 *
 * @return N/A
 */
#if !SMP_FALLBACK
static void set_kernel_idle_time_in_ticks(int32_t ticks)
{
#ifdef CONFIG_SYS_POWER_MANAGEMENT
	_kernel.idle = ticks;
#endif
}

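/*
 * Program the system timer for a tickless idle period, record the idle
 * time in the kernel structure and then either hand control to the SoC
 * suspend hook (when sleep states are configured) or simply call
 * k_cpu_idle(). Called from the idle thread with interrupts locked.
 */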
static void sys_power_save_idle(void)
{
	int32_t ticks = z_get_next_timeout_expiry();

	/* The documented behavior of CONFIG_TICKLESS_IDLE_THRESH is
	 * that the system should not enter a tickless idle for
	 * periods less than that. This seems... silly, given that it
	 * saves no power and does not improve latency. But it's an
	 * API we need to honor...
	 */
#ifdef CONFIG_SYS_CLOCK_EXISTS
	z_set_timeout_expiry((ticks < IDLE_THRESH) ? 1 : ticks, true);
#endif

	set_kernel_idle_time_in_ticks(ticks);
#if (defined(CONFIG_SYS_POWER_SLEEP_STATES) || \
	defined(CONFIG_SYS_POWER_DEEP_SLEEP_STATES))

	sys_pm_idle_exit_notify = 1U;

	/*
	 * Call the suspend hook function of the SoC interface to allow
	 * entry into a low power state. The function returns
	 * SYS_POWER_STATE_ACTIVE if a low power state was not entered, in
	 * which case the kernel does normal idle processing.
	 *
	 * This function is entered with interrupts disabled. If a low power
	 * state was entered, then the hook function should enable interrupts
	 * before exiting. This is because the kernel does not do its own idle
	 * processing in those cases, i.e. it skips k_cpu_idle(). The kernel's
	 * idle processing re-enables interrupts, which is essential for
	 * the kernel's scheduling logic.
	 */
	if (_sys_suspend(ticks) == SYS_POWER_STATE_ACTIVE) {
		sys_pm_idle_exit_notify = 0U;
		k_cpu_idle();
	}
#else
	k_cpu_idle();
#endif
}
#endif /* !SMP_FALLBACK */

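/*
 * Notify exit from kernel idling after PM operations: invoke the
 * _sys_resume() hook unless notification was disabled, then let the
 * timer driver re-synchronize via z_clock_idle_exit().
 */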
void z_sys_power_save_idle_exit(int32_t ticks)
{
#if defined(CONFIG_SYS_POWER_SLEEP_STATES)
	/* Some CPU low power states require notification at the ISR level
	 * to allow any operations that need to be done before the kernel
	 * switches tasks or processes nested interrupts. This can be
	 * disabled by calling _sys_pm_idle_exit_notification_disable().
	 * Alternatively it can simply be ignored if not required.
	 */
	if (sys_pm_idle_exit_notify) {
		_sys_resume();
	}
#endif

	z_clock_idle_exit();
}


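/*
 * When the idle thread priority is cooperative (K_IDLE_PRIO < 0) it will
 * never be preempted, so it must explicitly yield after each pass through
 * the loop to let any newly runnable threads execute; with a preemptible
 * idle priority the scheduler takes care of this and the macro is a no-op.
 */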
#if K_IDLE_PRIO < 0
#define IDLE_YIELD_IF_COOP() k_yield()
#else
#define IDLE_YIELD_IF_COOP() do { } while (false)
#endif

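/*
 * Entry point of the idle thread. @p p1 points at the per-CPU _cpu
 * structure of the CPU this thread runs on; the remaining parameters are
 * unused. The loop first services any thread abort that was deferred to
 * the idle thread, then either spins (SMP fallback) or enters the power
 * save/idle path with interrupts locked.
 */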
void idle(void *p1, void *unused2, void *unused3)
{
	struct _cpu *cpu = p1;

	ARG_UNUSED(unused2);
	ARG_UNUSED(unused3);

#ifdef CONFIG_BOOT_TIME_MEASUREMENT
	/* record timestamp when idling begins */

	extern uint32_t z_timestamp_idle;

	z_timestamp_idle = k_cycle_get_32();
#endif

	while (true) {
		/* Lock interrupts to atomically check if to_abort is non-NULL,
		 * and if so clear it
		 */
		int key = arch_irq_lock();
		struct k_thread *to_abort = cpu->pending_abort;

		if (to_abort) {
			cpu->pending_abort = NULL;
			arch_irq_unlock(key);

			/* Safe to unlock interrupts here. We've atomically
			 * checked and stashed cpu->pending_abort into a stack
			 * variable. If we get preempted here and another
			 * thread aborts, cpu->pending_abort will get set
			 * again and we'll handle it when the loop iteration
			 * is continued below.
			 */
			LOG_DBG("idle %p aborting thread %p",
				_current, to_abort);

			z_thread_single_abort(to_abort);

			/* We have to invoke the scheduler now. If we got
			 * here, the idle thread preempted everything else
			 * in order to abort the thread, and we now need to
			 * figure out what to do next; it's not necessarily
			 * the case that there are no other runnable threads.
			 */
			z_reschedule_unlocked();
			continue;
		}
		arch_irq_unlock(key);
#if SMP_FALLBACK
		k_busy_wait(100);
		k_yield();
#else
		(void)arch_irq_lock();
		sys_power_save_idle();
		IDLE_YIELD_IF_COOP();
#endif
	}
}