/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <drivers/timer/system_timer.h>
#include <wait_q.h>
#include <power/power.h>
#include <stdbool.h>
#include <logging/log.h>
#include <ksched.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

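/* Minimum number of remaining ticks below which the kernel does not bother
 * entering tickless idle; shorter idles are programmed as a single tick
 * instead (see sys_power_save_idle() below).
 */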
#ifdef CONFIG_TICKLESS_IDLE_THRESH
#define IDLE_THRESH CONFIG_TICKLESS_IDLE_THRESH
#else
#define IDLE_THRESH 1
#endif

/* Fallback idle spin loop for SMP platforms without a working IPI */
#if (defined(CONFIG_SMP) && !defined(CONFIG_SCHED_IPI_SUPPORTED))
#define SMP_FALLBACK 1
#else
#define SMP_FALLBACK 0
#endif

#ifdef CONFIG_SYS_POWER_MANAGEMENT
/*
 * Used to allow _sys_suspend() implementation to control notification
 * of the event that caused exit from kernel idling after pm operations.
 */
unsigned char sys_pm_idle_exit_notify;

/* LCOV_EXCL_START
 * These are almost certainly overridden and in any event do nothing
 */
#if defined(CONFIG_SYS_POWER_SLEEP_STATES)
void __attribute__((weak)) _sys_resume(void)
{
}
#endif

#if defined(CONFIG_SYS_POWER_DEEP_SLEEP_STATES)
void __attribute__((weak)) _sys_resume_from_deep_sleep(void)
{
}
#endif
/* LCOV_EXCL_STOP */
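/* SoC power management code is expected to override these weak stubs with
 * real implementations. A minimal sketch (hypothetical SoC hook, not part
 * of this file) might look like:
 *
 *	void _sys_resume(void)
 *	{
 *		soc_restore_clocks();
 *		soc_unmask_wakeup_sources();
 *	}
 */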

#endif /* CONFIG_SYS_POWER_MANAGEMENT */

/**
 *
 * @brief Indicate that kernel is idling in tickless mode
 *
 * Sets the kernel data structure idle field to either a positive value or
 * K_FOREVER.
 *
 * @param ticks the number of ticks to idle
 *
 * @return N/A
 */
#if !SMP_FALLBACK
static void set_kernel_idle_time_in_ticks(int32_t ticks)
{
#ifdef CONFIG_SYS_POWER_MANAGEMENT
	_kernel.idle = ticks;
#endif
}

static void sys_power_save_idle(void)
{
	int32_t ticks = z_get_next_timeout_expiry();

	/* The documented behavior of CONFIG_TICKLESS_IDLE_THRESH is
	 * that the system should not enter a tickless idle for
	 * periods less than that. This seems... silly, given that it
	 * saves no power and does not improve latency. But it's an
	 * API we need to honor...
	 */
#ifdef CONFIG_SYS_CLOCK_EXISTS
	z_set_timeout_expiry((ticks < IDLE_THRESH) ? 1 : ticks, true);
#endif

	set_kernel_idle_time_in_ticks(ticks);
#if (defined(CONFIG_SYS_POWER_SLEEP_STATES) || \
	defined(CONFIG_SYS_POWER_DEEP_SLEEP_STATES))

	sys_pm_idle_exit_notify = 1U;

	/*
	 * Call the suspend hook function of the soc interface to allow
	 * entry into a low power state. The function returns
	 * SYS_POWER_STATE_ACTIVE if a low power state was not entered, in
	 * which case the kernel does normal idle processing.
	 *
	 * This function is entered with interrupts disabled. If a low power
	 * state was entered, then the hook function should enable interrupts
	 * before exiting. This is because the kernel does not do its own idle
	 * processing in those cases, i.e. it skips k_cpu_idle(). The kernel's
	 * idle processing re-enables interrupts, which is essential for
	 * the kernel's scheduling logic.
	 */
	if (_sys_suspend(ticks) == SYS_POWER_STATE_ACTIVE) {
		sys_pm_idle_exit_notify = 0U;
		k_cpu_idle();
	}
#else
	k_cpu_idle();
#endif
}
#endif
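
/*
 * Notification hook for interrupt-driven exit from kernel idling: lets the
 * power management subsystem run its resume handling (unless _sys_suspend()
 * disabled the notification) and then informs the timer driver that idle
 * has ended.
 */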
void z_sys_power_save_idle_exit(int32_t ticks)
{
#if defined(CONFIG_SYS_POWER_SLEEP_STATES)
	/* Some CPU low power states require notification at the ISR
	 * to allow any operations that need to be done before the kernel
	 * switches tasks or processes nested interrupts. This can be
	 * disabled by calling _sys_pm_idle_exit_notification_disable().
	 * Alternatively it can simply be ignored if not required.
	 */
	if (sys_pm_idle_exit_notify) {
		_sys_resume();
	}
#endif

	z_clock_idle_exit();
}

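/* If the idle thread is configured with a cooperative priority
 * (K_IDLE_PRIO < 0) it will never be preempted, so it must explicitly
 * yield after each pass to give other ready threads a chance to run.
 */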
#if K_IDLE_PRIO < 0
#define IDLE_YIELD_IF_COOP() k_yield()
#else
#define IDLE_YIELD_IF_COOP() do { } while (false)
#endif

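/*
 * Entry point of the idle thread (one per CPU), which runs whenever no
 * other thread is ready. p1 points at the per-CPU _cpu structure so the
 * loop below can pick up pending thread aborts queued against this CPU.
 */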
void idle(void *p1, void *unused2, void *unused3)
{
	struct _cpu *cpu = p1;

	ARG_UNUSED(unused2);
	ARG_UNUSED(unused3);

#ifdef CONFIG_BOOT_TIME_MEASUREMENT
	/* record timestamp when idling begins */

	extern uint32_t z_timestamp_idle;

	z_timestamp_idle = k_cycle_get_32();
#endif

	while (true) {
		/* Lock interrupts to atomically check if to_abort is non-NULL,
		 * and if so clear it
		 */
		int key = arch_irq_lock();
		struct k_thread *to_abort = cpu->pending_abort;

		if (to_abort) {
			cpu->pending_abort = NULL;
			arch_irq_unlock(key);

			/* Safe to unlock interrupts here. We've atomically
			 * checked and stashed cpu->pending_abort into a stack
			 * variable. If we get preempted here and another
			 * thread aborts, cpu->pending_abort will get set
			 * again and we'll handle it when the loop iteration
			 * is continued below.
			 */
			LOG_DBG("idle %p aborting thread %p",
				_current, to_abort);

			z_thread_single_abort(to_abort);

			/* We have to invoke the scheduler now. If we got
			 * here, the idle thread preempted everything else
			 * in order to abort the thread, and we now need to
			 * figure out what to do next; it's not necessarily
			 * the case that there are no other runnable threads.
			 */
			z_reschedule_unlocked();
			continue;
		}
		arch_irq_unlock(key);
#if SMP_FALLBACK
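		/* No IPI support: poll for newly runnable threads by busy
		 * waiting briefly and yielding, instead of halting the CPU.
		 */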
		k_busy_wait(100);
		k_yield();
#else
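		/* sys_power_save_idle() and the suspend hook it calls must be
		 * entered with interrupts disabled; per the comment in that
		 * function, interrupts are re-enabled by k_cpu_idle() or by
		 * the SoC hook on wakeup.
		 */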
		(void)arch_irq_lock();
		sys_power_save_idle();
		IDLE_YIELD_IF_COOP();
#endif
	}
}