/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/wait_q.h>
#include <zephyr/pm/pm.h>
#include <stdbool.h>
#include <zephyr/logging/log.h>
#include <ksched.h>
#include <kswap.h>

LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

void z_pm_save_idle_exit(void)
{
#ifdef CONFIG_PM
	/* Some CPU low power states require notification at the ISR
	 * to allow any operations that need to be done before the
	 * kernel switches tasks or processes nested interrupts.
	 * This can simply be ignored if not required.
	 */
	pm_system_resume();
#endif /* CONFIG_PM */
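	/* Notify the timer driver that the kernel is exiting the idle
	 * state so it can resume its normal tick handling.
	 */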
	sys_clock_idle_exit();
}

void idle(void *unused1, void *unused2, void *unused3)
{
	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);
	ARG_UNUSED(unused3);

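	/* The idle thread must be preemptible: in Zephyr, non-negative
	 * priorities are preemptive, while cooperative threads use
	 * negative priorities.
	 */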
	__ASSERT_NO_MSG(_current->base.prio >= 0);

	while (true) {
		/* SMP systems without a working IPI can't actually
		 * enter an idle state, because they can't be notified
		 * of scheduler changes (i.e. threads they should
		 * run).  They just spin instead, with a minimal
		 * relaxation loop to prevent hammering the scheduler
		 * lock and/or timer driver.  This is intended as a
		 * fallback configuration for new platform bringup.
		 */
		if (IS_ENABLED(CONFIG_SMP) && !IS_ENABLED(CONFIG_SCHED_IPI_SUPPORTED)) {
			for (volatile int i = 0; i < 100000; i++) {
				/* Empty loop */
			}
			z_swap_unlocked();
		}

		/* Note weird API: k_cpu_idle() is called with local
		 * CPU interrupts masked, and returns with them
		 * unmasked.  It does not take a spinlock or other
		 * higher level construct.
		 */
		(void) arch_irq_lock();

#ifdef CONFIG_PM
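		/* Record the number of ticks until the next scheduled
		 * timeout; pm_system_suspend() below uses this value to
		 * decide which low power state, if any, to enter.
		 */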
		_kernel.idle = z_get_next_timeout_expiry();

		/*
		 * Call the suspend hook function of the SoC interface
		 * to allow entry into a low power state. The function
		 * returns false if a low power state was not entered, in
		 * which case the kernel does its normal idle processing.
		 *
		 * This function is entered with interrupts disabled.
		 * If a low power state was entered, then the hook
		 * function should enable interrupts before exiting.
		 * This is because the kernel does not do its own idle
		 * processing in those cases, i.e. it skips k_cpu_idle().
		 * The kernel's idle processing re-enables interrupts,
		 * which is essential for the kernel's scheduling
		 * logic.
		 */
		if (k_is_pre_kernel() || !pm_system_suspend(_kernel.idle)) {
			k_cpu_idle();
		}
#else
		k_cpu_idle();
#endif

#if !defined(CONFIG_PREEMPT_ENABLED)
# if !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC)
		/* A legacy mess: the idle thread is by definition
		 * preemptible as far as the modern scheduler is
		 * concerned, but older platforms use
		 * CONFIG_PREEMPT_ENABLED=n as an optimization hint
		 * that interrupt exit always returns to the
		 * interrupted context.  So in that setup we need to
		 * explicitly yield in the idle thread, otherwise
		 * nothing else will run once it starts.
		 */
		if (_kernel.ready_q.cache != _current) {
			z_swap_unlocked();
		}
# endif
#endif
	}
}