arch: custom cpu_idle and cpu_atomic_idle harmonization
The custom arch_cpu_idle and arch_cpu_atomic_idle implementations were
handled differently across architectures: riscv exposed them as weak
symbols, xtensa guarded them with a Kconfig option, and the remaining
architectures offered no way to override them at all, even though the
Kconfig option is global and should apply to all architectures.

Guard the default implementations on all architectures with
ARCH_CPU_IDLE_CUSTOM and introduce ARCH_CPU_ATOMIC_IDLE_CUSTOM for the
atomic variant, so SoCs and applications can override both in a uniform
way.
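As an illustration only (not part of this change), an SoC selecting
both options could provide replacements along these lines; here
soc_low_power_enter() is a hypothetical SoC-specific hook, and
MSTATUS_IEN mirrors what the riscv default implementation passes to
irq_unlock():

  #include <zephyr/irq.h>
  #include <zephyr/tracing/tracing.h>

  /* Hypothetical SoC-specific low-power entry point */
  extern void soc_low_power_enter(void);

  /* Replaces the default when ARCH_CPU_IDLE_CUSTOM is selected */
  void arch_cpu_idle(void)
  {
          sys_trace_idle();
          soc_low_power_enter();
          irq_unlock(MSTATUS_IEN); /* re-enable interrupts on wakeup */
  }

  /* Replaces the default when ARCH_CPU_ATOMIC_IDLE_CUSTOM is selected */
  void arch_cpu_atomic_idle(unsigned int key)
  {
          sys_trace_idle();
          soc_low_power_enter();
          irq_unlock(key); /* restore the caller's interrupt state */
  }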
Signed-off-by: Anas Nashif <anas.nashif@intel.com>
diff --git a/arch/Kconfig b/arch/Kconfig
index bd95979..86a0e62 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -1084,3 +1084,10 @@
help
This options allows applications to override the default arch idle implementation with
a custom one.
+
+config ARCH_CPU_ATOMIC_IDLE_CUSTOM
+ bool "Custom arch_cpu_atomic_idle implementation"
+ default n
+ help
+ This option allows applications to override the default arch_cpu_atomic_idle
+ implementation with a custom one.
diff --git a/arch/arc/core/cpu_idle.S b/arch/arc/core/cpu_idle.S
index a5996a2..797aff8 100644
--- a/arch/arc/core/cpu_idle.S
+++ b/arch/arc/core/cpu_idle.S
@@ -26,6 +26,7 @@
.align 4
.word 0
+#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
/*
* @brief Put the CPU in low-power mode
*
@@ -48,7 +49,9 @@
sleep r1
j_s [blink]
nop
+#endif
+#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
/*
* @brief Put the CPU in low-power mode, entered with IRQs locked
*
@@ -56,6 +59,7 @@
*
* void arch_cpu_atomic_idle(unsigned int key)
*/
+
SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
#ifdef CONFIG_TRACING
@@ -70,3 +74,4 @@
sleep r1
j_s.d [blink]
seti r0
+#endif
diff --git a/arch/arm/core/cortex_a_r/cpu_idle.S b/arch/arm/core/cortex_a_r/cpu_idle.S
index 5c6ef3f..38168a5 100644
--- a/arch/arm/core/cortex_a_r/cpu_idle.S
+++ b/arch/arm/core/cortex_a_r/cpu_idle.S
@@ -49,6 +49,7 @@
#endif /* CONFIG_ARM_ON_ENTER_CPU_IDLE_HOOK */
.endm
+#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
SECTION_FUNC(TEXT, arch_cpu_idle)
#ifdef CONFIG_TRACING
push {r0, lr}
@@ -68,6 +69,9 @@
bx lr
+#endif
+
+#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
#ifdef CONFIG_TRACING
push {r0, lr}
@@ -93,3 +97,4 @@
_irq_disabled:
bx lr
+#endif
diff --git a/arch/arm/core/cortex_m/cpu_idle.c b/arch/arm/core/cortex_m/cpu_idle.c
index 4df091f..3b48b19 100644
--- a/arch/arm/core/cortex_m/cpu_idle.c
+++ b/arch/arm/core/cortex_m/cpu_idle.c
@@ -53,6 +53,7 @@
} while (false)
#endif
+#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
void arch_cpu_idle(void)
{
#if defined(CONFIG_TRACING)
@@ -96,7 +97,9 @@
__enable_irq();
__ISB();
}
+#endif
+#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
void arch_cpu_atomic_idle(unsigned int key)
{
#if defined(CONFIG_TRACING)
@@ -135,3 +138,4 @@
__enable_irq();
#endif
}
+#endif
diff --git a/arch/arm64/core/cpu_idle.S b/arch/arm64/core/cpu_idle.S
index e01881e..a8c5bca 100644
--- a/arch/arm64/core/cpu_idle.S
+++ b/arch/arm64/core/cpu_idle.S
@@ -13,7 +13,7 @@
#include <zephyr/arch/cpu.h>
_ASM_FILE_PROLOGUE
-
+#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
GTEXT(arch_cpu_idle)
SECTION_FUNC(TEXT, arch_cpu_idle)
#ifdef CONFIG_TRACING
@@ -25,7 +25,9 @@
wfi
msr daifclr, #(DAIFCLR_IRQ_BIT)
ret
+#endif
+#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
GTEXT(arch_cpu_atomic_idle)
SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
#ifdef CONFIG_TRACING
@@ -41,3 +43,5 @@
msr daifclr, #(DAIFCLR_IRQ_BIT)
_irq_disabled:
ret
+
+#endif
diff --git a/arch/mips/core/cpu_idle.c b/arch/mips/core/cpu_idle.c
index d91a6b3..51b5324 100644
--- a/arch/mips/core/cpu_idle.c
+++ b/arch/mips/core/cpu_idle.c
@@ -19,12 +19,16 @@
__asm__ volatile("wait");
}
+#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
void arch_cpu_idle(void)
{
mips_idle(1);
}
+#endif
+#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
void arch_cpu_atomic_idle(unsigned int key)
{
mips_idle(key);
}
+#endif
diff --git a/arch/nios2/core/cpu_idle.c b/arch/nios2/core/cpu_idle.c
index ecdea13..5678c27 100644
--- a/arch/nios2/core/cpu_idle.c
+++ b/arch/nios2/core/cpu_idle.c
@@ -7,6 +7,7 @@
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
+#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
void arch_cpu_idle(void)
{
/* Do nothing but unconditionally unlock interrupts and return to the
@@ -14,7 +15,9 @@
*/
irq_unlock(NIOS2_STATUS_PIE_MSK);
}
+#endif
+#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
void arch_cpu_atomic_idle(unsigned int key)
{
/* Do nothing but restore IRQ state. This CPU does not have any
@@ -22,3 +25,4 @@
*/
irq_unlock(key);
}
+#endif
diff --git a/arch/riscv/core/cpu_idle.c b/arch/riscv/core/cpu_idle.c
index 1d47680..5429e54 100644
--- a/arch/riscv/core/cpu_idle.c
+++ b/arch/riscv/core/cpu_idle.c
@@ -7,16 +7,20 @@
#include <zephyr/irq.h>
#include <zephyr/tracing/tracing.h>
-void __weak arch_cpu_idle(void)
+#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
+void arch_cpu_idle(void)
{
sys_trace_idle();
__asm__ volatile("wfi");
irq_unlock(MSTATUS_IEN);
}
+#endif
-void __weak arch_cpu_atomic_idle(unsigned int key)
+#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
+void arch_cpu_atomic_idle(unsigned int key)
{
sys_trace_idle();
__asm__ volatile("wfi");
irq_unlock(key);
}
+#endif
diff --git a/arch/x86/core/cpuhalt.c b/arch/x86/core/cpuhalt.c
index 77c727b..47f0381 100644
--- a/arch/x86/core/cpuhalt.c
+++ b/arch/x86/core/cpuhalt.c
@@ -7,6 +7,7 @@
#include <zephyr/tracing/tracing.h>
#include <zephyr/arch/cpu.h>
+#ifndef CONFIG_ARCH_CPU_IDLE_CUSTOM
__pinned_func
void arch_cpu_idle(void)
{
@@ -15,7 +16,9 @@
"sti\n\t"
"hlt\n\t");
}
+#endif
+#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
__pinned_func
void arch_cpu_atomic_idle(unsigned int key)
{
@@ -42,3 +45,4 @@
__asm__ volatile("cli");
}
}
+#endif
diff --git a/arch/xtensa/core/cpu_idle.c b/arch/xtensa/core/cpu_idle.c
index dae79f0..13a4960 100644
--- a/arch/xtensa/core/cpu_idle.c
+++ b/arch/xtensa/core/cpu_idle.c
@@ -14,6 +14,7 @@
}
#endif
+#ifndef CONFIG_ARCH_CPU_ATOMIC_IDLE_CUSTOM
void arch_cpu_atomic_idle(unsigned int key)
{
sys_trace_idle();
@@ -21,3 +22,4 @@
"wsr.ps %0\n\t"
"rsync" :: "a"(key));
}
+#endif
diff --git a/soc/ite/ec/it8xxx2/Kconfig b/soc/ite/ec/it8xxx2/Kconfig
index 74d16b9..8966b6e 100644
--- a/soc/ite/ec/it8xxx2/Kconfig
+++ b/soc/ite/ec/it8xxx2/Kconfig
@@ -4,6 +4,8 @@
config SOC_SERIES_IT8XXX2
select CPU_HAS_FPU if "$(ZEPHYR_TOOLCHAIN_VARIANT)" != "zephyr" || RISCV_ISA_EXT_M
select HAS_PM
+ select ARCH_CPU_IDLE_CUSTOM
+ select ARCH_CPU_ATOMIC_IDLE_CUSTOM
if SOC_SERIES_IT8XXX2
diff --git a/soc/nordic/common/vpr/Kconfig b/soc/nordic/common/vpr/Kconfig
index 0c60b8a..76edb34 100644
--- a/soc/nordic/common/vpr/Kconfig
+++ b/soc/nordic/common/vpr/Kconfig
@@ -18,6 +18,7 @@
select RISCV_SOC_CONTEXT_SAVE
select HAS_FLASH_LOAD_OFFSET
select ARCH_CPU_IDLE_CUSTOM
+ select ARCH_CPU_ATOMIC_IDLE_CUSTOM
select INCLUDE_RESET_VECTOR
help
Enable support for the RISC-V Nordic VPR core.