arch: arm: Separate common irq_manage and isr_wrapper code

There are too many differences between Cortex-A/R and Cortex-M in irq
code, e.g. Cortex-A/R uses GIC and Cortex-M uses NVIC. To reduce
complexity and make the code easier to maintain, this commit separates
irq_manage.c and isr_wrapper.S into two different parts based on the
architecture.

This commit also removes the part related to the option
'CONFIG_ARM_SECURE_FIRMWARE' in 'cortex_a_r/irq_manage.c' because
this code is written for the Cortex-M architecture.

Signed-off-by: Huifeng Zhang <Huifeng.Zhang@arm.com>
diff --git a/arch/arm/core/CMakeLists.txt b/arch/arm/core/CMakeLists.txt
index 6388a6d..27c19d7 100644
--- a/arch/arm/core/CMakeLists.txt
+++ b/arch/arm/core/CMakeLists.txt
@@ -5,14 +5,12 @@
 zephyr_library_sources(
   cpu_idle.S
   fatal.c
-  irq_manage.c
   nmi.c
   nmi_on_reset.S
   prep_c.c
   thread.c
 )
 
-zephyr_library_sources_ifdef(CONFIG_GEN_SW_ISR_TABLE isr_wrapper.S)
 zephyr_library_sources_ifdef(CONFIG_CPP __aeabi_atexit.c)
 zephyr_library_sources_ifdef(CONFIG_IRQ_OFFLOAD irq_offload.c)
 zephyr_library_sources_ifdef(CONFIG_THREAD_LOCAL_STORAGE tls.c)
diff --git a/arch/arm/core/cortex_a_r/CMakeLists.txt b/arch/arm/core/cortex_a_r/CMakeLists.txt
index 2a573a8..ccd28b7 100644
--- a/arch/arm/core/cortex_a_r/CMakeLists.txt
+++ b/arch/arm/core/cortex_a_r/CMakeLists.txt
@@ -14,8 +14,10 @@
   vector_table.S
   swap.c
   swap_helper.S
+  irq_manage.c
   )
 
+zephyr_library_sources_ifdef(CONFIG_GEN_SW_ISR_TABLE isr_wrapper.S)
 zephyr_library_sources_ifdef(CONFIG_USERSPACE thread.c)
 zephyr_library_sources_ifdef(CONFIG_SEMIHOST semihost.c)
 zephyr_library_sources_ifdef(CONFIG_THREAD_LOCAL_STORAGE __aeabi_read_tp.S)
diff --git a/arch/arm/core/cortex_a_r/irq_manage.c b/arch/arm/core/cortex_a_r/irq_manage.c
new file mode 100644
index 0000000..a381fad
--- /dev/null
+++ b/arch/arm/core/cortex_a_r/irq_manage.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2014 Wind River Systems, Inc.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/**
+ * @file
+ * @brief ARM Cortex-A and Cortex-R interrupt management
+ *
+ *
+ * Interrupt management: enabling/disabling and dynamic ISR
+ * connecting/replacing.  SW_ISR_TABLE_DYNAMIC has to be enabled for
+ * connecting ISRs at runtime.
+ */
+
+#include <zephyr/kernel.h>
+#include <zephyr/arch/cpu.h>
+#include <zephyr/drivers/interrupt_controller/gic.h>
+#include <zephyr/sys/__assert.h>
+#include <zephyr/sys/barrier.h>
+#include <zephyr/toolchain.h>
+#include <zephyr/linker/sections.h>
+#include <zephyr/sw_isr_table.h>
+#include <zephyr/irq.h>
+#include <zephyr/tracing/tracing.h>
+#include <zephyr/pm/pm.h>
+
+extern void z_arm_reserved(void);
+
+/*
+ * For Cortex-A and Cortex-R cores, the default interrupt controller is the ARM
+ * Generic Interrupt Controller (GIC) and therefore the architecture interrupt
+ * control functions are mapped to the GIC driver interface.
+ *
+ * When a custom interrupt controller is used (i.e.
+ * CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER is enabled), the architecture
+ * interrupt control functions are mapped to the SoC layer in
+ * `include/arch/arm/irq.h`.
+ */
+
+#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
+void arch_irq_enable(unsigned int irq)
+{
+	arm_gic_irq_enable(irq);
+}
+
+void arch_irq_disable(unsigned int irq)
+{
+	arm_gic_irq_disable(irq);
+}
+
+int arch_irq_is_enabled(unsigned int irq)
+{
+	return arm_gic_irq_is_enabled(irq);
+}
+
+/**
+ * @internal
+ *
+ * @brief Set an interrupt's priority
+ *
+ * The priority is verified if ASSERT_ON is enabled. The maximum number
+ * of priority levels is a little complex, as there are some hardware
+ * priority levels which are reserved: three for various types of exceptions,
+ * and possibly one additional to support zero latency interrupts.
+ */
+void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
+{
+	arm_gic_irq_set_priority(irq, prio, flags);
+}
+#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */
+
+void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
+
+/**
+ *
+ * @brief Spurious interrupt handler
+ *
+ * Installed in all _sw_isr_table slots at boot time. Throws an error if
+ * called.
+ *
+ */
+void z_irq_spurious(const void *unused)
+{
+	ARG_UNUSED(unused);
+
+	z_arm_fatal_error(K_ERR_SPURIOUS_IRQ, NULL);
+}
+
+#ifdef CONFIG_PM
+void _arch_isr_direct_pm(void)
+{
+	unsigned int key;
+
+	/* irq_lock() does what we want for this CPU */
+	key = irq_lock();
+
+	if (_kernel.idle) {
+		_kernel.idle = 0;
+		z_pm_save_idle_exit();
+	}
+
+	irq_unlock(key);
+}
+#endif
+
+#ifdef CONFIG_DYNAMIC_INTERRUPTS
+#ifdef CONFIG_GEN_ISR_TABLES
+int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
+			     void (*routine)(const void *parameter),
+			     const void *parameter, uint32_t flags)
+{
+	z_isr_install(irq, routine, parameter);
+	z_arm_irq_priority_set(irq, priority, flags);
+	return irq;
+}
+#endif /* CONFIG_GEN_ISR_TABLES */
+
+#ifdef CONFIG_DYNAMIC_DIRECT_INTERRUPTS
+static inline void z_arm_irq_dynamic_direct_isr_dispatch(void)
+{
+	uint32_t irq = __get_IPSR() - 16;
+
+	if (irq < IRQ_TABLE_SIZE) {
+		struct _isr_table_entry *isr_entry = &_sw_isr_table[irq];
+
+		isr_entry->isr(isr_entry->arg);
+	}
+}
+
+ISR_DIRECT_DECLARE(z_arm_irq_direct_dynamic_dispatch_reschedule)
+{
+	z_arm_irq_dynamic_direct_isr_dispatch();
+
+	return 1;
+}
+
+ISR_DIRECT_DECLARE(z_arm_irq_direct_dynamic_dispatch_no_reschedule)
+{
+	z_arm_irq_dynamic_direct_isr_dispatch();
+
+	return 0;
+}
+
+#endif /* CONFIG_DYNAMIC_DIRECT_INTERRUPTS */
+
+#endif /* CONFIG_DYNAMIC_INTERRUPTS */
diff --git a/arch/arm/core/isr_wrapper.S b/arch/arm/core/cortex_a_r/isr_wrapper.S
similarity index 70%
rename from arch/arm/core/isr_wrapper.S
rename to arch/arm/core/cortex_a_r/isr_wrapper.S
index 205420c..dc5fa9c 100644
--- a/arch/arm/core/isr_wrapper.S
+++ b/arch/arm/core/cortex_a_r/isr_wrapper.S
@@ -7,7 +7,7 @@
 
 /**
  * @file
- * @brief ARM Cortex-A, Cortex-M and Cortex-R wrapper for ISRs with parameter
+ * @brief ARM Cortex-A and Cortex-R wrapper for ISRs with parameter
  *
  * Wrapper installed in vector table for handling dynamic interrupts that accept
  * a parameter.
@@ -45,10 +45,6 @@
  */
 SECTION_FUNC(TEXT, _isr_wrapper)
 
-#if defined(CONFIG_CPU_CORTEX_M)
-	push {r0,lr}		/* r0, lr are now the first items on the stack */
-#elif defined(CONFIG_CPU_AARCH32_CORTEX_R) || defined(CONFIG_CPU_AARCH32_CORTEX_A)
-
 #if defined(CONFIG_USERSPACE)
 	/* See comment below about svc stack usage */
 	cps #MODE_SVC
@@ -147,7 +143,6 @@
 	ldr r0, [r2, #_kernel_offset_to_nested]
 	add r0, r0, #1
 	str r0, [r2, #_kernel_offset_to_nested]
-#endif /* CONFIG_CPU_CORTEX_M */
 
 #ifdef CONFIG_TRACING_ISR
 	bl sys_trace_isr_enter
@@ -163,65 +158,21 @@
 	 * is called with interrupts disabled.
 	 */
 
-#if defined(CONFIG_CPU_CORTEX_M)
-	/*
-	 * Disable interrupts to prevent nesting while exiting idle state. This
-	 * is only necessary for the Cortex-M because it is the only ARM
-	 * architecture variant that automatically enables interrupts when
-	 * entering an ISR.
-	 */
-	cpsid i  /* PRIMASK = 1 */
-#endif
-
 	/* is this a wakeup from idle ? */
 	ldr r2, =_kernel
 	/* requested idle duration, in ticks */
 	ldr r0, [r2, #_kernel_offset_to_idle]
 	cmp r0, #0
 
-#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
-	beq _idle_state_cleared
-	movs.n r1, #0
-	/* clear kernel idle state */
-	str r1, [r2, #_kernel_offset_to_idle]
-	bl z_pm_save_idle_exit
-_idle_state_cleared:
-
-#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
-	ittt ne
-	movne	r1, #0
-		/* clear kernel idle state */
-		strne	r1, [r2, #_kernel_offset_to_idle]
-		blne	z_pm_save_idle_exit
-#elif defined(CONFIG_ARMV7_R) || defined(CONFIG_AARCH32_ARMV8_R) \
-	|| defined(CONFIG_ARMV7_A)
 	beq _idle_state_cleared
 	movs r1, #0
 	/* clear kernel idle state */
 	str r1, [r2, #_kernel_offset_to_idle]
 	bl z_pm_save_idle_exit
 _idle_state_cleared:
-#else
-#error Unknown ARM architecture
-#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
-
-#if defined(CONFIG_CPU_CORTEX_M)
-	cpsie i		/* re-enable interrupts (PRIMASK = 0) */
-#endif
 
 #endif /* CONFIG_PM */
 
-#if defined(CONFIG_CPU_CORTEX_M)
-	mrs r0, IPSR	/* get exception number */
-#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
-	ldr r1, =16
-	subs r0, r1	/* get IRQ number */
-	lsls r0, #3	/* table is 8-byte wide */
-#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
-	sub r0, r0, #16	/* get IRQ number */
-	lsl r0, r0, #3	/* table is 8-byte wide */
-#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
-#elif defined(CONFIG_CPU_AARCH32_CORTEX_R) || defined(CONFIG_CPU_AARCH32_CORTEX_A)
 	/* Get active IRQ number from the interrupt controller */
 #if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
 	bl arm_gic_get_active
@@ -230,11 +181,7 @@
 #endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */
 	push {r0, r1}
 	lsl r0, r0, #3	/* table is 8-byte wide */
-#else
-#error Unknown ARM architecture
-#endif /* CONFIG_CPU_CORTEX_M */
 
-#if !defined(CONFIG_CPU_CORTEX_M)
 	/*
 	 * Enable interrupts to allow nesting.
 	 *
@@ -254,7 +201,6 @@
 	lsl r1, r1, #3
 	cmp r0, r1
 	bge spurious_continue
-#endif /* !CONFIG_CPU_CORTEX_M */
 
 	ldr r1, =_sw_isr_table
 	add r1, r1, r0	/* table entry: ISRs must have their MSB set to stay
@@ -263,7 +209,6 @@
 	ldm r1!,{r0,r3}	/* arg in r0, ISR in r3 */
 	blx r3		/* call ISR */
 
-#if defined(CONFIG_CPU_AARCH32_CORTEX_R) || defined(CONFIG_CPU_AARCH32_CORTEX_A)
 spurious_continue:
 	/* Signal end-of-interrupt */
 	pop {r0, r1}
@@ -272,29 +217,11 @@
 #else
 	bl z_soc_irq_eoi
 #endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */
-#endif /* CONFIG_CPU_AARCH32_CORTEX_R || CONFIG_CPU_AARCH32_CORTEX_A */
 
 #ifdef CONFIG_TRACING_ISR
 	bl sys_trace_isr_exit
 #endif
 
-#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
-	pop {r0, r3}
-	mov lr, r3
-#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
-	pop {r0, lr}
-#elif defined(CONFIG_ARMV7_R) || defined(CONFIG_AARCH32_ARMV8_R) \
-	|| defined(CONFIG_ARMV7_A)
-	/*
-	 * r0 and lr_irq were saved on the process stack since a swap could
-	 * happen.  exc_exit will handle getting those values back
-	 * from the process stack to return to the correct location
-	 * so there is no need to do anything here.
-	 */
-#else
-#error Unknown ARM architecture
-#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
-
 	/* Use 'bx' instead of 'b' because 'bx' can jump further, and use
 	 * 'bx' instead of 'blx' because exception return is done in
 	 * z_arm_int_exit() */
diff --git a/arch/arm/core/cortex_m/CMakeLists.txt b/arch/arm/core/cortex_m/CMakeLists.txt
index f1ac9ee..e4e6787 100644
--- a/arch/arm/core/cortex_m/CMakeLists.txt
+++ b/arch/arm/core/cortex_m/CMakeLists.txt
@@ -14,8 +14,10 @@
   vector_table.S
   swap.c
   swap_helper.S
+  irq_manage.c
   )
 
+zephyr_library_sources_ifdef(CONFIG_GEN_SW_ISR_TABLE isr_wrapper.S)
 zephyr_library_sources_ifdef(CONFIG_USERSPACE thread.c)
 zephyr_library_sources_ifdef(CONFIG_DEBUG_COREDUMP coredump.c)
 zephyr_library_sources_ifdef(CONFIG_THREAD_LOCAL_STORAGE __aeabi_read_tp.S)
diff --git a/arch/arm/core/irq_manage.c b/arch/arm/core/cortex_m/irq_manage.c
similarity index 78%
rename from arch/arm/core/irq_manage.c
rename to arch/arm/core/cortex_m/irq_manage.c
index 6f6a590..cd0c6f6 100644
--- a/arch/arm/core/irq_manage.c
+++ b/arch/arm/core/cortex_m/irq_manage.c
@@ -6,7 +6,7 @@
 
 /**
  * @file
- * @brief ARM Cortex-A, Cortex-M and Cortex-R interrupt management
+ * @brief ARM Cortex-M interrupt management
  *
  *
  * Interrupt management: enabling/disabling and dynamic ISR
@@ -16,12 +16,7 @@
 
 #include <zephyr/kernel.h>
 #include <zephyr/arch/cpu.h>
-#if defined(CONFIG_CPU_CORTEX_M)
 #include <cmsis_core.h>
-#elif defined(CONFIG_CPU_AARCH32_CORTEX_A) \
-	|| defined(CONFIG_CPU_AARCH32_CORTEX_R)
-#include <zephyr/drivers/interrupt_controller/gic.h>
-#endif
 #include <zephyr/sys/__assert.h>
 #include <zephyr/sys/barrier.h>
 #include <zephyr/toolchain.h>
@@ -33,7 +28,6 @@
 
 extern void z_arm_reserved(void);
 
-#if defined(CONFIG_CPU_CORTEX_M)
 #define NUM_IRQS_PER_REG 32
 #define REG_FROM_IRQ(irq) (irq / NUM_IRQS_PER_REG)
 #define BIT_FROM_IRQ(irq) (irq % NUM_IRQS_PER_REG)
@@ -96,53 +90,6 @@
 	NVIC_SetPriority((IRQn_Type)irq, prio);
 }
 
-#elif defined(CONFIG_CPU_AARCH32_CORTEX_A) \
-	|| defined(CONFIG_CPU_AARCH32_CORTEX_R)
-/*
- * For Cortex-A and Cortex-R cores, the default interrupt controller is the ARM
- * Generic Interrupt Controller (GIC) and therefore the architecture interrupt
- * control functions are mapped to the GIC driver interface.
- *
- * When a custom interrupt controller is used (i.e.
- * CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER is enabled), the architecture
- * interrupt control functions are mapped to the SoC layer in
- * `include/arch/arm/aarch32/irq.h`.
- */
-
-#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
-void arch_irq_enable(unsigned int irq)
-{
-	arm_gic_irq_enable(irq);
-}
-
-void arch_irq_disable(unsigned int irq)
-{
-	arm_gic_irq_disable(irq);
-}
-
-int arch_irq_is_enabled(unsigned int irq)
-{
-	return arm_gic_irq_is_enabled(irq);
-}
-
-/**
- * @internal
- *
- * @brief Set an interrupt's priority
- *
- * The priority is verified if ASSERT_ON is enabled. The maximum number
- * of priority levels is a little complex, as there are some hardware
- * priority levels which are reserved: three for various types of exceptions,
- * and possibly one additional to support zero latency interrupts.
- */
-void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
-{
-	arm_gic_irq_set_priority(irq, prio, flags);
-}
-#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */
-
-#endif /* CONFIG_CPU_CORTEX_M */
-
 void z_arm_fatal_error(unsigned int reason, const z_arch_esf_t *esf);
 
 /**
@@ -163,10 +110,7 @@
 #ifdef CONFIG_PM
 void _arch_isr_direct_pm(void)
 {
-#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) \
-	|| defined(CONFIG_ARMV7_R) \
-	|| defined(CONFIG_AARCH32_ARMV8_R) \
-	|| defined(CONFIG_ARMV7_A)
+#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
 	unsigned int key;
 
 	/* irq_lock() does what we want for this CPU */
@@ -186,10 +130,7 @@
 		z_pm_save_idle_exit();
 	}
 
-#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) \
-	|| defined(CONFIG_ARMV7_R) \
-	|| defined(CONFIG_AARCH32_ARMV8_R) \
-	|| defined(CONFIG_ARMV7_A)
+#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
 	irq_unlock(key);
 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
 	__asm__ volatile("cpsie i" : : : "memory");
diff --git a/arch/arm/core/cortex_m/isr_wrapper.S b/arch/arm/core/cortex_m/isr_wrapper.S
new file mode 100644
index 0000000..78ad6cd
--- /dev/null
+++ b/arch/arm/core/cortex_m/isr_wrapper.S
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2013-2014 Wind River Systems, Inc.
+ * Copyright (c) 2020 Stephanos Ioannidis <root@stephanos.io>
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/**
+ * @file
+ * @brief ARM Cortex-M wrapper for ISRs with parameter
+ *
+ * Wrapper installed in vector table for handling dynamic interrupts that accept
+ * a parameter.
+ */
+/*
+ * Tell armclang that stack alignment are ensured.
+ */
+.eabi_attribute Tag_ABI_align_preserved, 1
+
+#include <zephyr/toolchain.h>
+#include <zephyr/linker/sections.h>
+#include <offsets_short.h>
+#include <zephyr/arch/cpu.h>
+#include <zephyr/sw_isr_table.h>
+
+
+_ASM_FILE_PROLOGUE
+
+GDATA(_sw_isr_table)
+
+GTEXT(_isr_wrapper)
+GTEXT(z_arm_int_exit)
+
+/**
+ *
+ * @brief Wrapper around ISRs when inserted in software ISR table
+ *
+ * When inserted in the vector table, _isr_wrapper() demuxes the ISR table
+ * using the running interrupt number as the index, and invokes the registered
+ * ISR with its corresponding argument. When returning from the ISR, it
+ * determines if a context switch needs to happen (see documentation for
+ * z_arm_pendsv()) and pends the PendSV exception if so: the latter will
+ * perform the context switch itself.
+ *
+ */
+SECTION_FUNC(TEXT, _isr_wrapper)
+
+	push {r0,lr}		/* r0, lr are now the first items on the stack */
+
+#ifdef CONFIG_TRACING_ISR
+	bl sys_trace_isr_enter
+#endif
+
+#ifdef CONFIG_PM
+	/*
+	 * All interrupts are disabled when handling idle wakeup.  For tickless
+	 * idle, this ensures that the calculation and programming of the
+	 * device for the next timer deadline is not interrupted.  For
+	 * non-tickless idle, this ensures that the clearing of the kernel idle
+	 * state is not interrupted.  In each case, z_pm_save_idle_exit
+	 * is called with interrupts disabled.
+	 */
+
+	/*
+	 * Disable interrupts to prevent nesting while exiting idle state. This
+	 * is only necessary for the Cortex-M because it is the only ARM
+	 * architecture variant that automatically enables interrupts when
+	 * entering an ISR.
+	 */
+	cpsid i  /* PRIMASK = 1 */
+
+	/* is this a wakeup from idle ? */
+	ldr r2, =_kernel
+	/* requested idle duration, in ticks */
+	ldr r0, [r2, #_kernel_offset_to_idle]
+	cmp r0, #0
+
+#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
+	beq _idle_state_cleared
+	movs.n r1, #0
+	/* clear kernel idle state */
+	str r1, [r2, #_kernel_offset_to_idle]
+	bl z_pm_save_idle_exit
+_idle_state_cleared:
+
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
+	ittt ne
+	movne	r1, #0
+		/* clear kernel idle state */
+		strne	r1, [r2, #_kernel_offset_to_idle]
+		blne	z_pm_save_idle_exit
+#else
+#error Unknown ARM architecture
+#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
+
+	cpsie i		/* re-enable interrupts (PRIMASK = 0) */
+
+#endif /* CONFIG_PM */
+
+	mrs r0, IPSR	/* get exception number */
+#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
+	ldr r1, =16
+	subs r0, r1	/* get IRQ number */
+	lsls r0, #3	/* table is 8-byte wide */
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
+	sub r0, r0, #16	/* get IRQ number */
+	lsl r0, r0, #3	/* table is 8-byte wide */
+#else
+#error Unknown ARM architecture
+#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
+
+	ldr r1, =_sw_isr_table
+	add r1, r1, r0	/* table entry: ISRs must have their MSB set to stay
+			 * in thumb mode */
+
+	ldm r1!,{r0,r3}	/* arg in r0, ISR in r3 */
+	blx r3		/* call ISR */
+
+#ifdef CONFIG_TRACING_ISR
+	bl sys_trace_isr_exit
+#endif
+
+#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
+	pop {r0, r3}
+	mov lr, r3
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
+	pop {r0, lr}
+#else
+#error Unknown ARM architecture
+#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
+
+	/* Use 'bx' instead of 'b' because 'bx' can jump further, and use
+	 * 'bx' instead of 'blx' because exception return is done in
+	 * z_arm_int_exit() */
+	ldr r1, =z_arm_int_exit
+	bx r1