stack_sentinel: change cooperative check
One of the stack sentinel policies is to check the sentinel
whenever a cooperative context switch occurs (i.e., whenever
_Swap() is called).
Previously, this was done by adding a call to
_check_stack_sentinel in every arch's __swap function.
The check is now made in the _Swap() inline wrapper instead,
which is cleaner: the hook lives in a single inline function
rather than being implemented in several different assembly
dialects.
The check at interrupt exit is now made unconditionally, rather
than only when the interrupt path is not about to call __swap,
since the inline check only covers cooperative _Swap() calls.
The interrupt is always serviced first.
Issue: ZEP-2244
Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
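
For reference, the resulting inline wrapper in kernel/include/nano_internal.h
looks as follows (condensed from the hunk below; comments added here for
illustration only):

    static inline unsigned int _Swap(unsigned int key)
    {
    #ifdef CONFIG_STACK_SENTINEL
    	/* Check the outgoing thread's sentinel before switching out */
    	_check_stack_sentinel();
    #endif
    #ifdef CONFIG_TIMESLICING
    	_update_time_slice_before_swap();
    #endif
    	return __swap(key);
    }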
diff --git a/arch/arm/core/swap.S b/arch/arm/core/swap.S
index 7270bf4..2280ba7 100644
--- a/arch/arm/core/swap.S
+++ b/arch/arm/core/swap.S
@@ -42,24 +42,17 @@
SECTION_FUNC(TEXT, __pendsv)
-#if defined (CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH) || \
- defined(CONFIG_STACK_SENTINEL)
+#ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
/* Register the context switch */
push {lr}
-
-#ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
bl _sys_k_event_logger_context_switch
-#endif
-#ifdef CONFIG_STACK_SENTINEL
- bl _check_stack_sentinel
-#endif
#if defined(CONFIG_ARMV6_M)
pop {r0}
mov lr, r0
#else
pop {lr}
#endif /* CONFIG_ARMV6_M */
-#endif /* CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH || CONFIG_STACK_SENTINEL */
+#endif /* CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH */
/* load _kernel into r1 and current k_thread into r2 */
ldr r1, =_kernel
diff --git a/arch/nios2/core/swap.S b/arch/nios2/core/swap.S
index 45e52f7..a8688b6 100644
--- a/arch/nios2/core/swap.S
+++ b/arch/nios2/core/swap.S
@@ -59,12 +59,6 @@
#if CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
call _sys_k_event_logger_context_switch
-#endif
-#ifdef CONFIG_STACK_SENTINEL
- call _check_stack_sentinel
-#endif
-#if defined(CONFIG_STACK_SENTINEL) || \
- defined (CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH)
/* restore caller-saved r10 */
movhi r10, %hi(_kernel)
ori r10, r10, %lo(_kernel)
diff --git a/arch/riscv32/core/isr.S b/arch/riscv32/core/isr.S
index 9691fb7..facc96d 100644
--- a/arch/riscv32/core/isr.S
+++ b/arch/riscv32/core/isr.S
@@ -274,15 +274,15 @@
addi t2, t2, -1
sw t2, _kernel_offset_to_nested(t1)
+ /* Restore thread stack pointer */
+ lw t0, 0x00(sp)
+ addi sp, t0, 0
+
#ifdef CONFIG_STACK_SENTINEL
call _check_stack_sentinel
la t1, _kernel
#endif
- /* Restore thread stack pointer */
- lw t0, 0x00(sp)
- addi sp, t0, 0
-
#ifdef CONFIG_PREEMPT_ENABLED
/*
* Check if we need to perform a reschedule
@@ -316,9 +316,6 @@
#if CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
call _sys_k_event_logger_context_switch
#endif /* CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH */
-#ifdef CONFIG_STACK_SENTINEL
- call _check_stack_sentinel
-#endif
/* Get reference to _kernel */
la t0, _kernel
diff --git a/arch/x86/core/intstub.S b/arch/x86/core/intstub.S
index ac0e7ba..c3bf02f 100644
--- a/arch/x86/core/intstub.S
+++ b/arch/x86/core/intstub.S
@@ -343,6 +343,9 @@
#if defined(CONFIG_TIMESLICING)
call _update_time_slice_before_swap
#endif
+#ifdef CONFIG_STACK_SENTINEL
+ call _check_stack_sentinel
+#endif
pushfl /* push KERNEL_LOCK_KEY argument */
#ifdef CONFIG_X86_IAMCU
/* IAMCU first argument goes into a register, not the stack.
@@ -398,8 +401,9 @@
popl %esp /* pop thread stack pointer */
#ifdef CONFIG_STACK_SENTINEL
- call _check_stack_sentinel
+ call _check_stack_sentinel
#endif
+
/* fall through to 'nestedInterrupt' */
diff --git a/arch/x86/core/swap.S b/arch/x86/core/swap.S
index f019375..8687ebe 100644
--- a/arch/x86/core/swap.S
+++ b/arch/x86/core/swap.S
@@ -27,9 +27,6 @@
/* externs */
GDATA(_k_neg_eagain)
-#ifdef CONFIG_STACK_SENTINEL
- GTEXT(_check_stack_sentinel)
-#endif
/**
*
@@ -143,9 +140,6 @@
/* Register the context switch */
call _sys_k_event_logger_context_switch
#endif
-#ifdef CONFIG_STACK_SENTINEL
- call _check_stack_sentinel
-#endif
movl _kernel_offset_to_ready_q_cache(%edi), %eax
/*
diff --git a/arch/xtensa/core/swap.S b/arch/xtensa/core/swap.S
index 07f28fe..319248c 100644
--- a/arch/xtensa/core/swap.S
+++ b/arch/xtensa/core/swap.S
@@ -83,13 +83,6 @@
call4 _sys_k_event_logger_context_switch
#endif
#endif
-#ifdef CONFIG_STACK_SENTINEL
-#ifdef __XTENSA_CALL0_ABI__
- call0 _check_stack_sentinel
-#else
- call4 _check_stack_sentinel
-#endif
-#endif
/* _thread := _kernel.ready_q.cache */
l32i a3, a2, KERNEL_OFFSET(ready_q_cache)
/*
diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index d544f64..2b1bfb5 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -31,9 +31,6 @@
extern s32_t _ms_to_ticks(s32_t ms);
#endif
extern void idle(void *, void *, void *);
-#ifdef CONFIG_STACK_SENTINEL
-extern void _check_stack_sentinel(void);
-#endif
/* find which one is the next thread to run */
/* must be called with interrupts locked */
diff --git a/kernel/include/nano_internal.h b/kernel/include/nano_internal.h
index 9052648..e76ae85 100644
--- a/kernel/include/nano_internal.h
+++ b/kernel/include/nano_internal.h
@@ -52,19 +52,27 @@
extern unsigned int __swap(unsigned int key);
-#if defined(CONFIG_TIMESLICING)
+#ifdef CONFIG_TIMESLICING
extern void _update_time_slice_before_swap(void);
+#endif
-static inline unsigned int _time_slice_swap(unsigned int key)
+#ifdef CONFIG_STACK_SENTINEL
+extern void _check_stack_sentinel(void);
+#endif
+
+static inline unsigned int _Swap(unsigned int key)
{
+
+#ifdef CONFIG_STACK_SENTINEL
+ _check_stack_sentinel();
+#endif
+#ifdef CONFIG_TIMESLICING
_update_time_slice_before_swap();
+#endif
+
return __swap(key);
}
-#define _Swap(x) _time_slice_swap(x)
-#else
-#define _Swap(x) __swap(x)
-#endif
/* set and clear essential fiber/task flag */
extern void _thread_essential_set(void);
diff --git a/kernel/thread.c b/kernel/thread.c
index aa18bea..cd56347 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -141,12 +141,11 @@
* in a few places:
*
* 1) In k_yield() if the current thread is not swapped out
- * 2) In the interrupt code, after the interrupt has been serviced, and
- * a decision *not* to call _Swap() has been made.
+ * 2) After servicing a non-nested interrupt
* 3) In _Swap(), check the sentinel in the outgoing thread
* 4) When a thread returns from its entry function to cooperatively terminate
*
- * Items 2 and 3 require support in arch/ code.
+ * Item 2 requires support in arch/ code.
*
* If the check fails, the thread will be terminated appropriately through
* the system fatal error handler.