kernel/sched: Properly synchronize pend()
Kernel wait_qs and the thread's pended_on backpointer are scheduler
state and must be modified under the scheduler lock. There was one
spot in pend() where they were not.
Also unpack z_remove_thread_from_ready_q() into an unsynchronized
utility so that it can be called by this process in a single lock
block.
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
diff --git a/kernel/sched.c b/kernel/sched.c
index ccdebe4..0067005 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -483,26 +483,33 @@
#endif
}
+static void unready_thread(struct k_thread *thread)
+{
+ if (z_is_thread_queued(thread)) {
+ _priq_run_remove(&_kernel.ready_q.runq, thread);
+ z_mark_thread_as_not_queued(thread);
+ }
+ update_cache(thread == _current);
+}
+
void z_remove_thread_from_ready_q(struct k_thread *thread)
{
LOCKED(&sched_spinlock) {
- if (z_is_thread_queued(thread)) {
- _priq_run_remove(&_kernel.ready_q.runq, thread);
- z_mark_thread_as_not_queued(thread);
- }
- update_cache(thread == _current);
+ unready_thread(thread);
}
}
static void pend(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
{
- z_remove_thread_from_ready_q(thread);
- z_mark_thread_as_pending(thread);
- sys_trace_thread_pend(thread);
+ LOCKED(&sched_spinlock) {
+ unready_thread(thread);
+ z_mark_thread_as_pending(thread);
+ sys_trace_thread_pend(thread);
- if (wait_q != NULL) {
- thread->base.pended_on = wait_q;
- z_priq_wait_add(&wait_q->waitq, thread);
+ if (wait_q != NULL) {
+ thread->base.pended_on = wait_q;
+ z_priq_wait_add(&wait_q->waitq, thread);
+ }
}
if (timeout != K_FOREVER) {
@@ -546,9 +553,8 @@
LOCKED(&sched_spinlock) {
_priq_wait_remove(&pended_on(thread)->waitq, thread);
z_mark_thread_as_not_pending(thread);
+ thread->base.pended_on = NULL;
}
-
- thread->base.pended_on = NULL;
}
#ifdef CONFIG_SYS_CLOCK_EXISTS