kernel/queue: Clean up scheduler API usage

This was the only spot where the scheduler-internal
_peek_first_pending_thread() API was used.  Given that this kind of
peek is inherently racy (the thread may not stay pending as long as
you expect if a timeout expires, etc.), it would be nice to retire
it.
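
For illustration, the sort of stale peek the API invites (a sketch,
not code from this file; the timing noted in the comment is
hypothetical):

    struct k_thread *th = _peek_first_pending_thread(&wait_q);
    /* ... if irqs are unlocked and a wait timeout expires around
     * here, th may already have been unpended ...
     */
    if (th != NULL) {
        /* th is only a snapshot of the wait_q at peek time */
    }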

And as it happens, all the queue code was using it for was to detect
a non-empty wait_q while looping over it, which is trivial to do
without API support.
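
Roughly, the replacement pattern, condensed from the hunks below
(this assumes _reschedule_threads() either swaps or releases the
irq lock itself, as the current kernel does):

    int need_sched = 0;

    while (head && ((thread = _unpend_first_thread(&queue->wait_q)))) {
        /* ... hand one item to the unpended thread ... */
        need_sched = 1;    /* wait_q was non-empty */
    }

    if (need_sched) {
        _reschedule_threads(key);    /* may swap; releases the lock */
    } else {
        irq_unlock(key);
    }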

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>

diff --git a/kernel/queue.c b/kernel/queue.c
index ce566d9..d570a38 100644
--- a/kernel/queue.c
+++ b/kernel/queue.c
@@ -148,26 +148,21 @@
 {
 	__ASSERT(head && tail, "invalid head or tail");
 
+	int need_sched = 0;
 	unsigned int key = irq_lock();
 #if !defined(CONFIG_POLL)
-	struct k_thread *first_thread, *thread;
+	struct k_thread *thread;
 
-	first_thread = _peek_first_pending_thread(&queue->wait_q);
 	while (head && ((thread = _unpend_first_thread(&queue->wait_q)))) {
 		prepare_thread_to_run(thread, head);
 		head = *(void **)head;
+		need_sched = 1;
 	}
 
 	if (head) {
 		sys_slist_append_list(&queue->data_q, head, tail);
 	}
 
-	if (first_thread) {
-		if (!_is_in_isr() && _must_switch_threads()) {
-			(void)_Swap(key);
-			return;
-		}
-	}
 #else
 	sys_slist_append_list(&queue->data_q, head, tail);
 	if (handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE)) {
@@ -176,7 +171,12 @@
 	}
 #endif /* !CONFIG_POLL */
 
-	irq_unlock(key);
+	if (need_sched) {
+		_reschedule_threads(key);
+		return;
+	} else {
+		irq_unlock(key);
+	}
 }
 
 void k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)