kernel/queue: Remove interior use of k_poll()

The k_queue data structure, when CONFIG_POLL was enabled, would
inexplicably use k_poll() as its blocking mechanism instead of the
original wait_q/pend() code.  This was actually racy; see commit
b173e4353fe5.  The code was structured as a condition variable: take
a spinlock around the queue data, then decide whether to block.  But
unlike z_pend_curr() (used by the wait_q path), k_poll() cannot
atomically release a lock when it goes to sleep.
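
To make the hazard concrete, the two shapes look roughly like this
(a simplified sketch, not the exact code removed below; the names
match the patch):

	/* Poll path: the lock has to be dropped before blocking,
	 * leaving a window between the unlock and the wait inside
	 * k_poll() in which another CPU can touch the queue.
	 */
	key = k_spin_lock(&queue->lock);
	if (sys_sflist_is_empty(&queue->data_q)) {
		k_spin_unlock(&queue->lock, key);
		/* <-- race window */
		k_poll(&event, 1, timeout);
	}

	/* wait_q path: z_pend_curr() releases the lock and pends the
	 * current thread as one atomic operation, so no such window
	 * exists.
	 */
	key = k_spin_lock(&queue->lock);
	if (sys_sflist_is_empty(&queue->data_q)) {
		ret = z_pend_curr(&queue->lock, key, &queue->wait_q,
				  timeout);
	}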

A workaround had been put in place for this, and was then
accidentally reverted (both by me!) because the code looked "wrong".

This is just fragile; there's no reason to have two implementations
of k_queue_get().  Remove the poll-based one.

Note that this also removes a test case in the work_queue test where
(when CONFIG_POLL was enabled, but not otherwise) it was checking for
the ability to immediately cancel a delayed work item that was
submitted with a timeout of K_NO_WAIT (i.e. "queue it immediately").
This DOES NOT work with the original/non-poll queue backend, and has
never been a documented behavior of k_delayed_work_submit_to_queue()
under any circumstances.  I don't know why we were testing this.
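
For reference, the removed pattern was simply:

	/* Exercised only when CONFIG_POLL was enabled: */
	k_delayed_work_submit(&delayed_tests[2].work, K_NO_WAIT);
	k_delayed_work_cancel(&delayed_tests[2].work);

With K_NO_WAIT the item is handed to the work queue immediately, so
the cancel is trying to claw back an already-queued item rather than
stop a timeout that has yet to fire; only the latter has ever been
the documented use of the cancel API.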

Fixes #25904

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
diff --git a/include/kernel.h b/include/kernel.h
index f02adba..697c9c1 100644
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -2155,12 +2155,9 @@
 struct k_queue {
 	sys_sflist_t data_q;
 	struct k_spinlock lock;
-	union {
-		_wait_q_t wait_q;
+	_wait_q_t wait_q;
 
-		_POLL_EVENT;
-	};
-
+	_POLL_EVENT;
 	_OBJECT_TRACING_NEXT_PTR(k_queue)
 	_OBJECT_TRACING_LINKED_FLAG
 };
@@ -2169,10 +2166,8 @@
 	{ \
 	.data_q = SYS_SLIST_STATIC_INIT(&obj.data_q), \
 	.lock = { }, \
-	{ \
-		.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
-		_POLL_EVENT_OBJ_INIT(obj) \
-	}, \
+	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q),	\
+	_POLL_EVENT_OBJ_INIT(obj)		\
 	_OBJECT_TRACING_INIT \
 	}
 
diff --git a/kernel/queue.c b/kernel/queue.c
index c60c9f7..578325a 100644
--- a/kernel/queue.c
+++ b/kernel/queue.c
@@ -100,25 +100,22 @@
 #include <syscalls/k_queue_init_mrsh.c>
 #endif
 
-#if !defined(CONFIG_POLL)
 static void prepare_thread_to_run(struct k_thread *thread, void *data)
 {
 	z_thread_return_value_set_with_data(thread, 0, data);
 	z_ready_thread(thread);
 }
-#endif /* CONFIG_POLL */
 
-#ifdef CONFIG_POLL
 static inline void handle_poll_events(struct k_queue *queue, u32_t state)
 {
+#ifdef CONFIG_POLL
 	z_handle_obj_poll_events(&queue->poll_events, state);
-}
 #endif
+}
 
 void z_impl_k_queue_cancel_wait(struct k_queue *queue)
 {
 	k_spinlock_key_t key = k_spin_lock(&queue->lock);
-#if !defined(CONFIG_POLL)
 	struct k_thread *first_pending_thread;
 
 	first_pending_thread = z_unpend_first_thread(&queue->wait_q);
@@ -126,10 +123,8 @@
 	if (first_pending_thread != NULL) {
 		prepare_thread_to_run(first_pending_thread, NULL);
 	}
-#else
-	handle_poll_events(queue, K_POLL_STATE_CANCELLED);
-#endif /* !CONFIG_POLL */
 
+	handle_poll_events(queue, K_POLL_STATE_CANCELLED);
 	z_reschedule(&queue->lock, key);
 }
 
@@ -146,7 +141,6 @@
 			  bool alloc)
 {
 	k_spinlock_key_t key = k_spin_lock(&queue->lock);
-#if !defined(CONFIG_POLL)
 	struct k_thread *first_pending_thread;
 
 	first_pending_thread = z_unpend_first_thread(&queue->wait_q);
@@ -156,7 +150,6 @@
 		z_reschedule(&queue->lock, key);
 		return 0;
 	}
-#endif /* !CONFIG_POLL */
 
 	/* Only need to actually allocate if no threads are pending */
 	if (alloc) {
@@ -173,12 +166,9 @@
 	} else {
 		sys_sfnode_init(data, 0x0);
 	}
+
 	sys_sflist_insert(&queue->data_q, prev, data);
-
-#if defined(CONFIG_POLL)
 	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
-#endif /* CONFIG_POLL */
-
 	z_reschedule(&queue->lock, key);
 	return 0;
 }
@@ -238,7 +228,6 @@
 	}
 
 	k_spinlock_key_t key = k_spin_lock(&queue->lock);
-#if !defined(CONFIG_POLL)
 	struct k_thread *thread = NULL;
 
 	if (head != NULL) {
@@ -255,13 +244,8 @@
 		sys_sflist_append_list(&queue->data_q, head, tail);
 	}
 
-#else
-	sys_sflist_append_list(&queue->data_q, head, tail);
 	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
-#endif /* !CONFIG_POLL */
-
 	z_reschedule(&queue->lock, key);
-
 	return 0;
 }
 
@@ -292,32 +276,6 @@
 	return 0;
 }
 
-#if defined(CONFIG_POLL)
-static void *k_queue_poll(struct k_queue *queue, k_timeout_t timeout)
-{
-	struct k_poll_event event;
-	int err;
-	k_spinlock_key_t key;
-	void *val;
-
-	k_poll_event_init(&event, K_POLL_TYPE_FIFO_DATA_AVAILABLE,
-			  K_POLL_MODE_NOTIFY_ONLY, queue);
-
-	event.state = K_POLL_STATE_NOT_READY;
-	err = k_poll(&event, 1, timeout);
-
-	if (err && err != -EAGAIN) {
-		return NULL;
-	}
-
-	key = k_spin_lock(&queue->lock);
-	val = z_queue_node_peek(sys_sflist_get(&queue->data_q), true);
-	k_spin_unlock(&queue->lock, key);
-
-	return val;
-}
-#endif /* CONFIG_POLL */
-
 void *z_impl_k_queue_get(struct k_queue *queue, k_timeout_t timeout)
 {
 	k_spinlock_key_t key = k_spin_lock(&queue->lock);
@@ -337,16 +295,9 @@
 		return NULL;
 	}
 
-#if defined(CONFIG_POLL)
-	k_spin_unlock(&queue->lock, key);
-
-	return k_queue_poll(queue, timeout);
-
-#else
 	int ret = z_pend_curr(&queue->lock, key, &queue->wait_q, timeout);
 
 	return (ret != 0) ? NULL : _current->base.swap_data;
-#endif /* CONFIG_POLL */
 }
 
 #ifdef CONFIG_USERSPACE
diff --git a/tests/kernel/workq/work_queue/src/main.c b/tests/kernel/workq/work_queue/src/main.c
index e3fdcc7..9e2f868 100644
--- a/tests/kernel/workq/work_queue/src/main.c
+++ b/tests/kernel/workq/work_queue/src/main.c
@@ -286,14 +286,6 @@
 
 	TC_PRINT(" - Cancel delayed work from coop thread\n");
 	k_delayed_work_cancel(&delayed_tests[1].work);
-
-#if defined(CONFIG_POLL)
-	k_delayed_work_submit(&delayed_tests[2].work,
-			      K_NO_WAIT /* Submit immediately */);
-
-	TC_PRINT(" - Cancel pending delayed work from coop thread\n");
-	k_delayed_work_cancel(&delayed_tests[2].work);
-#endif
 }
 
 /**