blob: ce566d99f9085c78f39aaf571b6284c27b4a6797 [file] [log] [blame]
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001/*
2 * Copyright (c) 2010-2016 Wind River Systems, Inc.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7/**
8 * @file
9 *
10 * @brief dynamic-size QUEUE object.
11 */
12
13
14#include <kernel.h>
15#include <kernel_structs.h>
16#include <debug/object_tracing_common.h>
17#include <toolchain.h>
Anas Nashif397d29d2017-06-17 11:30:47 -040018#include <linker/sections.h>
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +020019#include <wait_q.h>
20#include <ksched.h>
21#include <misc/slist.h>
22#include <init.h>
Andy Ross9c62cc62018-01-25 15:24:15 -080023#include <kswap.h>
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +020024
25extern struct k_queue _k_queue_list_start[];
26extern struct k_queue _k_queue_list_end[];
27
#ifdef CONFIG_OBJECT_TRACING

struct k_queue *_trace_list_k_queue;

/*
 * Complete initialization of statically defined queues.
 *
 * Runs once at PRE_KERNEL_1: walks the linker-provided span of
 * statically defined k_queue objects and registers each one with
 * object tracing.
 */
static int init_queue_module(struct device *dev)
{
	ARG_UNUSED(dev);

	struct k_queue *q = _k_queue_list_start;

	while (q < _k_queue_list_end) {
		SYS_TRACING_OBJ_INIT(k_queue, q);
		q++;
	}
	return 0;
}

SYS_INIT(init_queue_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

#endif /* CONFIG_OBJECT_TRACING */
50
51void k_queue_init(struct k_queue *queue)
52{
53 sys_slist_init(&queue->data_q);
54 sys_dlist_init(&queue->wait_q);
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +030055#if defined(CONFIG_POLL)
56 sys_dlist_init(&queue->poll_events);
57#endif
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +020058
59 SYS_TRACING_OBJ_INIT(k_queue, queue);
60}
61
#if !defined(CONFIG_POLL)
/*
 * Make a pended thread runnable again, handing it @a data.
 *
 * Ordering matters: the wait timeout is aborted before the thread is
 * readied, then the thread's return value is set to 0 with @a data as
 * its swap_data (picked up by k_queue_get() after _Swap()).
 *
 * NOTE(review): all call sites in this file invoke this with the IRQ
 * lock held — presumably a hard requirement; confirm before reusing.
 * Only compiled when CONFIG_POLL is disabled; the poll build signals
 * waiters through poll events instead.
 */
static void prepare_thread_to_run(struct k_thread *thread, void *data)
{
	_abort_thread_timeout(thread);
	_ready_thread(thread);
	_set_thread_return_value_with_data(thread, 0, data);
}
#endif /* CONFIG_POLL */
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +020070
/* returns 1 if a reschedule must take place, 0 otherwise */
static inline int handle_poll_events(struct k_queue *queue, u32_t state)
{
#ifdef CONFIG_POLL
	/* Signal any k_poll() waiters registered on this queue with the
	 * given state (e.g. K_POLL_STATE_DATA_AVAILABLE).
	 */
	return _handle_obj_poll_events(&queue->poll_events, state);
#else
	/* No poll support: nothing to wake, never reschedule. */
	return 0;
#endif
}
80
/**
 * @brief Wake one waiter on @a queue without giving it data.
 *
 * The highest-priority thread pended in k_queue_get() is woken with a
 * NULL element, so its blocking call reports failure/cancellation.
 */
void k_queue_cancel_wait(struct k_queue *queue)
{
	unsigned int key = irq_lock();
#if !defined(CONFIG_POLL)
	struct k_thread *first_pending_thread;

	first_pending_thread = _unpend_first_thread(&queue->wait_q);

	if (first_pending_thread) {
		/* NULL data: the woken k_queue_get() returns NULL. */
		prepare_thread_to_run(first_pending_thread, NULL);
		/* If the woken thread must run now, context-switch; the
		 * irq key is handed to _Swap(), so no irq_unlock() on
		 * this path.
		 */
		if (!_is_in_isr() && _must_switch_threads()) {
			(void)_Swap(key);
			return;
		}
	}
#else
	/* Poll build: flag the queue "not ready" to wake pollers, and
	 * reschedule when one of them must run now.
	 */
	if (handle_poll_events(queue, K_POLL_STATE_NOT_READY)) {
		(void)_Swap(key);
		return;
	}
#endif /* !CONFIG_POLL */

	irq_unlock(key);
}
105
/**
 * @brief Insert @a data into @a queue after node @a prev.
 *
 * With CONFIG_POLL disabled, an element offered while a thread is
 * pended in k_queue_get() is handed directly to that thread and never
 * touches the slist.  Otherwise the element is linked after @a prev
 * (the slist insert treats a NULL @a prev as "at the head").
 */
void k_queue_insert(struct k_queue *queue, void *prev, void *data)
{
	unsigned int key = irq_lock();
#if !defined(CONFIG_POLL)
	struct k_thread *first_pending_thread;

	first_pending_thread = _unpend_first_thread(&queue->wait_q);

	if (first_pending_thread) {
		/* Direct hand-off to the waiter. */
		prepare_thread_to_run(first_pending_thread, data);
		/* Switch immediately if the waiter preempts us; _Swap()
		 * consumes the irq key, so no irq_unlock() here.
		 */
		if (!_is_in_isr() && _must_switch_threads()) {
			(void)_Swap(key);
			return;
		}
		irq_unlock(key);
		return;
	}
#endif /* !CONFIG_POLL */

	sys_slist_insert(&queue->data_q, prev, data);

#if defined(CONFIG_POLL)
	/* Wake pollers waiting for data; swap if one must run now. */
	if (handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE)) {
		(void)_Swap(key);
		return;
	}
#endif /* CONFIG_POLL */

	irq_unlock(key);
}
136
137void k_queue_append(struct k_queue *queue, void *data)
138{
139 return k_queue_insert(queue, queue->data_q.tail, data);
140}
141
142void k_queue_prepend(struct k_queue *queue, void *data)
143{
144 return k_queue_insert(queue, NULL, data);
145}
146
/**
 * @brief Append a NULL-terminated singly-linked chain of elements
 *        (@a head .. @a tail) to @a queue in one locked operation.
 *
 * With CONFIG_POLL disabled, one element is handed to each pended
 * thread (in wait-queue order) until either threads or elements run
 * out; any remainder is bulk-appended to the data list.
 */
void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
{
	__ASSERT(head && tail, "invalid head or tail");

	unsigned int key = irq_lock();
#if !defined(CONFIG_POLL)
	struct k_thread *first_thread, *thread;

	/* Remember whether anyone was waiting at all, to decide whether
	 * a reschedule is needed at the end.
	 */
	first_thread = _peek_first_pending_thread(&queue->wait_q);
	while (head && ((thread = _unpend_first_thread(&queue->wait_q)))) {
		prepare_thread_to_run(thread, head);
		/* Each element's first word is its `next' link. */
		head = *(void **)head;
	}

	/* Leftover elements (no more waiters) go into the data list;
	 * note @a tail is still the chain's true tail.
	 */
	if (head) {
		sys_slist_append_list(&queue->data_q, head, tail);
	}

	if (first_thread) {
		/* _Swap() consumes the irq key; no irq_unlock() then. */
		if (!_is_in_isr() && _must_switch_threads()) {
			(void)_Swap(key);
			return;
		}
	}
#else
	/* Poll build: always enqueue, then wake pollers. */
	sys_slist_append_list(&queue->data_q, head, tail);
	if (handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE)) {
		(void)_Swap(key);
		return;
	}
#endif /* !CONFIG_POLL */

	irq_unlock(key);
}
181
182void k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
183{
184 __ASSERT(!sys_slist_is_empty(list), "list must not be empty");
185
186 /*
187 * note: this works as long as:
188 * - the slist implementation keeps the next pointer as the first
189 * field of the node object type
190 * - list->tail->next = NULL.
191 */
192 k_queue_append_list(queue, list->head, list->tail);
193 sys_slist_init(list);
194}
195
#if defined(CONFIG_POLL)
/*
 * Block via k_poll() until @a queue signals data, then pop one element.
 *
 * Returns the element, or NULL if k_poll() fails (e.g. timeout).  With
 * K_FOREVER the pop is retried in a loop: the wakeup only says data
 * *was* available — presumably another consumer can claim the element
 * first, leaving sys_slist_get() empty-handed.
 */
static void *k_queue_poll(struct k_queue *queue, s32_t timeout)
{
	struct k_poll_event event;
	int err;
	unsigned int key;
	void *val;

	k_poll_event_init(&event, K_POLL_TYPE_FIFO_DATA_AVAILABLE,
			  K_POLL_MODE_NOTIFY_ONLY, queue);

	do {
		/* Re-arm the event before every k_poll() call. */
		event.state = K_POLL_STATE_NOT_READY;

		err = k_poll(&event, 1, timeout);
		if (err) {
			return NULL;
		}

		__ASSERT_NO_MSG(event.state ==
				K_POLL_STATE_FIFO_DATA_AVAILABLE);

		/* sys_slist_* aren't threadsafe, so must be always protected by
		 * irq_lock.
		 */
		key = irq_lock();
		val = sys_slist_get(&queue->data_q);
		irq_unlock(key);
	} while (!val && timeout == K_FOREVER);

	return val;
}
#endif /* CONFIG_POLL */
229
/**
 * @brief Remove and return the first element of @a queue.
 *
 * Waits up to @a timeout when the queue is empty (K_NO_WAIT and
 * K_FOREVER supported).
 *
 * @return the element, or NULL on timeout/cancellation.
 */
void *k_queue_get(struct k_queue *queue, s32_t timeout)
{
	unsigned int key;
	void *data;

	key = irq_lock();

	/* Fast path: data already queued. */
	if (likely(!sys_slist_is_empty(&queue->data_q))) {
		data = sys_slist_get_not_empty(&queue->data_q);
		irq_unlock(key);
		return data;
	}

	if (timeout == K_NO_WAIT) {
		irq_unlock(key);
		return NULL;
	}

#if defined(CONFIG_POLL)
	/* Poll build: wait via the poll machinery, outside the lock. */
	irq_unlock(key);

	return k_queue_poll(queue, timeout);

#else
	/* Pend here; an inserter hands the element over through
	 * swap_data (set in prepare_thread_to_run()).
	 */
	_pend_current_thread(&queue->wait_q, timeout);

	/* Non-zero _Swap() return means timeout/cancel -> NULL. */
	return _Swap(key) ? NULL : _current->base.swap_data;
#endif /* CONFIG_POLL */
}
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +0200258}