/*
 * Copyright (c) 2010-2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief dynamic-size QUEUE object.
 */

#include <kernel.h>
#include <kernel_structs.h>
#include <debug/object_tracing_common.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <wait_q.h>
#include <ksched.h>
#include <misc/sflist.h>
#include <init.h>
#include <syscall_handler.h>
#include <kernel_internal.h>

struct alloc_node {
        sys_sfnode_t node;
        void *data;
};

void *z_queue_node_peek(sys_sfnode_t *node, bool needs_free)
{
        void *ret;

        if ((node != NULL) && (sys_sfnode_flags_get(node) != (u8_t)0)) {
                /* If the flag is set, then the enqueue operation for this
                 * item did a behind-the-scenes memory allocation of an
                 * alloc_node struct, which is what got put in the queue.
                 * Free it and pass back the data pointer.
                 */
                struct alloc_node *anode;

                anode = CONTAINER_OF(node, struct alloc_node, node);
                ret = anode->data;
                if (needs_free) {
                        k_free(anode);
                }
        } else {
                /* Data was directly placed in the queue; the first 4 bytes
                 * are reserved for the linked list. User mode isn't allowed
                 * to do this, although it can get data sent this way.
                 */
                ret = (void *)node;
        }

        return ret;
}
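
/*
 * Illustration of the two cases above (a sketch, not compiled here; my_q
 * and struct msg are hypothetical names). queue_insert() later in this
 * file tags kernel-allocated wrapper nodes with sfnode flag 0x1 and
 * caller-provided nodes with 0x0:
 *
 *     struct msg {
 *             void *reserved;          (first word consumed by the link)
 *             int payload;
 *     };
 *
 *     static struct msg m;
 *     k_queue_append(&my_q, &m);        flags 0x0: peek returns &m itself
 *     k_queue_alloc_append(&my_q, &m);  flags 0x1: peek unwraps the
 *                                       alloc_node and returns its ->data
 */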

#ifdef CONFIG_OBJECT_TRACING

struct k_queue *_trace_list_k_queue;

/*
 * Complete initialization of statically defined queues.
 */
static int init_queue_module(struct device *dev)
{
        ARG_UNUSED(dev);

        Z_STRUCT_SECTION_FOREACH(k_queue, queue) {
                SYS_TRACING_OBJ_INIT(k_queue, queue);
        }
        return 0;
}

SYS_INIT(init_queue_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

#endif /* CONFIG_OBJECT_TRACING */

void z_impl_k_queue_init(struct k_queue *queue)
{
        sys_sflist_init(&queue->data_q);
        queue->lock = (struct k_spinlock) {};
        z_waitq_init(&queue->wait_q);
#if defined(CONFIG_POLL)
        sys_dlist_init(&queue->poll_events);
#endif

        SYS_TRACING_OBJ_INIT(k_queue, queue);
        z_object_init(queue);
}
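
/*
 * Usage sketch (illustrative; the name my_q is hypothetical): a queue can
 * be initialized at run time with this function, or defined and
 * initialized statically with K_QUEUE_DEFINE():
 *
 *     struct k_queue my_q;
 *     k_queue_init(&my_q);
 *
 *     K_QUEUE_DEFINE(my_static_q);
 */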

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_queue_init, queue_ptr)
{
        struct k_queue *queue = (struct k_queue *)queue_ptr;

        Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(queue, K_OBJ_QUEUE));
        z_impl_k_queue_init(queue);
        return 0;
}
#endif

#if !defined(CONFIG_POLL)
static void prepare_thread_to_run(struct k_thread *thread, void *data)
{
        z_ready_thread(thread);
        z_set_thread_return_value_with_data(thread, 0, data);
}
#endif /* !CONFIG_POLL */

#ifdef CONFIG_POLL
static inline void handle_poll_events(struct k_queue *queue, u32_t state)
{
        z_handle_obj_poll_events(&queue->poll_events, state);
}
#endif /* CONFIG_POLL */

void z_impl_k_queue_cancel_wait(struct k_queue *queue)
{
        k_spinlock_key_t key = k_spin_lock(&queue->lock);
#if !defined(CONFIG_POLL)
        struct k_thread *first_pending_thread;

        first_pending_thread = z_unpend_first_thread(&queue->wait_q);

        if (first_pending_thread != NULL) {
                prepare_thread_to_run(first_pending_thread, NULL);
        }
#else
        handle_poll_events(queue, K_POLL_STATE_CANCELLED);
#endif /* !CONFIG_POLL */

        z_reschedule(&queue->lock, key);
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER1_SIMPLE_VOID(k_queue_cancel_wait, K_OBJ_QUEUE,
                               struct k_queue *);
#endif

static s32_t queue_insert(struct k_queue *queue, void *prev, void *data,
                          bool alloc)
{
        k_spinlock_key_t key = k_spin_lock(&queue->lock);
#if !defined(CONFIG_POLL)
        struct k_thread *first_pending_thread;

        first_pending_thread = z_unpend_first_thread(&queue->wait_q);

        if (first_pending_thread != NULL) {
                prepare_thread_to_run(first_pending_thread, data);
                z_reschedule(&queue->lock, key);
                return 0;
        }
#endif /* !CONFIG_POLL */

        /* Only need to actually allocate if no threads are pending */
        if (alloc) {
                struct alloc_node *anode;

                anode = z_thread_malloc(sizeof(*anode));
                if (anode == NULL) {
                        k_spin_unlock(&queue->lock, key);
                        return -ENOMEM;
                }
                anode->data = data;
                sys_sfnode_init(&anode->node, 0x1);
                data = anode;
        } else {
                sys_sfnode_init(data, 0x0);
        }
        sys_sflist_insert(&queue->data_q, prev, data);

#if defined(CONFIG_POLL)
        handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
#endif /* CONFIG_POLL */

        z_reschedule(&queue->lock, key);
        return 0;
}

void k_queue_insert(struct k_queue *queue, void *prev, void *data)
{
        (void)queue_insert(queue, prev, data, false);
}

void k_queue_append(struct k_queue *queue, void *data)
{
        (void)queue_insert(queue, sys_sflist_peek_tail(&queue->data_q),
                           data, false);
}

void k_queue_prepend(struct k_queue *queue, void *data)
{
        (void)queue_insert(queue, NULL, data, false);
}
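
/*
 * Usage sketch for the non-allocating calls above (names hypothetical):
 * the caller's item must reserve its first word for the queue's list
 * link, which sys_sfnode_init() overwrites at enqueue time:
 *
 *     struct work_item {
 *             void *reserved;          (used by the queue's sflist link)
 *             u32_t payload;
 *     };
 *
 *     static struct work_item item = { .payload = 42U };
 *     k_queue_append(&my_q, &item);
 *
 * or, to insert at the head instead:
 *
 *     k_queue_prepend(&my_q, &item);
 */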

s32_t z_impl_k_queue_alloc_append(struct k_queue *queue, void *data)
{
        return queue_insert(queue, sys_sflist_peek_tail(&queue->data_q), data,
                            true);
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_queue_alloc_append, queue, data)
{
        Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));

        return z_impl_k_queue_alloc_append((struct k_queue *)queue,
                                           (void *)data);
}
#endif

s32_t z_impl_k_queue_alloc_prepend(struct k_queue *queue, void *data)
{
        return queue_insert(queue, NULL, data, true);
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_queue_alloc_prepend, queue, data)
{
        Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));

        return z_impl_k_queue_alloc_prepend((struct k_queue *)queue,
                                            (void *)data);
}
#endif
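
/*
 * Usage sketch for the allocating variants above (my_q hypothetical):
 * no reserved word is needed in the caller's data, because the list link
 * lives in an alloc_node drawn from the calling thread's resource pool
 * via z_thread_malloc(), so the return value must be checked:
 *
 *     if (k_queue_alloc_append(&my_q, data_ptr) == -ENOMEM) {
 *             (resource pool exhausted or unassigned; item not queued)
 *     }
 */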

void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
{
        __ASSERT(head && tail, "invalid head or tail");

        k_spinlock_key_t key = k_spin_lock(&queue->lock);
#if !defined(CONFIG_POLL)
        struct k_thread *thread = NULL;

        if (head != NULL) {
                thread = z_unpend_first_thread(&queue->wait_q);
        }

        while ((head != NULL) && (thread != NULL)) {
                prepare_thread_to_run(thread, head);
                head = *(void **)head;
                thread = z_unpend_first_thread(&queue->wait_q);
        }

        if (head != NULL) {
                sys_sflist_append_list(&queue->data_q, head, tail);
        }

#else
        sys_sflist_append_list(&queue->data_q, head, tail);
        handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
#endif /* !CONFIG_POLL */

        z_reschedule(&queue->lock, key);
}

void k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
{
        __ASSERT(!sys_slist_is_empty(list), "list must not be empty");

        /*
         * note: this works as long as:
         * - the slist implementation keeps the next pointer as the first
         *   field of the node object type
         * - list->tail->next == NULL
         * - the sflist implementation only differs from slist by stuffing
         *   flag bits in the lower-order bits of the data pointer
         * - the source list is really an slist and not an sflist with
         *   flags set
         */
        k_queue_append_list(queue, list->head, list->tail);
        sys_slist_init(list);
}
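
/*
 * Usage sketch (names hypothetical, items assumed to begin with a
 * sys_snode_t): draining a locally built slist into a queue in one
 * operation; the source list is left reinitialized and empty:
 *
 *     sys_slist_t batch;
 *
 *     sys_slist_init(&batch);
 *     sys_slist_append(&batch, &item_a.node);
 *     sys_slist_append(&batch, &item_b.node);
 *     k_queue_merge_slist(&my_q, &batch);
 */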

#if defined(CONFIG_POLL)
static void *k_queue_poll(struct k_queue *queue, s32_t timeout)
{
        struct k_poll_event event;
        int err, elapsed = 0, done = 0;
        k_spinlock_key_t key;
        void *val;
        u32_t start;

        k_poll_event_init(&event, K_POLL_TYPE_FIFO_DATA_AVAILABLE,
                          K_POLL_MODE_NOTIFY_ONLY, queue);

        if (timeout != K_FOREVER) {
                start = k_uptime_get_32();
        }

        do {
                event.state = K_POLL_STATE_NOT_READY;

                err = k_poll(&event, 1, timeout - elapsed);

                if (err && err != -EAGAIN) {
                        return NULL;
                }

                key = k_spin_lock(&queue->lock);
                val = z_queue_node_peek(sys_sflist_get(&queue->data_q), true);
                k_spin_unlock(&queue->lock, key);

                if ((val == NULL) && (timeout != K_FOREVER)) {
                        elapsed = k_uptime_get_32() - start;
                        done = elapsed > timeout;
                }
        } while (!val && !done);

        return val;
}
#endif /* CONFIG_POLL */

void *z_impl_k_queue_get(struct k_queue *queue, s32_t timeout)
{
        k_spinlock_key_t key = k_spin_lock(&queue->lock);
        void *data;

        if (likely(!sys_sflist_is_empty(&queue->data_q))) {
                sys_sfnode_t *node;

                node = sys_sflist_get_not_empty(&queue->data_q);
                data = z_queue_node_peek(node, true);
                k_spin_unlock(&queue->lock, key);
                return data;
        }

        if (timeout == K_NO_WAIT) {
                k_spin_unlock(&queue->lock, key);
                return NULL;
        }

#if defined(CONFIG_POLL)
        k_spin_unlock(&queue->lock, key);

        return k_queue_poll(queue, timeout);

#else
        int ret = z_pend_curr(&queue->lock, key, &queue->wait_q, timeout);

        return (ret != 0) ? NULL : _current->base.swap_data;
#endif /* CONFIG_POLL */
}
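
/*
 * Usage sketch (my_q hypothetical): the three timeout forms accepted by
 * k_queue_get(), which returns NULL on timeout:
 *
 *     void *p;
 *
 *     p = k_queue_get(&my_q, K_NO_WAIT);    (NULL immediately if empty)
 *     p = k_queue_get(&my_q, 100);          (wait up to 100 ms)
 *     p = k_queue_get(&my_q, K_FOREVER);    (block until data arrives)
 */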

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_queue_get, queue, timeout_p)
{
        s32_t timeout = timeout_p;

        Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE));

        return (u32_t)z_impl_k_queue_get((struct k_queue *)queue, timeout);
}

Z_SYSCALL_HANDLER1_SIMPLE(k_queue_is_empty, K_OBJ_QUEUE, struct k_queue *);
Z_SYSCALL_HANDLER1_SIMPLE(k_queue_peek_head, K_OBJ_QUEUE, struct k_queue *);
Z_SYSCALL_HANDLER1_SIMPLE(k_queue_peek_tail, K_OBJ_QUEUE, struct k_queue *);
#endif /* CONFIG_USERSPACE */