/*
 * Copyright (c) 2010-2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief dynamic-size QUEUE object.
 */


#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>

#include <zephyr/toolchain.h>
#include <wait_q.h>
#include <ksched.h>
#include <zephyr/init.h>
#include <zephyr/internal/syscall_handler.h>
#include <kernel_internal.h>
#include <zephyr/sys/check.h>

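/* Wrapper used by the k_queue_alloc_*() insert paths: the kernel allocates
 * this node so the caller's data item does not have to reserve a word for
 * the list linkage; it carries the linkage plus a pointer to the user data.
 */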
struct alloc_node {
        sys_sfnode_t node;
        void *data;
};

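/* Translate a queue node back into the caller's data pointer.  If the node
 * was allocated by the kernel (flag bit set), optionally free the wrapper
 * and return the stored data pointer; otherwise the node itself is the data.
 */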
void *z_queue_node_peek(sys_sfnode_t *node, bool needs_free)
{
        void *ret;

        if ((node != NULL) && (sys_sfnode_flags_get(node) != (uint8_t)0)) {
                /* If the flag is set, then the enqueue operation for this item
                 * did a behind-the-scenes memory allocation of an alloc_node
                 * struct, which is what got put in the queue. Free it and pass
                 * back the data pointer.
                 */
                struct alloc_node *anode;

                anode = CONTAINER_OF(node, struct alloc_node, node);
                ret = anode->data;
                if (needs_free) {
                        k_free(anode);
                }
        } else {
                /* Data was directly placed in the queue, with the first word
                 * reserved for the linked list. User mode isn't allowed to
                 * do this, although it can get data sent this way.
                 */
                ret = (void *)node;
        }

        return ret;
}

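/* Initialize a queue: empty data list, unlocked spinlock, empty wait queue,
 * and (with CONFIG_POLL) an empty list of registered poll events.
 */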
void z_impl_k_queue_init(struct k_queue *queue)
{
        sys_sflist_init(&queue->data_q);
        queue->lock = (struct k_spinlock) {};
        z_waitq_init(&queue->wait_q);
#if defined(CONFIG_POLL)
        sys_dlist_init(&queue->poll_events);
#endif

        SYS_PORT_TRACING_OBJ_INIT(k_queue, queue);

        k_object_init(queue);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_queue_init(struct k_queue *queue)
{
        K_OOPS(K_SYSCALL_OBJ_NEVER_INIT(queue, K_OBJ_QUEUE));
        z_impl_k_queue_init(queue);
}
#include <syscalls/k_queue_init_mrsh.c>
#endif

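/* Hand a queue item directly to a thread that was pending on the queue and
 * make that thread ready to run; the data pointer becomes its k_queue_get()
 * return value.
 */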
static void prepare_thread_to_run(struct k_thread *thread, void *data)
{
        z_thread_return_value_set_with_data(thread, 0, data);
        z_ready_thread(thread);
}

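/* Signal any k_poll() waiters registered on this queue; compiles to a no-op
 * when CONFIG_POLL is disabled.
 */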
static inline void handle_poll_events(struct k_queue *queue, uint32_t state)
{
#ifdef CONFIG_POLL
        z_handle_obj_poll_events(&queue->poll_events, state);
#else
        ARG_UNUSED(queue);
        ARG_UNUSED(state);
#endif
}

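/* Wake the first thread pending on the queue (if any) with a NULL result and
 * report K_POLL_STATE_CANCELLED to pollers, so waiters can distinguish a
 * cancelled wait from ordinary data arrival.
 */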
void z_impl_k_queue_cancel_wait(struct k_queue *queue)
{
        SYS_PORT_TRACING_OBJ_FUNC(k_queue, cancel_wait, queue);

        k_spinlock_key_t key = k_spin_lock(&queue->lock);
        struct k_thread *first_pending_thread;

        first_pending_thread = z_unpend_first_thread(&queue->wait_q);

        if (first_pending_thread != NULL) {
                prepare_thread_to_run(first_pending_thread, NULL);
        }

        handle_poll_events(queue, K_POLL_STATE_CANCELLED);
        z_reschedule(&queue->lock, key);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_queue_cancel_wait(struct k_queue *queue)
{
        K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
        z_impl_k_queue_cancel_wait(queue);
}
#include <syscalls/k_queue_cancel_wait_mrsh.c>
#endif

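/* Common insert path for the k_queue_*append/prepend/insert variants.  If a
 * thread is already waiting, the data is handed to it directly and never
 * touches the list; otherwise the item (or, when alloc is true, a
 * kernel-allocated alloc_node wrapping it) is linked into the sflist after
 * prev, or at the tail when is_append is true.
 */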
static int32_t queue_insert(struct k_queue *queue, void *prev, void *data,
                            bool alloc, bool is_append)
{
        struct k_thread *first_pending_thread;
        k_spinlock_key_t key = k_spin_lock(&queue->lock);

        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, queue_insert, queue, alloc);

        if (is_append) {
                prev = sys_sflist_peek_tail(&queue->data_q);
        }
        first_pending_thread = z_unpend_first_thread(&queue->wait_q);

        if (first_pending_thread != NULL) {
                SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_queue, queue_insert, queue, alloc, K_FOREVER);

                prepare_thread_to_run(first_pending_thread, data);
                z_reschedule(&queue->lock, key);

                SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, queue_insert, queue, alloc, 0);

                return 0;
        }

        /* Only need to actually allocate if no threads are pending */
        if (alloc) {
                struct alloc_node *anode;

                anode = z_thread_malloc(sizeof(*anode));
                if (anode == NULL) {
                        k_spin_unlock(&queue->lock, key);

                        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, queue_insert, queue, alloc,
                                                       -ENOMEM);

                        return -ENOMEM;
                }
                anode->data = data;
                sys_sfnode_init(&anode->node, 0x1);
                data = anode;
        } else {
                sys_sfnode_init(data, 0x0);
        }

        SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_queue, queue_insert, queue, alloc, K_FOREVER);

        sys_sflist_insert(&queue->data_q, prev, data);
        handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
        z_reschedule(&queue->lock, key);

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, queue_insert, queue, alloc, 0);

        return 0;
}

void k_queue_insert(struct k_queue *queue, void *prev, void *data)
{
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, insert, queue);

        (void)queue_insert(queue, prev, data, false, false);

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, insert, queue);
}

void k_queue_append(struct k_queue *queue, void *data)
{
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, append, queue);

        (void)queue_insert(queue, NULL, data, false, true);

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, append, queue);
}

void k_queue_prepend(struct k_queue *queue, void *data)
{
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, prepend, queue);

        (void)queue_insert(queue, NULL, data, false, false);

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, prepend, queue);
}

int32_t z_impl_k_queue_alloc_append(struct k_queue *queue, void *data)
{
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, alloc_append, queue);

        int32_t ret = queue_insert(queue, NULL, data, true, true);

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, alloc_append, queue, ret);

        return ret;
}

#ifdef CONFIG_USERSPACE
static inline int32_t z_vrfy_k_queue_alloc_append(struct k_queue *queue,
                                                  void *data)
{
        K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
        return z_impl_k_queue_alloc_append(queue, data);
}
#include <syscalls/k_queue_alloc_append_mrsh.c>
#endif

int32_t z_impl_k_queue_alloc_prepend(struct k_queue *queue, void *data)
{
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, alloc_prepend, queue);

        int32_t ret = queue_insert(queue, NULL, data, true, false);

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, alloc_prepend, queue, ret);

        return ret;
}

#ifdef CONFIG_USERSPACE
static inline int32_t z_vrfy_k_queue_alloc_prepend(struct k_queue *queue,
                                                   void *data)
{
        K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
        return z_impl_k_queue_alloc_prepend(queue, data);
}
#include <syscalls/k_queue_alloc_prepend_mrsh.c>
#endif

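/* Append a NULL-terminated singly linked list of items in one operation,
 * waking as many pending threads as there are items before splicing whatever
 * remains onto the queue's list.
 */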
int k_queue_append_list(struct k_queue *queue, void *head, void *tail)
{
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, append_list, queue);

        /* invalid head or tail of list */
        CHECKIF(head == NULL || tail == NULL) {
                SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, append_list, queue, -EINVAL);

                return -EINVAL;
        }

        k_spinlock_key_t key = k_spin_lock(&queue->lock);
        struct k_thread *thread = NULL;

        if (head != NULL) {
                thread = z_unpend_first_thread(&queue->wait_q);
        }

        while ((head != NULL) && (thread != NULL)) {
                prepare_thread_to_run(thread, head);
                head = *(void **)head;
                thread = z_unpend_first_thread(&queue->wait_q);
        }

        if (head != NULL) {
                sys_sflist_append_list(&queue->data_q, head, tail);
        }

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, append_list, queue, 0);

        handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
        z_reschedule(&queue->lock, key);
        return 0;
}

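/* Move the entire contents of an slist into the queue, leaving the source
 * list empty on success.
 */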
int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
{
        int ret;

        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, merge_slist, queue);

        /* list must not be empty */
        CHECKIF(sys_slist_is_empty(list)) {
                SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, merge_slist, queue, -EINVAL);

                return -EINVAL;
        }

        /*
         * note: this works as long as:
         * - the slist implementation keeps the next pointer as the first
         *   field of the node object type
         * - list->tail->next = NULL.
         * - the sflist implementation only differs from slist by stuffing
         *   flag bits into the low-order bits of the data pointer
         * - source list is really an slist and not an sflist with flags set
         */
        ret = k_queue_append_list(queue, list->head, list->tail);
        CHECKIF(ret != 0) {
                SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, merge_slist, queue, ret);

                return ret;
        }
        sys_slist_init(list);

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, merge_slist, queue, 0);

        return 0;
}

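/* Remove and return the head of the queue.  If the queue is empty, return
 * NULL immediately for K_NO_WAIT, otherwise pend on the wait queue until an
 * inserter hands over an item (delivered via swap_data) or the timeout
 * expires.
 */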
void *z_impl_k_queue_get(struct k_queue *queue, k_timeout_t timeout)
{
        k_spinlock_key_t key = k_spin_lock(&queue->lock);
        void *data;

        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, get, queue, timeout);

        if (likely(!sys_sflist_is_empty(&queue->data_q))) {
                sys_sfnode_t *node;

                node = sys_sflist_get_not_empty(&queue->data_q);
                data = z_queue_node_peek(node, true);
                k_spin_unlock(&queue->lock, key);

                SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, get, queue, timeout, data);

                return data;
        }

        SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_queue, get, queue, timeout);

        if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
                k_spin_unlock(&queue->lock, key);

                SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, get, queue, timeout, NULL);

                return NULL;
        }

        int ret = z_pend_curr(&queue->lock, key, &queue->wait_q, timeout);

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, get, queue, timeout,
                                       (ret != 0) ? NULL : _current->base.swap_data);

        return (ret != 0) ? NULL : _current->base.swap_data;
}

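/* Remove a specific item from anywhere in the queue; returns false if the
 * item is not found.  Items inserted through the k_queue_alloc_*() paths are
 * wrapped in an alloc_node, so they will not match their data pointer here.
 */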
bool k_queue_remove(struct k_queue *queue, void *data)
{
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, remove, queue);

        bool ret = sys_sflist_find_and_remove(&queue->data_q, (sys_sfnode_t *)data);

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, remove, queue, ret);

        return ret;
}

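/* Append an item only if it is not already linked into the queue; the scan
 * is O(n) over the current contents.
 */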
bool k_queue_unique_append(struct k_queue *queue, void *data)
{
        SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, unique_append, queue);

        sys_sfnode_t *test;

        SYS_SFLIST_FOR_EACH_NODE(&queue->data_q, test) {
                if (test == (sys_sfnode_t *) data) {
                        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, unique_append, queue, false);

                        return false;
                }
        }

        k_queue_append(queue, data);

        SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, unique_append, queue, true);

        return true;
}

void *z_impl_k_queue_peek_head(struct k_queue *queue)
{
        void *ret = z_queue_node_peek(sys_sflist_peek_head(&queue->data_q), false);

        SYS_PORT_TRACING_OBJ_FUNC(k_queue, peek_head, queue, ret);

        return ret;
}

void *z_impl_k_queue_peek_tail(struct k_queue *queue)
{
        void *ret = z_queue_node_peek(sys_sflist_peek_tail(&queue->data_q), false);

        SYS_PORT_TRACING_OBJ_FUNC(k_queue, peek_tail, queue, ret);

        return ret;
}

#ifdef CONFIG_USERSPACE
static inline void *z_vrfy_k_queue_get(struct k_queue *queue,
                                       k_timeout_t timeout)
{
        K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
        return z_impl_k_queue_get(queue, timeout);
}
#include <syscalls/k_queue_get_mrsh.c>

static inline int z_vrfy_k_queue_is_empty(struct k_queue *queue)
{
        K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
        return z_impl_k_queue_is_empty(queue);
}
#include <syscalls/k_queue_is_empty_mrsh.c>

static inline void *z_vrfy_k_queue_peek_head(struct k_queue *queue)
{
        K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
        return z_impl_k_queue_peek_head(queue);
}
#include <syscalls/k_queue_peek_head_mrsh.c>

static inline void *z_vrfy_k_queue_peek_tail(struct k_queue *queue)
{
        K_OOPS(K_SYSCALL_OBJ(queue, K_OBJ_QUEUE));
        return z_impl_k_queue_peek_tail(queue);
}
#include <syscalls/k_queue_peek_tail_mrsh.c>

#endif /* CONFIG_USERSPACE */

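/* When object cores are enabled, register the statically defined FIFOs and
 * LIFOs (thin wrappers around k_queue) with the object core framework at
 * early boot.
 */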
#ifdef CONFIG_OBJ_CORE_FIFO
struct k_obj_type _obj_type_fifo;

static int init_fifo_obj_core_list(void)
{
        /* Initialize fifo object type */

        z_obj_type_init(&_obj_type_fifo, K_OBJ_TYPE_FIFO_ID,
                        offsetof(struct k_fifo, obj_core));

        /* Initialize and link statically defined fifos */

        STRUCT_SECTION_FOREACH(k_fifo, fifo) {
                k_obj_core_init_and_link(K_OBJ_CORE(fifo), &_obj_type_fifo);
        }

        return 0;
}

SYS_INIT(init_fifo_obj_core_list, PRE_KERNEL_1,
         CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif

#ifdef CONFIG_OBJ_CORE_LIFO
struct k_obj_type _obj_type_lifo;

static int init_lifo_obj_core_list(void)
{
        /* Initialize lifo object type */

        z_obj_type_init(&_obj_type_lifo, K_OBJ_TYPE_LIFO_ID,
                        offsetof(struct k_lifo, obj_core));

        /* Initialize and link statically defined lifos */

        STRUCT_SECTION_FOREACH(k_lifo, lifo) {
                k_obj_core_init_and_link(K_OBJ_CORE(lifo), &_obj_type_lifo);
        }

        return 0;
}

SYS_INIT(init_lifo_obj_core_list, PRE_KERNEL_1,
         CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif