Luiz Augusto von Dentz | a7ddb87 | 2017-02-21 14:50:42 +0200 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2010-2016 Wind River Systems, Inc. |
| 3 | * |
| 4 | * SPDX-License-Identifier: Apache-2.0 |
| 5 | */ |
| 6 | |
| 7 | /** |
| 8 | * @file |
| 9 | * |
| 10 | * @brief dynamic-size QUEUE object. |
| 11 | */ |
| 12 | |
| 13 | |
| 14 | #include <kernel.h> |
| 15 | #include <kernel_structs.h> |
| 16 | #include <debug/object_tracing_common.h> |
| 17 | #include <toolchain.h> |
Anas Nashif | 397d29d | 2017-06-17 11:30:47 -0400 | [diff] [blame] | 18 | #include <linker/sections.h> |
Luiz Augusto von Dentz | a7ddb87 | 2017-02-21 14:50:42 +0200 | [diff] [blame] | 19 | #include <wait_q.h> |
| 20 | #include <ksched.h> |
Andrew Boie | 2b9b4b2 | 2018-04-27 13:21:22 -0700 | [diff] [blame] | 21 | #include <misc/sflist.h> |
Luiz Augusto von Dentz | a7ddb87 | 2017-02-21 14:50:42 +0200 | [diff] [blame] | 22 | #include <init.h> |
Andrew Boie | 2b9b4b2 | 2018-04-27 13:21:22 -0700 | [diff] [blame] | 23 | #include <syscall_handler.h> |
Luiz Augusto von Dentz | a7ddb87 | 2017-02-21 14:50:42 +0200 | [diff] [blame] | 24 | |
| 25 | extern struct k_queue _k_queue_list_start[]; |
| 26 | extern struct k_queue _k_queue_list_end[]; |
| 27 | |
/* Wrapper node used when the kernel allocates queue storage on behalf of
 * the caller (k_queue_alloc_append/prepend): 'node' links into the queue's
 * sflist (its flag bit marks the node as kernel-allocated), while 'data'
 * holds the caller's original pointer.
 */
struct alloc_node {
	sys_sfnode_t node;	/* list linkage; flag 0x1 = allocated wrapper */
	void *data;		/* user data pointer supplied at enqueue time */
};
| 32 | |
| 33 | void *z_queue_node_peek(sys_sfnode_t *node, bool needs_free) |
| 34 | { |
| 35 | void *ret; |
| 36 | |
| 37 | if (node && sys_sfnode_flags_get(node)) { |
| 38 | /* If the flag is set, then the enqueue operation for this item |
| 39 | * did a behind-the scenes memory allocation of an alloc_node |
| 40 | * struct, which is what got put in the queue. Free it and pass |
| 41 | * back the data pointer. |
| 42 | */ |
| 43 | struct alloc_node *anode; |
| 44 | |
| 45 | anode = CONTAINER_OF(node, struct alloc_node, node); |
| 46 | ret = anode->data; |
| 47 | if (needs_free) { |
| 48 | k_free(anode); |
| 49 | } |
| 50 | } else { |
| 51 | /* Data was directly placed in the queue, the first 4 bytes |
| 52 | * reserved for the linked list. User mode isn't allowed to |
| 53 | * do this, although it can get data sent this way. |
| 54 | */ |
| 55 | ret = (void *)node; |
| 56 | } |
| 57 | |
| 58 | return ret; |
| 59 | } |
| 60 | |
Luiz Augusto von Dentz | a7ddb87 | 2017-02-21 14:50:42 +0200 | [diff] [blame] | 61 | #ifdef CONFIG_OBJECT_TRACING |
| 62 | |
Maciek Borzecki | 059544d | 2017-05-18 12:16:45 +0200 | [diff] [blame] | 63 | struct k_queue *_trace_list_k_queue; |
| 64 | |
Luiz Augusto von Dentz | a7ddb87 | 2017-02-21 14:50:42 +0200 | [diff] [blame] | 65 | /* |
| 66 | * Complete initialization of statically defined queues. |
| 67 | */ |
| 68 | static int init_queue_module(struct device *dev) |
| 69 | { |
| 70 | ARG_UNUSED(dev); |
| 71 | |
| 72 | struct k_queue *queue; |
| 73 | |
| 74 | for (queue = _k_queue_list_start; queue < _k_queue_list_end; queue++) { |
| 75 | SYS_TRACING_OBJ_INIT(k_queue, queue); |
| 76 | } |
| 77 | return 0; |
| 78 | } |
| 79 | |
| 80 | SYS_INIT(init_queue_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS); |
| 81 | |
| 82 | #endif /* CONFIG_OBJECT_TRACING */ |
| 83 | |
/**
 * @brief Initialize a queue object (kernel-side implementation).
 *
 * Sets up the empty data list and the wait queue and, when CONFIG_POLL is
 * enabled, the list of registered poll events; then registers the object
 * for tracing and marks it as a valid kernel object for user mode.
 *
 * @param queue Address of the queue to initialize.
 */
void _impl_k_queue_init(struct k_queue *queue)
{
	sys_sflist_init(&queue->data_q);
	_waitq_init(&queue->wait_q);
#if defined(CONFIG_POLL)
	sys_dlist_init(&queue->poll_events);
#endif

	SYS_TRACING_OBJ_INIT(k_queue, queue);
	_k_object_init(queue);
}
| 95 | |
Andrew Boie | 2b9b4b2 | 2018-04-27 13:21:22 -0700 | [diff] [blame] | 96 | #ifdef CONFIG_USERSPACE |
/* Syscall handler for k_queue_init(): verifies the object has never been
 * initialized (user mode may only init a queue object once), then defers
 * to the kernel implementation.
 */
Z_SYSCALL_HANDLER(k_queue_init, queue_ptr)
{
	struct k_queue *queue = (struct k_queue *)queue_ptr;

	Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(queue, K_OBJ_QUEUE));
	_impl_k_queue_init(queue);
	return 0;
}
| 105 | #endif |
| 106 | |
#if !defined(CONFIG_POLL)
/* Make a thread that was pending on the queue runnable and hand it 'data'
 * directly: the value is delivered via the thread's swap data with a
 * return value of 0, so its blocked k_queue_get() returns 'data'.
 */
static void prepare_thread_to_run(struct k_thread *thread, void *data)
{
	_ready_thread(thread);
	_set_thread_return_value_with_data(thread, 0, data);
}
#endif /* CONFIG_POLL */
Luiz Augusto von Dentz | a7ddb87 | 2017-02-21 14:50:42 +0200 | [diff] [blame] | 114 | |
#ifdef CONFIG_POLL
/* Notify all k_poll() events registered on this queue that 'state'
 * (e.g. data available, cancellation) has occurred.
 */
static inline void handle_poll_events(struct k_queue *queue, u32_t state)
{
	_handle_obj_poll_events(&queue->poll_events, state);
}
#endif
Luiz Augusto von Dentz | a7ddb87 | 2017-02-21 14:50:42 +0200 | [diff] [blame] | 121 | |
/* Cancel a wait on the queue: without CONFIG_POLL, wake the first pending
 * thread with a NULL payload (its k_queue_get() returns NULL); with
 * CONFIG_POLL, signal registered pollers with K_POLL_STATE_CANCELLED.
 */
void _impl_k_queue_cancel_wait(struct k_queue *queue)
{
	unsigned int key = irq_lock();
#if !defined(CONFIG_POLL)
	struct k_thread *first_pending_thread;

	first_pending_thread = _unpend_first_thread(&queue->wait_q);

	if (first_pending_thread != NULL) {
		/* NULL data: the woken waiter observes a NULL result */
		prepare_thread_to_run(first_pending_thread, NULL);
	}
#else
	handle_poll_events(queue, K_POLL_STATE_CANCELLED);
#endif /* !CONFIG_POLL */

	/* releases the irq lock and swaps in a new thread if needed */
	_reschedule(key);
}
| 139 | |
#ifdef CONFIG_USERSPACE
/* Auto-generated syscall handler: validates the queue object, then calls
 * _impl_k_queue_cancel_wait().
 */
Z_SYSCALL_HANDLER1_SIMPLE_VOID(k_queue_cancel_wait, K_OBJ_QUEUE,
			       struct k_queue *);
#endif
| 144 | |
| 145 | static int queue_insert(struct k_queue *queue, void *prev, void *data, |
| 146 | bool alloc) |
Luiz Augusto von Dentz | a7ddb87 | 2017-02-21 14:50:42 +0200 | [diff] [blame] | 147 | { |
Luiz Augusto von Dentz | 84db641 | 2017-07-13 12:43:59 +0300 | [diff] [blame] | 148 | unsigned int key = irq_lock(); |
| 149 | #if !defined(CONFIG_POLL) |
Luiz Augusto von Dentz | a7ddb87 | 2017-02-21 14:50:42 +0200 | [diff] [blame] | 150 | struct k_thread *first_pending_thread; |
Luiz Augusto von Dentz | a7ddb87 | 2017-02-21 14:50:42 +0200 | [diff] [blame] | 151 | |
| 152 | first_pending_thread = _unpend_first_thread(&queue->wait_q); |
| 153 | |
Flavio Ceolin | 4218d5f | 2018-09-17 09:39:51 -0700 | [diff] [blame^] | 154 | if (first_pending_thread != NULL) { |
Luiz Augusto von Dentz | a7ddb87 | 2017-02-21 14:50:42 +0200 | [diff] [blame] | 155 | prepare_thread_to_run(first_pending_thread, data); |
Andy Ross | 15cb5d7 | 2018-04-02 18:40:10 -0700 | [diff] [blame] | 156 | _reschedule(key); |
Andrew Boie | 2b9b4b2 | 2018-04-27 13:21:22 -0700 | [diff] [blame] | 157 | return 0; |
Luiz Augusto von Dentz | a7ddb87 | 2017-02-21 14:50:42 +0200 | [diff] [blame] | 158 | } |
Luiz Augusto von Dentz | 84db641 | 2017-07-13 12:43:59 +0300 | [diff] [blame] | 159 | #endif /* !CONFIG_POLL */ |
| 160 | |
Andrew Boie | 2b9b4b2 | 2018-04-27 13:21:22 -0700 | [diff] [blame] | 161 | /* Only need to actually allocate if no threads are pending */ |
| 162 | if (alloc) { |
| 163 | struct alloc_node *anode; |
| 164 | |
| 165 | anode = z_thread_malloc(sizeof(*anode)); |
Flavio Ceolin | 4218d5f | 2018-09-17 09:39:51 -0700 | [diff] [blame^] | 166 | if (anode == NULL) { |
Andrew Boie | 2b9b4b2 | 2018-04-27 13:21:22 -0700 | [diff] [blame] | 167 | return -ENOMEM; |
| 168 | } |
| 169 | anode->data = data; |
| 170 | sys_sfnode_init(&anode->node, 0x1); |
| 171 | data = anode; |
| 172 | } else { |
| 173 | sys_sfnode_init(data, 0x0); |
| 174 | } |
| 175 | sys_sflist_insert(&queue->data_q, prev, data); |
Luiz Augusto von Dentz | 84db641 | 2017-07-13 12:43:59 +0300 | [diff] [blame] | 176 | |
| 177 | #if defined(CONFIG_POLL) |
Andy Ross | 8606fab | 2018-03-26 10:54:40 -0700 | [diff] [blame] | 178 | handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE); |
Luiz Augusto von Dentz | 84db641 | 2017-07-13 12:43:59 +0300 | [diff] [blame] | 179 | #endif /* CONFIG_POLL */ |
Luiz Augusto von Dentz | a7ddb87 | 2017-02-21 14:50:42 +0200 | [diff] [blame] | 180 | |
Andy Ross | 15cb5d7 | 2018-04-02 18:40:10 -0700 | [diff] [blame] | 181 | _reschedule(key); |
Andrew Boie | 2b9b4b2 | 2018-04-27 13:21:22 -0700 | [diff] [blame] | 182 | return 0; |
| 183 | } |
| 184 | |
| 185 | void k_queue_insert(struct k_queue *queue, void *prev, void *data) |
| 186 | { |
Flavio Ceolin | cc74ad0 | 2018-08-13 14:34:11 -0700 | [diff] [blame] | 187 | (void)queue_insert(queue, prev, data, false); |
Luiz Augusto von Dentz | a7ddb87 | 2017-02-21 14:50:42 +0200 | [diff] [blame] | 188 | } |
| 189 | |
| 190 | void k_queue_append(struct k_queue *queue, void *data) |
| 191 | { |
Flavio Ceolin | cc74ad0 | 2018-08-13 14:34:11 -0700 | [diff] [blame] | 192 | (void)queue_insert(queue, sys_sflist_peek_tail(&queue->data_q), |
| 193 | data, false); |
Luiz Augusto von Dentz | a7ddb87 | 2017-02-21 14:50:42 +0200 | [diff] [blame] | 194 | } |
| 195 | |
| 196 | void k_queue_prepend(struct k_queue *queue, void *data) |
| 197 | { |
Flavio Ceolin | cc74ad0 | 2018-08-13 14:34:11 -0700 | [diff] [blame] | 198 | (void)queue_insert(queue, NULL, data, false); |
Luiz Augusto von Dentz | a7ddb87 | 2017-02-21 14:50:42 +0200 | [diff] [blame] | 199 | } |
| 200 | |
Andrew Boie | 2b9b4b2 | 2018-04-27 13:21:22 -0700 | [diff] [blame] | 201 | int _impl_k_queue_alloc_append(struct k_queue *queue, void *data) |
| 202 | { |
| 203 | return queue_insert(queue, sys_sflist_peek_tail(&queue->data_q), data, |
| 204 | true); |
| 205 | } |
| 206 | |
| 207 | #ifdef CONFIG_USERSPACE |
Andrew Boie | 8345e5e | 2018-05-04 15:57:57 -0700 | [diff] [blame] | 208 | Z_SYSCALL_HANDLER(k_queue_alloc_append, queue, data) |
Andrew Boie | 2b9b4b2 | 2018-04-27 13:21:22 -0700 | [diff] [blame] | 209 | { |
Andrew Boie | 8345e5e | 2018-05-04 15:57:57 -0700 | [diff] [blame] | 210 | Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE)); |
Andrew Boie | 2b9b4b2 | 2018-04-27 13:21:22 -0700 | [diff] [blame] | 211 | |
| 212 | return _impl_k_queue_alloc_append((struct k_queue *)queue, |
| 213 | (void *)data); |
| 214 | } |
| 215 | #endif |
| 216 | |
| 217 | int _impl_k_queue_alloc_prepend(struct k_queue *queue, void *data) |
| 218 | { |
| 219 | return queue_insert(queue, NULL, data, true); |
| 220 | } |
| 221 | |
| 222 | #ifdef CONFIG_USERSPACE |
Andrew Boie | 8345e5e | 2018-05-04 15:57:57 -0700 | [diff] [blame] | 223 | Z_SYSCALL_HANDLER(k_queue_alloc_prepend, queue, data) |
Andrew Boie | 2b9b4b2 | 2018-04-27 13:21:22 -0700 | [diff] [blame] | 224 | { |
Andrew Boie | 8345e5e | 2018-05-04 15:57:57 -0700 | [diff] [blame] | 225 | Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE)); |
Andrew Boie | 2b9b4b2 | 2018-04-27 13:21:22 -0700 | [diff] [blame] | 226 | |
| 227 | return _impl_k_queue_alloc_prepend((struct k_queue *)queue, |
| 228 | (void *)data); |
| 229 | } |
| 230 | #endif |
| 231 | |
/**
 * @brief Atomically append a singly-linked, NULL-terminated list of items.
 *
 * Without CONFIG_POLL, items are handed one at a time to threads already
 * pending on the queue; only the remainder (if any) is spliced onto the
 * data list. With CONFIG_POLL the whole list is appended and pollers are
 * notified.
 *
 * @param queue Queue to append to.
 * @param head First node; its first word must be the 'next' pointer.
 * @param tail Last node; its 'next' pointer must be NULL.
 */
void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
{
	__ASSERT(head && tail, "invalid head or tail");

	unsigned int key = irq_lock();
#if !defined(CONFIG_POLL)
	struct k_thread *thread;

	/* Wake one pending thread per item, consuming from the head */
	while (head && ((thread = _unpend_first_thread(&queue->wait_q)))) {
		prepare_thread_to_run(thread, head);
		/* advance: the node's first word is its 'next' pointer */
		head = *(void **)head;
	}

	if (head != NULL) {
		sys_sflist_append_list(&queue->data_q, head, tail);
	}

#else
	sys_sflist_append_list(&queue->data_q, head, tail);
	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
#endif /* !CONFIG_POLL */

	/* releases the irq lock */
	_reschedule(key);
}
| 256 | |
| 257 | void k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list) |
| 258 | { |
| 259 | __ASSERT(!sys_slist_is_empty(list), "list must not be empty"); |
| 260 | |
| 261 | /* |
| 262 | * note: this works as long as: |
| 263 | * - the slist implementation keeps the next pointer as the first |
| 264 | * field of the node object type |
| 265 | * - list->tail->next = NULL. |
Andrew Boie | 2b9b4b2 | 2018-04-27 13:21:22 -0700 | [diff] [blame] | 266 | * - sflist implementation only differs from slist by stuffing |
| 267 | * flag bytes in the lower order bits of the data pointer |
| 268 | * - source list is really an slist and not an sflist with flags set |
Luiz Augusto von Dentz | a7ddb87 | 2017-02-21 14:50:42 +0200 | [diff] [blame] | 269 | */ |
| 270 | k_queue_append_list(queue, list->head, list->tail); |
| 271 | sys_slist_init(list); |
| 272 | } |
| 273 | |
#if defined(CONFIG_POLL)
/* Block on the queue via k_poll() until an item arrives or 'timeout'
 * (ms, or K_FOREVER) expires; return the dequeued item or NULL.
 *
 * The poll event firing does not guarantee an item is still present
 * (another consumer may race us to it), so we loop, tracking elapsed
 * time ourselves and charging only the remaining budget on each retry.
 */
static void *k_queue_poll(struct k_queue *queue, s32_t timeout)
{
	struct k_poll_event event;
	int err, elapsed = 0, done = 0;
	unsigned int key;
	void *val;
	u32_t start;	/* only set/used when timeout != K_FOREVER */

	k_poll_event_init(&event, K_POLL_TYPE_FIFO_DATA_AVAILABLE,
			  K_POLL_MODE_NOTIFY_ONLY, queue);

	if (timeout != K_FOREVER) {
		start = k_uptime_get_32();
	}

	do {
		event.state = K_POLL_STATE_NOT_READY;

		err = k_poll(&event, 1, timeout - elapsed);

		/* -EAGAIN (timeout) falls through to one last dequeue try */
		if (err && err != -EAGAIN) {
			return NULL;
		}

		/* sys_sflist_* aren't threadsafe, so must be always protected
		 * by irq_lock.
		 */
		key = irq_lock();
		val = z_queue_node_peek(sys_sflist_get(&queue->data_q), true);
		irq_unlock(key);

		if ((val == NULL) && (timeout != K_FOREVER)) {
			elapsed = k_uptime_get_32() - start;
			done = elapsed > timeout;
		}
	} while (!val && !done);

	return val;
}
#endif /* CONFIG_POLL */
| 315 | |
/**
 * @brief Get an element from the queue (kernel-side implementation).
 *
 * @param queue Queue to read from.
 * @param timeout K_NO_WAIT, K_FOREVER, or a wait limit in ms.
 *
 * @return Dequeued data pointer, or NULL if the queue stayed empty for
 *         the whole timeout (or the wait was cancelled).
 */
void *_impl_k_queue_get(struct k_queue *queue, s32_t timeout)
{
	unsigned int key;
	void *data;

	key = irq_lock();

	if (likely(!sys_sflist_is_empty(&queue->data_q))) {
		sys_sfnode_t *node;

		node = sys_sflist_get_not_empty(&queue->data_q);
		/* needs_free=true: releases any kernel-allocated wrapper */
		data = z_queue_node_peek(node, true);
		irq_unlock(key);
		return data;
	}

	if (timeout == K_NO_WAIT) {
		irq_unlock(key);
		return NULL;
	}

#if defined(CONFIG_POLL)
	irq_unlock(key);

	return k_queue_poll(queue, timeout);

#else
	/* _pend_current_thread() consumes the irq lock key; a producer
	 * delivers the item via swap_data (see prepare_thread_to_run()).
	 */
	int ret = _pend_current_thread(key, &queue->wait_q, timeout);

	return ret ? NULL : _current->base.swap_data;
#endif /* CONFIG_POLL */
}
Andrew Boie | 2b9b4b2 | 2018-04-27 13:21:22 -0700 | [diff] [blame] | 348 | |
| 349 | #ifdef CONFIG_USERSPACE |
Andrew Boie | 8345e5e | 2018-05-04 15:57:57 -0700 | [diff] [blame] | 350 | Z_SYSCALL_HANDLER(k_queue_get, queue, timeout_p) |
Andrew Boie | 2b9b4b2 | 2018-04-27 13:21:22 -0700 | [diff] [blame] | 351 | { |
| 352 | s32_t timeout = timeout_p; |
| 353 | |
Andrew Boie | 8345e5e | 2018-05-04 15:57:57 -0700 | [diff] [blame] | 354 | Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE)); |
Andrew Boie | 2b9b4b2 | 2018-04-27 13:21:22 -0700 | [diff] [blame] | 355 | |
| 356 | return (u32_t)_impl_k_queue_get((struct k_queue *)queue, timeout); |
| 357 | } |
| 358 | |
Andrew Boie | 8345e5e | 2018-05-04 15:57:57 -0700 | [diff] [blame] | 359 | Z_SYSCALL_HANDLER1_SIMPLE(k_queue_is_empty, K_OBJ_QUEUE, struct k_queue *); |
| 360 | Z_SYSCALL_HANDLER1_SIMPLE(k_queue_peek_head, K_OBJ_QUEUE, struct k_queue *); |
| 361 | Z_SYSCALL_HANDLER1_SIMPLE(k_queue_peek_tail, K_OBJ_QUEUE, struct k_queue *); |
Andrew Boie | 2b9b4b2 | 2018-04-27 13:21:22 -0700 | [diff] [blame] | 362 | #endif /* CONFIG_USERSPACE */ |