blob: 36ff8908a5376c9be8fff9ab5ce5691b13a71750 [file] [log] [blame]
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +02001/*
2 * Copyright (c) 2010-2016 Wind River Systems, Inc.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7/**
8 * @file
9 *
10 * @brief dynamic-size QUEUE object.
11 */
12
13
14#include <kernel.h>
15#include <kernel_structs.h>
16#include <debug/object_tracing_common.h>
17#include <toolchain.h>
Anas Nashif397d29d2017-06-17 11:30:47 -040018#include <linker/sections.h>
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +020019#include <wait_q.h>
20#include <ksched.h>
21#include <misc/slist.h>
22#include <init.h>
Andy Ross9c62cc62018-01-25 15:24:15 -080023#include <kswap.h>
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +020024
25extern struct k_queue _k_queue_list_start[];
26extern struct k_queue _k_queue_list_end[];
27
#ifdef CONFIG_OBJECT_TRACING

struct k_queue *_trace_list_k_queue;

/*
 * Complete initialization of statically defined queues: walk the
 * linker-generated section and hook each one into the tracing list.
 */
static int init_queue_module(struct device *dev)
{
	struct k_queue *queue = _k_queue_list_start;

	ARG_UNUSED(dev);

	while (queue < _k_queue_list_end) {
		SYS_TRACING_OBJ_INIT(k_queue, queue);
		queue++;
	}

	return 0;
}

SYS_INIT(init_queue_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

#endif /* CONFIG_OBJECT_TRACING */
50
51void k_queue_init(struct k_queue *queue)
52{
53 sys_slist_init(&queue->data_q);
54 sys_dlist_init(&queue->wait_q);
Luiz Augusto von Dentz7d01c5e2017-08-21 10:49:29 +030055#if defined(CONFIG_POLL)
56 sys_dlist_init(&queue->poll_events);
57#endif
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +020058
59 SYS_TRACING_OBJ_INIT(k_queue, queue);
60}
61
#if !defined(CONFIG_POLL)
/*
 * Make a pended thread runnable again and hand it @a data as the result of
 * its wait: cancel its wait timeout, put it back on the ready queue, and
 * set its swap return value to 0 with @a data as the returned payload.
 *
 * Callers in this file invoke this with interrupts locked and follow up
 * with a reschedule call.
 */
static void prepare_thread_to_run(struct k_thread *thread, void *data)
{
	_abort_thread_timeout(thread);
	_ready_thread(thread);
	_set_thread_return_value_with_data(thread, 0, data);
}
#endif /* CONFIG_POLL */
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +020070
Andy Ross8606fab2018-03-26 10:54:40 -070071static inline void handle_poll_events(struct k_queue *queue, u32_t state)
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +020072{
73#ifdef CONFIG_POLL
Andy Ross8606fab2018-03-26 10:54:40 -070074 _handle_obj_poll_events(&queue->poll_events, state);
Luiz Augusto von Dentza7ddb872017-02-21 14:50:42 +020075#endif
76}
77
/*
 * Wake the first thread (if any) pending on @a queue without giving it any
 * data, so a blocked k_queue_get() stops waiting.
 *
 * Without CONFIG_POLL the first waiter is unpended and readied with a NULL
 * payload; with CONFIG_POLL, registered pollers are signalled with
 * K_POLL_STATE_NOT_READY instead. Either way a reschedule point follows.
 */
void k_queue_cancel_wait(struct k_queue *queue)
{
	unsigned int key = irq_lock();
#if !defined(CONFIG_POLL)
	struct k_thread *first_pending_thread;

	first_pending_thread = _unpend_first_thread(&queue->wait_q);

	if (first_pending_thread) {
		prepare_thread_to_run(first_pending_thread, NULL);
	}
#else
	handle_poll_events(queue, K_POLL_STATE_NOT_READY);
#endif /* !CONFIG_POLL */

	_reschedule_noyield(key);
}
95
/*
 * Insert @a data into @a queue after node @a prev (NULL @a prev prepends;
 * see k_queue_prepend()/k_queue_append()).
 *
 * Without CONFIG_POLL, if a consumer is already pending the element is
 * handed to it directly and never touches the data list; the early return
 * skips the list insertion entirely. Otherwise (and always with
 * CONFIG_POLL) the element is linked into the list, and with CONFIG_POLL
 * pollers are notified that data is available.
 */
void k_queue_insert(struct k_queue *queue, void *prev, void *data)
{
	unsigned int key = irq_lock();
#if !defined(CONFIG_POLL)
	struct k_thread *first_pending_thread;

	first_pending_thread = _unpend_first_thread(&queue->wait_q);

	if (first_pending_thread) {
		prepare_thread_to_run(first_pending_thread, data);
		_reschedule_noyield(key);
		return;
	}
#endif /* !CONFIG_POLL */

	sys_slist_insert(&queue->data_q, prev, data);

#if defined(CONFIG_POLL)
	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
#endif /* CONFIG_POLL */

	_reschedule_noyield(key);
}
119
120void k_queue_append(struct k_queue *queue, void *data)
121{
122 return k_queue_insert(queue, queue->data_q.tail, data);
123}
124
125void k_queue_prepend(struct k_queue *queue, void *data)
126{
127 return k_queue_insert(queue, NULL, data);
128}
129
/*
 * Append a NULL-terminated singly linked chain of nodes, @a head through
 * @a tail, to @a queue in one operation.
 *
 * Without CONFIG_POLL, nodes are first handed out one-by-one to any
 * pending consumers (each waiter is readied with one node); whatever
 * remains is spliced onto the data list. With CONFIG_POLL the whole chain
 * is spliced and pollers are notified.
 */
void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
{
	__ASSERT(head && tail, "invalid head or tail");

	unsigned int key = irq_lock();
#if !defined(CONFIG_POLL)
	struct k_thread *thread;

	while (head && ((thread = _unpend_first_thread(&queue->wait_q)))) {
		prepare_thread_to_run(thread, head);
		/* A node's 'next' pointer lives in its first word */
		head = *(void **)head;
	}

	if (head) {
		sys_slist_append_list(&queue->data_q, head, tail);
	}

#else
	sys_slist_append_list(&queue->data_q, head, tail);
	handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
#endif /* !CONFIG_POLL */

	_reschedule_noyield(key);
}
154
/*
 * Move every node of @a list onto the tail of @a queue, leaving @a list
 * empty. The list must be appended before it is re-initialized, since
 * append consumes its head/tail pointers.
 */
void k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
{
	__ASSERT(!sys_slist_is_empty(list), "list must not be empty");

	/*
	 * note: this works as long as:
	 * - the slist implementation keeps the next pointer as the first
	 *   field of the node object type
	 * - list->tail->next = NULL.
	 */
	k_queue_append_list(queue, list->head, list->tail);
	sys_slist_init(list);
}
168
#if defined(CONFIG_POLL)
/*
 * CONFIG_POLL backend for k_queue_get(): wait up to @a timeout for data to
 * appear on @a queue, then pop and return the head element, or NULL if
 * k_poll() fails or times out.
 *
 * Between k_poll() signalling data-available and sys_slist_get() running,
 * another consumer may have taken the element; with K_FOREVER we therefore
 * loop and re-poll until an element is actually obtained.
 */
static void *k_queue_poll(struct k_queue *queue, s32_t timeout)
{
	struct k_poll_event event;
	int err;
	unsigned int key;
	void *val;

	k_poll_event_init(&event, K_POLL_TYPE_FIFO_DATA_AVAILABLE,
			  K_POLL_MODE_NOTIFY_ONLY, queue);

	do {
		event.state = K_POLL_STATE_NOT_READY;

		err = k_poll(&event, 1, timeout);
		if (err) {
			return NULL;
		}

		__ASSERT_NO_MSG(event.state ==
				K_POLL_STATE_FIFO_DATA_AVAILABLE);

		/* sys_slist_* aren't threadsafe, so must be always protected by
		 * irq_lock.
		 */
		key = irq_lock();
		val = sys_slist_get(&queue->data_q);
		irq_unlock(key);
	} while (!val && timeout == K_FOREVER);

	return val;
}
#endif /* CONFIG_POLL */
202
/*
 * Pop and return the head element of @a queue, waiting up to @a timeout
 * (K_NO_WAIT / K_FOREVER / ms) if the queue is empty. Returns NULL on
 * timeout or when an empty queue is polled with K_NO_WAIT.
 *
 * The fast path takes the element under irq_lock without any context
 * switch. Blocking is implemented via k_poll() when CONFIG_POLL is
 * enabled, otherwise by pending on the queue's wait_q; in the latter case
 * the producer delivers the element through the thread's swap_data (see
 * prepare_thread_to_run()).
 */
void *k_queue_get(struct k_queue *queue, s32_t timeout)
{
	unsigned int key;
	void *data;

	key = irq_lock();

	if (likely(!sys_slist_is_empty(&queue->data_q))) {
		data = sys_slist_get_not_empty(&queue->data_q);
		irq_unlock(key);
		return data;
	}

	if (timeout == K_NO_WAIT) {
		irq_unlock(key);
		return NULL;
	}

#if defined(CONFIG_POLL)
	irq_unlock(key);

	return k_queue_poll(queue, timeout);

#else
	/* _pend_current_thread() releases the lock and returns non-zero on
	 * timeout; on success the producer stashed the element in swap_data.
	 */
	int ret = _pend_current_thread(key, &queue->wait_q, timeout);

	return ret ? NULL : _current->base.swap_data;
#endif /* CONFIG_POLL */
}