blob: d7da8e3c8e49ae054eb6b23a4bbecc5c3d559274 [file] [log] [blame]
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001/*
2 * Copyright (c) 2016 Wind River Systems, Inc.
3 *
David B. Kinderac74d8b2017-01-18 17:01:01 -08004 * SPDX-License-Identifier: Apache-2.0
Benjamin Walsh456c6da2016-09-02 18:55:39 -04005 */
6
7/**
8 * @brief Mailboxes.
9 */
10
Gerard Marull-Paretascffefc82022-05-06 11:04:23 +020011#include <zephyr/kernel.h>
12#include <zephyr/kernel_structs.h>
Anas Nashif4d994af2021-04-18 23:24:40 -040013
Gerard Marull-Paretascffefc82022-05-06 11:04:23 +020014#include <zephyr/toolchain.h>
15#include <zephyr/linker/sections.h>
Benjamin Walsh456c6da2016-09-02 18:55:39 -040016#include <string.h>
Gerard Marull-Paretascffefc82022-05-06 11:04:23 +020017#include <zephyr/sys/dlist.h>
18#include <zephyr/init.h>
Anas Nashif8634c3b2023-08-29 17:03:12 +000019/* private kernel APIs */
20#include <ksched.h>
Anas Nashifebb503f2024-03-07 18:00:45 -050021#include <kthread.h>
Anas Nashif8634c3b2023-08-29 17:03:12 +000022#include <wait_q.h>
Benjamin Walsh456c6da2016-09-02 18:55:39 -040023
Peter Mitsis6df8efe2023-05-11 14:06:46 -040024#ifdef CONFIG_OBJ_CORE_MAILBOX
25static struct k_obj_type obj_type_mailbox;
Simon Heinbcd1d192024-03-08 12:00:10 +010026#endif /* CONFIG_OBJ_CORE_MAILBOX */
Peter Mitsis6df8efe2023-05-11 14:06:46 -040027
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)

/*
 * Asynchronous message descriptor type.
 *
 * Pairs a transmit message descriptor with a dummy thread object; the dummy
 * thread stands in for the (absent) sending thread on the mailbox wait queue
 * so the same synchronous machinery can service asynchronous sends.
 */
struct k_mbox_async {
	struct _thread_base thread;	/* dummy thread object */
	struct k_mbox_msg tx_msg;	/* transmit message descriptor */
};

/* stack of unused asynchronous message descriptors */
K_STACK_DEFINE(async_msg_free, CONFIG_NUM_MBOX_ASYNC_MSGS);

/*
 * Allocate an asynchronous message descriptor.
 *
 * Pops a free descriptor off the shared stack; blocks (K_FOREVER) until one
 * becomes available, so the pop's return value can safely be ignored.
 */
static inline void mbox_async_alloc(struct k_mbox_async **async)
{
	(void)k_stack_pop(&async_msg_free, (stack_data_t *)async, K_FOREVER);
}

/* Free an asynchronous message descriptor by returning it to the stack. */
static inline void mbox_async_free(struct k_mbox_async *async)
{
	k_stack_push(&async_msg_free, (stack_data_t)async);
}

/*
 * Do run-time initialization of mailbox object subsystem.
 */
static int init_mbox_module(void)
{
	/* array of asynchronous message descriptors */
	static struct k_mbox_async __noinit async_msg[CONFIG_NUM_MBOX_ASYNC_MSGS];

	/*
	 * Create pool of asynchronous message descriptors.
	 *
	 * A dummy thread requires minimal initialization, since it never gets
	 * to execute. The _THREAD_DUMMY flag is sufficient to distinguish a
	 * dummy thread from a real one. The threads are *not* added to the
	 * kernel's list of known threads.
	 *
	 * Once initialized, the address of each descriptor is added to a stack
	 * that governs access to them.
	 */

	int i;

	for (i = 0; i < CONFIG_NUM_MBOX_ASYNC_MSGS; i++) {
		z_init_thread_base(&async_msg[i].thread, 0, _THREAD_DUMMY, 0);
		k_stack_push(&async_msg_free, (stack_data_t)&async_msg[i]);
	}

	/* Complete initialization of statically defined mailboxes. */

	return 0;
}

SYS_INIT(init_mbox_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS > 0 */
Benjamin Walsh456c6da2016-09-02 18:55:39 -040086
/*
 * Initialize a mailbox object: empty both wait queues, reset the spinlock,
 * and (when object cores are enabled) register the mailbox with the object
 * core subsystem before emitting the tracing init event.
 */
void k_mbox_init(struct k_mbox *mbox)
{
	z_waitq_init(&mbox->tx_msg_queue);
	z_waitq_init(&mbox->rx_msg_queue);
	mbox->lock = (struct k_spinlock) {};

#ifdef CONFIG_OBJ_CORE_MAILBOX
	k_obj_core_init_and_link(K_OBJ_CORE(mbox), &obj_type_mailbox);
#endif /* CONFIG_OBJ_CORE_MAILBOX */

	SYS_PORT_TRACING_OBJ_INIT(k_mbox, mbox);
}
99
100/**
101 * @brief Check compatibility of sender's and receiver's message descriptors.
102 *
103 * Compares sender's and receiver's message descriptors to see if they are
104 * compatible. If so, the descriptor fields are updated to reflect that a
105 * match has occurred.
106 *
107 * @param tx_msg Pointer to transmit message descriptor.
108 * @param rx_msg Pointer to receive message descriptor.
109 *
110 * @return 0 if successfully matched, otherwise -1.
111 */
Leandro Pereiraa1ae8452018-03-06 15:08:55 -0800112static int mbox_message_match(struct k_mbox_msg *tx_msg,
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400113 struct k_mbox_msg *rx_msg)
114{
Kumar Galaa1b77fd2020-05-27 11:26:57 -0500115 uint32_t temp_info;
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400116
117 if (((tx_msg->tx_target_thread == (k_tid_t)K_ANY) ||
118 (tx_msg->tx_target_thread == rx_msg->tx_target_thread)) &&
119 ((rx_msg->rx_source_thread == (k_tid_t)K_ANY) ||
120 (rx_msg->rx_source_thread == tx_msg->rx_source_thread))) {
121
122 /* update thread identifier fields for both descriptors */
123 rx_msg->rx_source_thread = tx_msg->rx_source_thread;
124 tx_msg->tx_target_thread = rx_msg->tx_target_thread;
125
126 /* update application info fields for both descriptors */
127 temp_info = rx_msg->info;
128 rx_msg->info = tx_msg->info;
129 tx_msg->info = temp_info;
130
131 /* update data size field for receiver only */
132 if (rx_msg->size > tx_msg->size) {
133 rx_msg->size = tx_msg->size;
134 }
135
136 /* update data location fields for receiver only */
137 rx_msg->tx_data = tx_msg->tx_data;
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400138
139 /* update syncing thread field for receiver only */
140 rx_msg->_syncing_thread = tx_msg->_syncing_thread;
141
142 return 0;
143 }
144
145 return -1;
146}
147
/**
 * @brief Dispose of received message.
 *
 * Notifies the sender that message processing is complete: for an
 * asynchronous send this recycles the descriptor/dummy-thread pair and gives
 * the optional completion semaphore; for a synchronous send it wakes the
 * pended sending thread.
 *
 * @param rx_msg Pointer to receive message descriptor.
 */
static void mbox_message_dispose(struct k_mbox_msg *rx_msg)
{
	struct k_thread *sending_thread;
	struct k_mbox_msg *tx_msg;

	/* do nothing if message was disposed of when it was received */
	if (rx_msg->_syncing_thread == NULL) {
		return;
	}

	/* recover sender info; clearing _syncing_thread marks it disposed */
	sending_thread = rx_msg->_syncing_thread;
	rx_msg->_syncing_thread = NULL;
	tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;

	/* update data size field for sender (actual amount transferred) */
	tx_msg->size = rx_msg->size;

#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
	/*
	 * asynchronous send: free asynchronous message descriptor +
	 * dummy thread pair, then give semaphore (if needed)
	 */
	if ((sending_thread->base.thread_state & _THREAD_DUMMY) != 0U) {
		struct k_sem *async_sem = tx_msg->_async_sem;

		/* NOTE: free before k_sem_give so the descriptor is reusable
		 * by the time any waiter observes the semaphore
		 */
		mbox_async_free((struct k_mbox_async *)sending_thread);
		if (async_sem != NULL) {
			k_sem_give(async_sem);
		}
		return;
	}
#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS */

	/* synchronous send: wake up sending thread with a 0 return value */
	arch_thread_return_value_set(sending_thread, 0);
	z_mark_thread_as_not_pending(sending_thread);
	z_ready_thread(sending_thread);
	z_reschedule_unlocked();
}
195
/**
 * @brief Send a mailbox message.
 *
 * Helper routine that handles both synchronous and asynchronous sends.
 * For asynchronous sends tx_msg->_syncing_thread is a dummy thread that
 * stands in for the sender (its _THREAD_DUMMY flag selects the async paths
 * below); for synchronous sends it is the calling thread itself.
 *
 * @param mbox Pointer to the mailbox object.
 * @param tx_msg Pointer to transmit message descriptor.
 * @param timeout Maximum time to wait for the message to be
 *        received (although not necessarily completely processed).
 *        Use K_NO_WAIT to return immediately, or K_FOREVER to wait as long
 *        as necessary.
 *
 * @return 0 if successful, -ENOMSG if failed immediately, -EAGAIN if timed out
 */
static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
			    k_timeout_t timeout)
{
	struct k_thread *sending_thread;
	struct k_thread *receiving_thread;
	struct k_mbox_msg *rx_msg;
	k_spinlock_key_t key;

	/* save sender id so it can be used during message matching */
	tx_msg->rx_source_thread = _current;

	/* finish readying sending thread (actual or dummy) for send */
	sending_thread = tx_msg->_syncing_thread;
	sending_thread->base.swap_data = tx_msg;

	/* search mailbox's rx queue for a compatible receiver */
	key = k_spin_lock(&mbox->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, message_put, mbox, timeout);

	_WAIT_Q_FOR_EACH(&mbox->rx_msg_queue, receiving_thread) {
		rx_msg = (struct k_mbox_msg *)receiving_thread->base.swap_data;

		if (mbox_message_match(tx_msg, rx_msg) == 0) {
			/* take receiver out of rx queue */
			z_unpend_thread(receiving_thread);

			/* ready receiver for execution */
			arch_thread_return_value_set(receiving_thread, 0);
			z_ready_thread(receiving_thread);

#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
			/*
			 * asynchronous send: swap out current thread
			 * if receiver has priority, otherwise let it continue
			 *
			 * note: dummy sending thread sits (unqueued)
			 * until the receiver consumes the message
			 */
			if ((sending_thread->base.thread_state & _THREAD_DUMMY)
			    != 0U) {
				z_reschedule(&mbox->lock, key);
				return 0;
			}
#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS */
			SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, message_put, mbox, timeout);

			/*
			 * synchronous send: pend current thread (unqueued)
			 * until the receiver consumes the message
			 * (z_pend_curr releases the spinlock)
			 */
			int ret = z_pend_curr(&mbox->lock, key, NULL, K_FOREVER);

			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, message_put, mbox, timeout, ret);

			return ret;
		}
	}

	/* didn't find a matching receiver: don't wait for one */
	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, message_put, mbox, timeout, -ENOMSG);

		k_spin_unlock(&mbox->lock, key);
		return -ENOMSG;
	}

#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
	/* asynchronous send: dummy thread waits on tx queue for receiver */
	if ((sending_thread->base.thread_state & _THREAD_DUMMY) != 0U) {
		z_pend_thread(sending_thread, &mbox->tx_msg_queue, K_FOREVER);
		k_spin_unlock(&mbox->lock, key);
		return 0;
	}
#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS */
	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, message_put, mbox, timeout);

	/* synchronous send: sender waits on tx queue for receiver or timeout */
	int ret = z_pend_curr(&mbox->lock, key, &mbox->tx_msg_queue, timeout);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, message_put, mbox, timeout, ret);

	return ret;
}
294
Andy Ross78327382020-03-05 15:18:14 -0800295int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
296 k_timeout_t timeout)
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400297{
298 /* configure things for a synchronous send, then send the message */
Nicolas Pitre46aa6712025-01-07 12:00:43 -0500299 tx_msg->_syncing_thread = _current;
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400300
Torbjörn Lekselld2e7de52021-03-26 12:47:13 +0100301 SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, put, mbox, timeout);
302
303 int ret = mbox_message_put(mbox, tx_msg, timeout);
304
305 SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, put, mbox, timeout, ret);
306
307 return ret;
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400308}
309
#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
/*
 * Asynchronous send: the caller does not block waiting for the receiver.
 * A descriptor/dummy-thread pair is allocated (this allocation itself may
 * block if the pool is exhausted), a private copy of the caller's message
 * is made, and the optional semaphore is given when the message has been
 * fully consumed.
 */
void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
		      struct k_sem *sem)
{
	struct k_mbox_async *async;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, async_put, mbox, sem);

	/*
	 * allocate an asynchronous message descriptor, configure both parts,
	 * then send the message asynchronously
	 */
	mbox_async_alloc(&async);

	/* dummy thread inherits the caller's priority for queue ordering */
	async->thread.prio = _current->base.prio;

	async->tx_msg = *tx_msg;
	async->tx_msg._syncing_thread = (struct k_thread *)&async->thread;
	async->tx_msg._async_sem = sem;

	/* K_FOREVER here pends the dummy thread, not the caller */
	(void)mbox_message_put(mbox, &async->tx_msg, K_FOREVER);
	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, async_put, mbox, sem);
}
#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS */
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400334
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400335void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer)
336{
337 /* handle case where data is to be discarded */
338 if (buffer == NULL) {
339 rx_msg->size = 0;
Leandro Pereiraa1ae8452018-03-06 15:08:55 -0800340 mbox_message_dispose(rx_msg);
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400341 return;
342 }
343
344 /* copy message data to buffer, then dispose of message */
Anas Nashifbbbc38b2021-03-29 10:03:49 -0400345 if ((rx_msg->tx_data != NULL) && (rx_msg->size > 0U)) {
Flavio Ceolin66994232018-08-13 15:17:04 -0700346 (void)memcpy(buffer, rx_msg->tx_data, rx_msg->size);
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400347 }
Leandro Pereiraa1ae8452018-03-06 15:08:55 -0800348 mbox_message_dispose(rx_msg);
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400349}
350
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400351/**
352 * @brief Handle immediate consumption of received mailbox message data.
353 *
354 * Checks to see if received message data should be kept for later retrieval,
355 * or if the data should consumed immediately and the message disposed of.
356 *
357 * The data is consumed immediately in either of the following cases:
Stefan Eicherfbe7d722021-08-23 11:36:22 +0000358 * 1) The receiver requested immediate retrieval by supplying a buffer
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400359 * to receive the data.
360 * 2) There is no data to be retrieved. (i.e. Data size is 0 bytes.)
361 *
362 * @param rx_msg Pointer to receive message descriptor.
363 * @param buffer Pointer to buffer to receive data.
364 *
365 * @return 0
366 */
Leandro Pereiraa1ae8452018-03-06 15:08:55 -0800367static int mbox_message_data_check(struct k_mbox_msg *rx_msg, void *buffer)
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400368{
369 if (buffer != NULL) {
370 /* retrieve data now, then dispose of message */
371 k_mbox_data_get(rx_msg, buffer);
Anas Nashifbbbc38b2021-03-29 10:03:49 -0400372 } else if (rx_msg->size == 0U) {
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400373 /* there is no data to get, so just dispose of message */
Leandro Pereiraa1ae8452018-03-06 15:08:55 -0800374 mbox_message_dispose(rx_msg);
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400375 } else {
376 /* keep message around for later data retrieval */
377 }
378
379 return 0;
380}
381
/*
 * Receive a mailbox message, waiting up to 'timeout' for a compatible
 * sender to appear. If 'buffer' is non-NULL the data is copied out (and the
 * message disposed of) immediately; otherwise the data may be retrieved
 * later via k_mbox_data_get().
 *
 * Returns 0 on success, -ENOMSG if no sender and K_NO_WAIT, or the
 * z_pend_curr() result (e.g. -EAGAIN) on timeout.
 */
int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
	       k_timeout_t timeout)
{
	struct k_thread *sending_thread;
	struct k_mbox_msg *tx_msg;
	k_spinlock_key_t key;
	int result;

	/* save receiver id so it can be used during message matching */
	rx_msg->tx_target_thread = _current;

	/* search mailbox's tx queue for a compatible sender */
	key = k_spin_lock(&mbox->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_mbox, get, mbox, timeout);

	_WAIT_Q_FOR_EACH(&mbox->tx_msg_queue, sending_thread) {
		tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;

		if (mbox_message_match(tx_msg, rx_msg) == 0) {
			/* take sender out of mailbox's tx queue */
			z_unpend_thread(sending_thread);

			/* lock released before the (possibly long) data copy */
			k_spin_unlock(&mbox->lock, key);

			/* consume message data immediately, if needed */
			result = mbox_message_data_check(rx_msg, buffer);

			SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, get, mbox, timeout, result);
			return result;
		}
	}

	/* didn't find a matching sender */

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, get, mbox, timeout, -ENOMSG);

		/* don't wait for a matching sender to appear */
		k_spin_unlock(&mbox->lock, key);
		return -ENOMSG;
	}

	SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_mbox, get, mbox, timeout);

	/*
	 * wait until a matching sender appears or a timeout occurs;
	 * swap_data lets the sender find this rx descriptor for matching
	 * (z_pend_curr releases the spinlock)
	 */
	_current->base.swap_data = rx_msg;
	result = z_pend_curr(&mbox->lock, key, &mbox->rx_msg_queue, timeout);

	/* consume message data immediately, if needed */
	if (result == 0) {
		result = mbox_message_data_check(rx_msg, buffer);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_mbox, get, mbox, timeout, result);

	return result;
}
Peter Mitsis6df8efe2023-05-11 14:06:46 -0400440
#ifdef CONFIG_OBJ_CORE_MAILBOX

/*
 * Register the mailbox object type with the object core subsystem and link
 * every statically defined mailbox into it. Runs once at PRE_KERNEL_1;
 * dynamically initialized mailboxes are linked by k_mbox_init() instead.
 */
static int init_mailbox_obj_core_list(void)
{
	/* Initialize mailbox object type */

	z_obj_type_init(&obj_type_mailbox, K_OBJ_TYPE_MBOX_ID,
			offsetof(struct k_mbox, obj_core));

	/* Initialize and link statically defined mailboxes */

	STRUCT_SECTION_FOREACH(k_mbox, mbox) {
		k_obj_core_init_and_link(K_OBJ_CORE(mbox), &obj_type_mailbox);
	}

	return 0;
}

SYS_INIT(init_mailbox_obj_core_list, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJ_CORE_MAILBOX */