blob: c26aaa830a3eabd33d908f794efbf4c8dc733e19 [file] [log] [blame]
Benjamin Walsh456c6da2016-09-02 18:55:39 -04001/*
2 * Copyright (c) 2016 Wind River Systems, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17/**
18 * @brief Mailboxes.
19 */
20
21#include <kernel.h>
Benjamin Walshf6ca7de2016-11-08 10:36:50 -050022#include <kernel_structs.h>
Anas Nashif569f0b42016-12-17 13:18:45 -050023#include <debug/object_tracing_common.h>
Benjamin Walsh456c6da2016-09-02 18:55:39 -040024#include <toolchain.h>
25#include <sections.h>
26#include <string.h>
27#include <wait_q.h>
28#include <misc/dlist.h>
Dmitriy Korovkin284042d2016-09-09 11:24:27 -040029#include <init.h>
Benjamin Walsh456c6da2016-09-02 18:55:39 -040030
31
32#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
33
/*
 * Asynchronous message descriptor type.
 *
 * The dummy thread object MUST be the first member: the kernel treats the
 * descriptor's address as a thread pointer while the message is pending,
 * and _mbox_message_dispose() casts that thread pointer back to a
 * struct k_mbox_async * to free the descriptor.
 */
struct k_mbox_async {
	struct _thread_base thread;	/* dummy thread object */
	struct k_mbox_msg tx_msg;	/* transmit message descriptor */
};
39
/* array of asynchronous message descriptors */
static struct k_mbox_async __noinit async_msg[CONFIG_NUM_MBOX_ASYNC_MSGS];

/*
 * stack of unused asynchronous message descriptors
 *
 * Descriptor addresses are pushed here by init_mbox_module() and then
 * recycled through _mbox_async_alloc()/_mbox_async_free().
 */
K_STACK_DEFINE(async_msg_free, CONFIG_NUM_MBOX_ASYNC_MSGS);
45
/*
 * Allocate an asynchronous message descriptor.
 *
 * Pops a free descriptor's address off the async_msg_free stack into *async.
 * Blocks the caller (K_FOREVER) until a descriptor becomes available, so it
 * must not be called from a context that cannot wait.
 */
static inline void _mbox_async_alloc(struct k_mbox_async **async)
{
	k_stack_pop(&async_msg_free, (uint32_t *)async, K_FOREVER);
}
51
/*
 * Free an asynchronous message descriptor.
 *
 * Returns the descriptor to the async_msg_free stack; never blocks, since
 * the stack is sized to hold every descriptor.
 */
static inline void _mbox_async_free(struct k_mbox_async *async)
{
	k_stack_push(&async_msg_free, (uint32_t)async);
}
57
58#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS > 0 */
59
/*
 * Bounds of the array of statically defined mailboxes; presumably emitted
 * by the linker script around the K_MBOX_DEFINE section — TODO confirm.
 */
extern struct k_mbox _k_mbox_list_start[];
extern struct k_mbox _k_mbox_list_end[];

/* list head used by SYS_TRACING_OBJ_INIT() when object tracing is enabled */
struct k_mbox *_trace_list_k_mbox;
64
65#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0) || \
66 defined(CONFIG_DEBUG_TRACING_KERNEL_OBJECTS)
67
/**
 * @brief Do run-time initialization of mailbox object subsystem.
 *
 * Populates the pool of asynchronous message descriptors (if configured)
 * and registers statically defined mailboxes with object tracing (if
 * enabled).
 *
 * @param dev Unused device pointer (required by the SYS_INIT signature).
 *
 * @return 0 always
 */
static int init_mbox_module(struct device *dev)
{
	ARG_UNUSED(dev);

#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
	/*
	 * Create pool of asynchronous message descriptors.
	 *
	 * A dummy thread requires minimal initialization, since it never gets
	 * to execute. The K_DUMMY flag is sufficient to distinguish a dummy
	 * thread from a real one. The threads are *not* added to the kernel's
	 * list of known threads.
	 *
	 * Once initialized, the address of each descriptor is added to a stack
	 * that governs access to them.
	 */

	int i;

	for (i = 0; i < CONFIG_NUM_MBOX_ASYNC_MSGS; i++) {
		_init_thread_base(&async_msg[i].thread, 0, K_DUMMY, 0);
		k_stack_push(&async_msg_free, (uint32_t)&async_msg[i]);
	}
#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS > 0 */

	/* Complete initialization of statically defined mailboxes. */

#ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
	struct k_mbox *mbox;

	for (mbox = _k_mbox_list_start; mbox < _k_mbox_list_end; mbox++) {
		SYS_TRACING_OBJ_INIT(k_mbox, mbox);
	}
#endif /* CONFIG_DEBUG_TRACING_KERNEL_OBJECTS */

	return 0;
}

/* run during early boot, before kernel services are generally available */
SYS_INIT(init_mbox_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
Dmitriy Korovkin284042d2016-09-09 11:24:27 -0400110
Allan Stephense7d2cc22016-10-19 16:10:46 -0500111#endif /* CONFIG_NUM_MBOX_ASYNC_MSGS or CONFIG_DEBUG_TRACING_KERNEL_OBJECTS */
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400112
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400113void k_mbox_init(struct k_mbox *mbox_ptr)
114{
115 sys_dlist_init(&mbox_ptr->tx_msg_queue);
116 sys_dlist_init(&mbox_ptr->rx_msg_queue);
Allan Stephense7d2cc22016-10-19 16:10:46 -0500117 SYS_TRACING_OBJ_INIT(k_mbox, mbox_ptr);
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400118}
119
120/**
121 * @brief Check compatibility of sender's and receiver's message descriptors.
122 *
123 * Compares sender's and receiver's message descriptors to see if they are
124 * compatible. If so, the descriptor fields are updated to reflect that a
125 * match has occurred.
126 *
127 * @param tx_msg Pointer to transmit message descriptor.
128 * @param rx_msg Pointer to receive message descriptor.
129 *
130 * @return 0 if successfully matched, otherwise -1.
131 */
132static int _mbox_message_match(struct k_mbox_msg *tx_msg,
133 struct k_mbox_msg *rx_msg)
134{
135 uint32_t temp_info;
136
137 if (((tx_msg->tx_target_thread == (k_tid_t)K_ANY) ||
138 (tx_msg->tx_target_thread == rx_msg->tx_target_thread)) &&
139 ((rx_msg->rx_source_thread == (k_tid_t)K_ANY) ||
140 (rx_msg->rx_source_thread == tx_msg->rx_source_thread))) {
141
142 /* update thread identifier fields for both descriptors */
143 rx_msg->rx_source_thread = tx_msg->rx_source_thread;
144 tx_msg->tx_target_thread = rx_msg->tx_target_thread;
145
146 /* update application info fields for both descriptors */
147 temp_info = rx_msg->info;
148 rx_msg->info = tx_msg->info;
149 tx_msg->info = temp_info;
150
151 /* update data size field for receiver only */
152 if (rx_msg->size > tx_msg->size) {
153 rx_msg->size = tx_msg->size;
154 }
155
156 /* update data location fields for receiver only */
157 rx_msg->tx_data = tx_msg->tx_data;
158 rx_msg->tx_block = tx_msg->tx_block;
159 if (rx_msg->tx_data != NULL) {
160 rx_msg->tx_block.pool_id = NULL;
161 } else if (rx_msg->tx_block.pool_id != NULL) {
162 rx_msg->tx_data = rx_msg->tx_block.data;
163 }
164
165 /* update syncing thread field for receiver only */
166 rx_msg->_syncing_thread = tx_msg->_syncing_thread;
167
168 return 0;
169 }
170
171 return -1;
172}
173
/**
 * @brief Dispose of received message.
 *
 * Releases any memory pool block still associated with the message,
 * then notifies the sender that message processing is complete: an
 * asynchronous sender's descriptor is recycled (and its semaphore given,
 * if any), while a synchronous sender is woken up.
 *
 * Safe to call more than once for the same message; subsequent calls are
 * no-ops because _syncing_thread is cleared on the first disposal.
 *
 * @param rx_msg Pointer to receive message descriptor.
 *
 * @return N/A
 */
static void _mbox_message_dispose(struct k_mbox_msg *rx_msg)
{
	struct k_thread *sending_thread;
	struct k_mbox_msg *tx_msg;
	unsigned int key;

	/* do nothing if message was disposed of when it was received */
	if (rx_msg->_syncing_thread == NULL) {
		return;
	}

	/* release sender's memory pool block */
	if (rx_msg->tx_block.pool_id != NULL) {
		k_mem_pool_free(&rx_msg->tx_block);
		rx_msg->tx_block.pool_id = NULL;
	}

	/*
	 * recover sender info; the sender's descriptor was stashed in its
	 * thread's swap_data by _mbox_message_put()
	 */
	sending_thread = rx_msg->_syncing_thread;
	rx_msg->_syncing_thread = NULL;
	tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;

	/* update data size field for sender (reflects amount received) */
	tx_msg->size = rx_msg->size;

#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
	/*
	 * asynchronous send: free asynchronous message descriptor +
	 * dummy thread pair, then give semaphore (if needed)
	 *
	 * note: async_sem must be read before the descriptor is freed,
	 * since freeing makes the descriptor available for reuse
	 */
	if (sending_thread->base.flags & K_DUMMY) {
		struct k_sem *async_sem = tx_msg->_async_sem;

		_mbox_async_free((struct k_mbox_async *)sending_thread);
		if (async_sem != NULL) {
			k_sem_give(async_sem);
		}
		return;
	}
#endif

	/* synchronous send: wake up sending thread */
	key = irq_lock();
	_set_thread_return_value(sending_thread, 0);
	_mark_thread_as_not_pending(sending_thread);
	_ready_thread(sending_thread);
	_reschedule_threads(key);
}
232
/**
 * @brief Send a mailbox message.
 *
 * Helper routine that handles both synchronous and asynchronous sends.
 * An asynchronous send is recognized by the K_DUMMY flag on the syncing
 * thread (a dummy thread embedded in a k_mbox_async descriptor).
 *
 * @param mbox Pointer to the mailbox object.
 * @param tx_msg Pointer to transmit message descriptor.
 * @param timeout Maximum time (milliseconds) to wait for the message to be
 *        received (although not necessarily completely processed).
 *        Use K_NO_WAIT to return immediately, or K_FOREVER to wait as long
 *        as necessary.
 *
 * @return 0 if successful, -ENOMSG if failed immediately, -EAGAIN if timed out
 */
static int _mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
			     int32_t timeout)
{
	struct k_thread *sending_thread;
	struct k_thread *receiving_thread;
	struct k_mbox_msg *rx_msg;
	sys_dnode_t *wait_q_item, *next_wait_q_item;
	unsigned int key;

	/* save sender id so it can be used during message matching */
	tx_msg->rx_source_thread = _current;

	/*
	 * finish readying sending thread (actual or dummy) for send;
	 * swap_data is how _mbox_message_dispose() later locates tx_msg
	 */
	sending_thread = tx_msg->_syncing_thread;
	sending_thread->base.swap_data = tx_msg;

	/* search mailbox's rx queue for a compatible receiver */
	key = irq_lock();

	/* safe iteration: a matched receiver is unlinked inside the loop */
	SYS_DLIST_FOR_EACH_NODE_SAFE(&mbox->rx_msg_queue, wait_q_item,
				     next_wait_q_item) {

		receiving_thread = (struct k_thread *)wait_q_item;
		rx_msg = (struct k_mbox_msg *)receiving_thread->base.swap_data;

		if (_mbox_message_match(tx_msg, rx_msg) == 0) {
			/* take receiver out of rx queue */
			_unpend_thread(receiving_thread);
			_abort_thread_timeout(receiving_thread);

			/* ready receiver for execution */
			_set_thread_return_value(receiving_thread, 0);
			_ready_thread(receiving_thread);

#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
			/*
			 * asynchronous send: swap out current thread
			 * if receiver has priority, otherwise let it continue
			 *
			 * note: dummy sending thread sits (unqueued)
			 * until the receiver consumes the message
			 */
			if (sending_thread->base.flags & K_DUMMY) {
				_reschedule_threads(key);
				return 0;
			}
#endif

			/*
			 * synchronous send: pend current thread (unqueued)
			 * until the receiver consumes the message; _Swap()
			 * returns the value set by _mbox_message_dispose()
			 */
			_remove_thread_from_ready_q(_current);
			_mark_thread_as_pending(_current);
			return _Swap(key);
		}
	}

	/* didn't find a matching receiver: don't wait for one */
	if (timeout == K_NO_WAIT) {
		irq_unlock(key);
		return -ENOMSG;
	}

#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
	/* asynchronous send: dummy thread waits on tx queue for receiver */
	if (sending_thread->base.flags & K_DUMMY) {
		_pend_thread(sending_thread, &mbox->tx_msg_queue, K_FOREVER);
		irq_unlock(key);
		return 0;
	}
#endif

	/* synchronous send: sender waits on tx queue for receiver or timeout */
	_pend_current_thread(&mbox->tx_msg_queue, timeout);
	return _Swap(key);
}
324
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400325int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, int32_t timeout)
326{
327 /* configure things for a synchronous send, then send the message */
328 tx_msg->_syncing_thread = _current;
329
330 return _mbox_message_put(mbox, tx_msg, timeout);
331}
332
333#if (CONFIG_NUM_MBOX_ASYNC_MSGS > 0)
/**
 * @brief Send a mailbox message asynchronously.
 *
 * Captures the message in a dynamically allocated descriptor whose dummy
 * thread stands in for the (non-blocking) sender. The optional semaphore
 * is given once the receiver has consumed the message.
 *
 * @param mbox Pointer to the mailbox object.
 * @param tx_msg Pointer to transmit message descriptor (copied).
 * @param sem Semaphore to signal on completion, or NULL.
 */
void k_mbox_async_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
		      struct k_sem *sem)
{
	struct k_mbox_async *async;

	/*
	 * allocate an asynchronous message descriptor, configure both parts,
	 * then send the message asynchronously
	 *
	 * note: may block until a descriptor becomes available
	 */
	_mbox_async_alloc(&async);

	/* dummy thread matches the sender's priority for scheduling checks */
	async->thread.prio = _current->base.prio;

	async->tx_msg = *tx_msg;
	async->tx_msg._syncing_thread = (struct k_thread *)&async->thread;
	async->tx_msg._async_sem = sem;

	/* dummy thread (not the caller) does any waiting that is needed */
	_mbox_message_put(mbox, &async->tx_msg, K_FOREVER);
}
353#endif
354
Benjamin Walsh456c6da2016-09-02 18:55:39 -0400355void k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer)
356{
357 /* handle case where data is to be discarded */
358 if (buffer == NULL) {
359 rx_msg->size = 0;
360 _mbox_message_dispose(rx_msg);
361 return;
362 }
363
364 /* copy message data to buffer, then dispose of message */
365 if ((rx_msg->tx_data != NULL) && (rx_msg->size > 0)) {
366 memcpy(buffer, rx_msg->tx_data, rx_msg->size);
367 }
368 _mbox_message_dispose(rx_msg);
369}
370
/**
 * @brief Retrieve message data into a memory pool block.
 *
 * Three cases:
 *  - pool == NULL: the data is discarded and the message disposed of.
 *  - the message already carries a pool block: ownership of that block is
 *    transferred to the receiver; no copy is made.
 *  - otherwise: a block is allocated from @a pool and the data copied in.
 *
 * @param rx_msg Pointer to receive message descriptor.
 * @param pool Memory pool to allocate from, or NULL to discard the data.
 * @param block Area to receive the (possibly transferred) block descriptor.
 * @param timeout Maximum time to wait for a block allocation.
 *
 * @return 0 on success, otherwise the k_mem_pool_alloc() error code.
 */
int k_mbox_data_block_get(struct k_mbox_msg *rx_msg, struct k_mem_pool *pool,
			  struct k_mem_block *block, int32_t timeout)
{
	int result;

	/* handle case where data is to be discarded */
	if (pool == NULL) {
		rx_msg->size = 0;
		_mbox_message_dispose(rx_msg);
		return 0;
	}

	/* handle case where data is already in a memory pool block */
	if (rx_msg->tx_block.pool_id != NULL) {
		/* give ownership of the block to receiver */
		*block = rx_msg->tx_block;
		rx_msg->tx_block.pool_id = NULL;

		/* now dispose of message */
		_mbox_message_dispose(rx_msg);
		return 0;
	}

	/*
	 * allocate memory pool block (even when message size is 0!);
	 * on failure the message is left intact for a later retry
	 */
	result = k_mem_pool_alloc(pool, block, rx_msg->size, timeout);
	if (result != 0) {
		return result;
	}

	/* retrieve non-block data into new block, then dispose of message */
	k_mbox_data_get(rx_msg, block->data);
	return 0;
}
404
405/**
406 * @brief Handle immediate consumption of received mailbox message data.
407 *
408 * Checks to see if received message data should be kept for later retrieval,
409 * or if the data should consumed immediately and the message disposed of.
410 *
411 * The data is consumed immediately in either of the following cases:
412 * 1) The receiver requested immediate retrieval by suppling a buffer
413 * to receive the data.
414 * 2) There is no data to be retrieved. (i.e. Data size is 0 bytes.)
415 *
416 * @param rx_msg Pointer to receive message descriptor.
417 * @param buffer Pointer to buffer to receive data.
418 *
419 * @return 0
420 */
421static int _mbox_message_data_check(struct k_mbox_msg *rx_msg, void *buffer)
422{
423 if (buffer != NULL) {
424 /* retrieve data now, then dispose of message */
425 k_mbox_data_get(rx_msg, buffer);
426 } else if (rx_msg->size == 0) {
427 /* there is no data to get, so just dispose of message */
428 _mbox_message_dispose(rx_msg);
429 } else {
430 /* keep message around for later data retrieval */
431 }
432
433 return 0;
434}
435
/**
 * @brief Receive a mailbox message.
 *
 * Scans the mailbox's tx queue for a compatible sender; if none is found,
 * optionally pends until one appears or the timeout expires. Message data
 * is consumed immediately when @a buffer is supplied or the message is
 * empty, otherwise it is retained for later retrieval.
 *
 * @param mbox Pointer to the mailbox object.
 * @param rx_msg Pointer to receive message descriptor.
 * @param buffer Buffer to receive the data, or NULL to defer/discard.
 * @param timeout Maximum time (milliseconds) to wait; K_NO_WAIT or K_FOREVER.
 *
 * @return 0 if a message was received, -ENOMSG if none available
 *         immediately, -EAGAIN on timeout.
 */
int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
	       int32_t timeout)
{
	struct k_thread *sending_thread;
	struct k_mbox_msg *tx_msg;
	sys_dnode_t *wait_q_item, *next_wait_q_item;
	unsigned int key;
	int result;

	/* save receiver id so it can be used during message matching */
	rx_msg->tx_target_thread = _current;

	/* search mailbox's tx queue for a compatible sender */
	key = irq_lock();

	/* safe iteration: a matched sender is unlinked inside the loop */
	SYS_DLIST_FOR_EACH_NODE_SAFE(&mbox->tx_msg_queue, wait_q_item,
				     next_wait_q_item) {

		sending_thread = (struct k_thread *)wait_q_item;
		tx_msg = (struct k_mbox_msg *)sending_thread->base.swap_data;

		if (_mbox_message_match(tx_msg, rx_msg) == 0) {
			/*
			 * take sender out of mailbox's tx queue; it is NOT
			 * readied here — it stays unqueued until the message
			 * is disposed of
			 */
			_unpend_thread(sending_thread);
			_abort_thread_timeout(sending_thread);

			irq_unlock(key);

			/* consume message data immediately, if needed */
			return _mbox_message_data_check(rx_msg, buffer);
		}
	}

	/* didn't find a matching sender */

	if (timeout == K_NO_WAIT) {
		/* don't wait for a matching sender to appear */
		irq_unlock(key);
		return -ENOMSG;
	}

	/*
	 * wait until a matching sender appears or a timeout occurs;
	 * swap_data exposes rx_msg to _mbox_message_put()'s matching loop
	 */
	_pend_current_thread(&mbox->rx_msg_queue, timeout);
	_current->base.swap_data = rx_msg;
	result = _Swap(key);

	/* consume message data immediately, if needed */
	if (result == 0) {
		result = _mbox_message_data_check(rx_msg, buffer);
	}

	return result;
}
489
490
491
/**
 * @brief Legacy API: send a mailbox message at a given priority.
 *
 * If @a prio differs from the caller's current priority, the caller's
 * priority is temporarily raised/lowered for the duration of the send and
 * restored afterwards.
 *
 * @param mbox Mailbox to send to.
 * @param prio Priority at which to perform the send.
 * @param msg Legacy message descriptor (cast to struct k_mbox_msg).
 * @param timeout Timeout in ticks (converted to milliseconds).
 *
 * @return legacy return code from _error_to_rc()
 */
int task_mbox_put(kmbox_t mbox, kpriority_t prio, struct k_msg *msg,
		  int32_t timeout)
{
	struct k_mbox_msg *tx_msg = (struct k_mbox_msg *)msg;
	kpriority_t curr_prio;
	unsigned int key;
	int result;

	/* handle old-style request to send an empty message */
	if (tx_msg->size == 0) {
		tx_msg->tx_block.pool_id = NULL;
	}

	/* handle sending message of current thread priority */
	curr_prio = _current->base.prio;
	if (prio == curr_prio) {
		return _error_to_rc(k_mbox_put(mbox, tx_msg,
					       _ticks_to_ms(timeout)));
	}

	/* handle sending message of a different thread priority */
	key = irq_lock();
	_thread_priority_set(_current, prio);
	_reschedule_threads(key);

	result = _error_to_rc(k_mbox_put(mbox, tx_msg, _ticks_to_ms(timeout)));

	/* restore the caller's original priority */
	key = irq_lock();
	_thread_priority_set(_current, curr_prio);
	_reschedule_threads(key);

	return result;
}
525
/**
 * @brief Legacy API: send a mailbox message asynchronously at a priority.
 *
 * Mirrors task_mbox_put(), but performs an asynchronous send: the caller
 * does not wait for the message to be received, and @a sema (if any) is
 * given on completion. The caller's priority is temporarily changed to
 * @a prio when it differs from the current priority.
 *
 * @param mbox Mailbox to send to.
 * @param prio Priority at which to perform the send.
 * @param msg Legacy message descriptor (cast to struct k_mbox_msg).
 * @param sema Semaphore signaled when the message has been consumed.
 */
void task_mbox_block_put(kmbox_t mbox, kpriority_t prio, struct k_msg *msg,
			 ksem_t sema)
{
	struct k_mbox_msg *tx_msg = (struct k_mbox_msg *)msg;
	kpriority_t curr_prio;
	unsigned int key;

	/* handle sending message of current thread priority */
	curr_prio = _current->base.prio;
	if (prio == curr_prio) {
		k_mbox_async_put(mbox, tx_msg, sema);
		return;
	}

	/* handle sending message of a different thread priority */
	key = irq_lock();
	_thread_priority_set(_current, prio);
	_reschedule_threads(key);

	k_mbox_async_put(mbox, tx_msg, sema);

	/* restore the caller's original priority */
	key = irq_lock();
	_thread_priority_set(_current, curr_prio);
	_reschedule_threads(key);
}
551
552int task_mbox_get(kmbox_t mbox, struct k_msg *msg, int32_t timeout)
553{
554 struct k_mbox_msg *rx_msg = (struct k_mbox_msg *)msg;
555
556 return _error_to_rc(k_mbox_get(mbox, rx_msg, rx_msg->_rx_data,
557 _ticks_to_ms(timeout)));
558}
559
560void task_mbox_data_get(struct k_msg *msg)
561{
562 struct k_mbox_msg *rx_msg = (struct k_mbox_msg *)msg;
563
564 /* handle old-style request to discard message data */
565 if (rx_msg->size == 0) {
566 rx_msg->_rx_data = NULL;
567 }
568
569 k_mbox_data_get(rx_msg, rx_msg->_rx_data);
570}
571
572int task_mbox_data_block_get(struct k_msg *msg, struct k_block *block,
573 kmemory_pool_t pool_id, int32_t timeout)
574{
575 struct k_mbox_msg *rx_msg = (struct k_mbox_msg *)msg;
576
577 return _error_to_rc(k_mbox_data_block_get(rx_msg, pool_id, block,
578 _ticks_to_ms(timeout)));
579}