blob: 23a2a4a1eac3cd3c65b3e25278a6cdf1cd677372 [file] [log] [blame]
/*
* Copyright (c) 2010-2015 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
*
* @brief Nanokernel dynamic-size FIFO queue object.
*
* This module provides the nanokernel FIFO object implementation, including
* the following APIs:
*
* nano_fifo_init
* nano_fiber_fifo_put, nano_task_fifo_put, nano_isr_fifo_put
* nano_fiber_fifo_get, nano_task_fifo_get, nano_isr_fifo_get
* nano_fifo_get
*/
/*
* INTERNAL
* In some cases the compiler "alias" attribute is used to map two or more
* APIs to the same function, since they have identical implementations.
*/
#include <nano_private.h>
#include <toolchain.h>
#include <sections.h>
#include <wait_q.h>
/*
* INTERNAL
* Although the existing implementation will support invocation from an ISR
* context, for future flexibility, this API will be restricted from ISR
* level invocation.
*/
/**
 * @brief Initialize a nanokernel FIFO object.
 *
 * Must be called before the FIFO is passed to any of the put/get APIs.
 *
 * @param fifo FIFO object to initialize.
 *
 * @return N/A
 */
void nano_fifo_init(struct nano_fifo *fifo)
{
	/*
	 * The wait queue and data queue occupy the same space since there
	 * cannot be both queued data and pending fibers in the FIFO. Care
	 * must be taken that, when one of the queues becomes empty, it is
	 * reset to a state that reflects an empty queue to both the data and
	 * wait queues.
	 */
	_nano_wait_q_init(&fifo->wait_q);

	/*
	 * If the 'stat' field is a positive value, it indicates how many data
	 * elements reside in the FIFO. If the 'stat' field is a negative
	 * value, its absolute value indicates how many fibers are pending on
	 * the FIFO object. Thus a value of '0' indicates that there are no
	 * data elements in the FIFO _and_ there are no pending fibers.
	 */
	fifo->stat = 0;

	/* hook into the kernel object tracing list (no-op unless enabled) */
	DEBUG_TRACING_OBJ_INIT(struct nano_fifo *, fifo, _track_list_nano_fifo);
}
FUNC_ALIAS(_fifo_put_non_preemptible, nano_isr_fifo_put, void);
FUNC_ALIAS(_fifo_put_non_preemptible, nano_fiber_fifo_put, void);
/**
 *
 * @brief Internal routine to append data to a fifo
 *
 * The first word of the data item is treated as the embedded "next" link:
 * it is overwritten to NULL-terminate the list at the new tail.
 *
 * @param fifo FIFO to append to.
 * @param data Data item to append; its first pointer-sized word is clobbered.
 *
 * @return N/A
 */
static inline void enqueue_data(struct nano_fifo *fifo, void *data)
{
	*(void **)fifo->data_q.tail = data;
	fifo->data_q.tail = data;

	/*
	 * Terminate the list through a pointer-sized store. The previous
	 * code wrote '*(int *)data = 0', which clears only sizeof(int)
	 * bytes and would leave a garbage "next" pointer on targets where
	 * pointers are wider than int (e.g. LP64).
	 */
	*(void **)data = NULL;
}
/**
 *
 * @brief Append an element to a fifo (no context switch)
 *
 * This routine adds an element to the end of a fifo object; it may be called
 * from either a fiber or an ISR context. A fiber pending on the fifo object
 * will be made ready, but will NOT be scheduled to execute.
 *
 * If a fiber is waiting on the fifo, the address of the element is handed
 * directly to the waiting fiber. Otherwise, the element is linked to the end
 * of the list.
 *
 * @param fifo FIFO on which to interact.
 * @param data Data to send.
 *
 * @return N/A
 *
 * INTERNAL
 * This function is capable of supporting invocations from both a fiber and an
 * ISR context. However, the nano_isr_fifo_put and nano_fiber_fifo_put aliases
 * are created to support any required implementation differences in the future
 * without introducing a source code migration issue.
 */
void _fifo_put_non_preemptible(struct nano_fifo *fifo, void *data)
{
	unsigned int key = irq_lock();

	/* a non-positive count after the increment means a fiber was waiting */
	if (++fifo->stat <= 0) {
		struct tcs *waiter = _nano_wait_q_remove_no_check(&fifo->wait_q);

		_nano_timeout_abort(waiter);
		fiberRtnValueSet(waiter, (unsigned int)data);
	} else {
		enqueue_data(fifo, data);
	}

	irq_unlock(key);
}
/**
 *
 * @brief Append an element to a fifo (task context)
 *
 * Adds an element to the end of a fifo object. If a fiber was pending on
 * the fifo, the element's address is handed to that fiber and the processor
 * is yielded via _Swap() so the fiber can run immediately; otherwise the
 * element is linked to the end of the list.
 *
 * @param fifo FIFO on which to interact.
 * @param data Data to send.
 *
 * @return N/A
 */
void nano_task_fifo_put(struct nano_fifo *fifo, void *data)
{
	unsigned int key = irq_lock();

	if (++fifo->stat <= 0) {
		/* a fiber is pending: deliver the element and reschedule */
		struct tcs *waiter = _nano_wait_q_remove_no_check(&fifo->wait_q);

		_nano_timeout_abort(waiter);
		fiberRtnValueSet(waiter, (unsigned int)data);
		_Swap(key);
	} else {
		enqueue_data(fifo, data);
		irq_unlock(key);
	}
}
/**
 *
 * @brief Append an element to a fifo (context-agnostic)
 *
 * Dispatches to the ISR, fiber, or task variant based on the value
 * returned by sys_execution_context_type_get().
 *
 * @param fifo FIFO on which to interact.
 * @param data Data to send.
 *
 * @return N/A
 */
void nano_fifo_put(struct nano_fifo *fifo, void *data)
{
	/* dispatch table indexed by the current execution context type */
	static void (*const put_for_context[3])(struct nano_fifo *, void *) = {
		nano_isr_fifo_put,
		nano_fiber_fifo_put,
		nano_task_fifo_put
	};

	put_for_context[sys_execution_context_type_get()](fifo, data);
}
/**
 *
 * @brief Internal routine to remove data from a fifo
 *
 * The caller is expected to have already decremented fifo->stat; a value
 * of zero here means this removal empties the data queue.
 *
 * @param fifo FIFO to remove from.
 *
 * @return The data item removed
 */
static inline void *dequeue_data(struct nano_fifo *fifo)
{
	void *head = fifo->data_q.head;

	if (fifo->stat != 0) {
		/* elements remain: advance the head past the removed item */
		fifo->data_q.head = *(void **)head;
	} else {
		/*
		 * The data_q and wait_q occupy the same space and have the
		 * same format, and there is already an API for resetting
		 * the wait_q, so use it.
		 */
		_nano_wait_q_reset(&fifo->wait_q);
	}

	return head;
}
FUNC_ALIAS(_fifo_get, nano_isr_fifo_get, void *);
FUNC_ALIAS(_fifo_get, nano_fiber_fifo_get, void *);
/**
 *
 * @brief Remove an element from a fifo (fiber or ISR context)
 *
 * If the fifo holds data, the head element is removed and returned. When it
 * is empty and a timeout is allowed, the caller pends on the wait queue and
 * the element address is delivered through _Swap()'s return value.
 *
 * @param fifo FIFO on which to interact.
 * @param timeout_in_ticks TICKS_NONE for no wait, otherwise how long to pend.
 *
 * @return The removed element, or NULL if none was available.
 */
void *_fifo_get(struct nano_fifo *fifo, int32_t timeout_in_ticks)
{
	void *item = NULL;
	unsigned int key = irq_lock();

	if (likely(fifo->stat > 0)) {
		fifo->stat--;
		item = dequeue_data(fifo);
	} else if (timeout_in_ticks != TICKS_NONE) {
		/* record this context as a pending consumer and block */
		fifo->stat--;
		_NANO_TIMEOUT_ADD(&fifo->wait_q, timeout_in_ticks);
		_nano_wait_q_put(&fifo->wait_q);
		return (void *)_Swap(key);
	}

	irq_unlock(key);
	return item;
}
/**
 *
 * @brief Remove an element from a fifo (task context)
 *
 * Polls the fifo, idling the CPU between checks, until an element becomes
 * available or the tick limit derived from the timeout is reached.
 *
 * @param fifo FIFO on which to interact.
 * @param timeout_in_ticks TICKS_NONE for a single check, TICKS_UNLIMITED to
 *        wait forever, or a tick count after which to give up.
 *
 * @return The removed element, or NULL if none became available in time.
 */
void *nano_task_fifo_get(struct nano_fifo *fifo, int32_t timeout_in_ticks)
{
	int64_t cur_ticks;
	/* INT64_MAX: effectively "never expires", used for TICKS_UNLIMITED */
	int64_t limit = 0x7fffffffffffffffll;
	unsigned int key;

	key = irq_lock();
	cur_ticks = _NANO_TIMEOUT_TICK_GET();
	if (timeout_in_ticks != TICKS_UNLIMITED) {
		/* absolute deadline; with TICKS_NONE this makes one pass only */
		limit = cur_ticks + timeout_in_ticks;
	}

	do {
		/*
		 * Predict that the branch will be taken to break out of the
		 * loop. There is little cost to a misprediction since that
		 * leads to idle.
		 */
		if (likely(fifo->stat > 0)) {
			void *data;

			fifo->stat--;
			data = dequeue_data(fifo);
			irq_unlock(key);
			return data;
		}

		if (timeout_in_ticks != TICKS_NONE) {
			_NANO_TIMEOUT_SET_TASK_TIMEOUT(timeout_in_ticks);

			/* see explanation in
			 * nano_stack.c:nano_task_stack_pop()
			 */
			nano_cpu_atomic_idle(key);

			/* re-acquire the lock and re-read the clock before
			 * re-testing the deadline
			 */
			key = irq_lock();
			cur_ticks = _NANO_TIMEOUT_TICK_GET();
		}
	} while (cur_ticks < limit);

	irq_unlock(key);
	return NULL;
}
/**
 *
 * @brief Remove an element from a fifo (context-agnostic)
 *
 * Dispatches to the ISR, fiber, or task variant based on the value
 * returned by sys_execution_context_type_get().
 *
 * @param fifo FIFO on which to interact.
 * @param timeout Timeout passed through to the selected variant.
 *
 * @return The removed element, or NULL if none was available.
 */
void *nano_fifo_get(struct nano_fifo *fifo, int32_t timeout)
{
	/* dispatch table indexed by the current execution context type */
	static void *(*const get_for_context[3])(struct nano_fifo *, int32_t) = {
		nano_isr_fifo_get,
		nano_fiber_fifo_get,
		nano_task_fifo_get
	};

	return get_for_context[sys_execution_context_type_get()](fifo, timeout);
}