/* ARM Cortex-M GCC specific public inline assembler functions and macros */
/*
* Copyright (c) 2015, Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Either public functions or macros, or invoked by public functions */
#ifndef _ASM_INLINE_GCC_PUBLIC_GCC_H
#define _ASM_INLINE_GCC_PUBLIC_GCC_H
#ifdef __cplusplus
extern "C" {
#endif
/*
 * This file must not be included directly.
 * Include arch/cpu.h instead.
 */
#ifdef _ASMLANGUAGE
#define _SCS_BASE_ADDR _PPB_INT_SCS
#define _SCS_ICSR (_SCS_BASE_ADDR + 0xd04)
#define _SCS_ICSR_PENDSV (1 << 28)
#define _SCS_ICSR_UNPENDSV (1 << 27)
#define _SCS_ICSR_RETTOBASE (1 << 11)
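/*
 * Illustrative assembly sketch only (not part of the original header),
 * assuming standard ARMv7-M ICSR semantics: pend a PendSV exception by
 * writing the PENDSVSET bit to the ICSR register defined above.
 *
 *     ldr r0, =_SCS_ICSR
 *     ldr r1, =_SCS_ICSR_PENDSV
 *     str r1, [r0]
 */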
#else /* !_ASMLANGUAGE */
#include <stdint.h>
#include <arch/arm/cortex_m/nvic.h>
#include <irq.h>
/**
*
* @brief Find most significant bit set in a 32-bit word
*
* This routine finds the first bit set starting from the most significant bit
* in the argument passed in and returns the index of that bit. Bits are
* numbered starting at 1 from the least significant bit. A return value of
* zero indicates that the value passed is zero.
*
* @return most significant bit set, 0 if @a op is 0
*/
static ALWAYS_INLINE unsigned int find_msb_set(uint32_t op)
{
	unsigned int bit;

	__asm__ volatile(
		"movs %0, #0;\n\t"         /* default result for op == 0 */
		"cmp %1, #0;\n\t"
		"itt ne;\n\t"
		"  clzne %0, %1;\n\t"      /* count leading zeroes */
		"  rsbne %0, %0, #32;\n\t" /* 1-based bit index = 32 - clz */
		: "=&r"(bit)               /* early clobber: %0 is written
					    * before %1 is consumed
					    */
		: "r"(op));

	return bit;
}
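/*
 * Usage sketch (illustrative only), showing the 1-based bit numbering
 * described above:
 *
 *     find_msb_set(0x00000001) == 1    bit 0 is set
 *     find_msb_set(0x80000000) == 32   bit 31 is set
 *     find_msb_set(0)          == 0    no bit set
 *
 * A portable C equivalent of the assembly would be:
 *
 *     op == 0 ? 0 : 32 - __builtin_clz(op)
 */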
/**
*
* @brief Find least significant bit set in a 32-bit word
*
* This routine finds the first bit set starting from the least significant bit
* in the argument passed in and returns the index of that bit. Bits are
* numbered starting at 1 from the least significant bit. A return value of
* zero indicates that the value passed is zero.
*
* @return least significant bit set, 0 if @a op is 0
*/
static ALWAYS_INLINE unsigned int find_lsb_set(uint32_t op)
{
	unsigned int bit;

	__asm__ volatile(
		"rsb %0, %1, #0;\n\t"
		"ands %0, %0, %1;\n\t"     /* %0 = op & (-op): only LSB set */
		"itt ne;\n\t"
		"  clzne %0, %0;\n\t"      /* count leading zeroes */
		"  rsbne %0, %0, #32;\n\t" /* 1-based bit index = 32 - clz */
		: "=&r"(bit)
		: "r"(op));

	return bit;
}
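/*
 * The rsb/ands pair relies on the two's-complement identity op & (-op),
 * which isolates the lowest set bit. Usage sketch (illustrative only):
 *
 *     find_lsb_set(0x00000001) == 1    bit 0 is the lowest bit set
 *     find_lsb_set(0xffff0000) == 17   bit 16 is the lowest bit set
 *     find_lsb_set(0)          == 0    no bit set
 *
 * A portable C equivalent of the assembly would be:
 *
 *     op == 0 ? 0 : 32 - __builtin_clz(op & -op)
 */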
/**
*
* @brief Disable all interrupts on the CPU
*
* This routine disables interrupts. It can be called from interrupt, task,
* or fiber level. It returns an architecture-dependent lock-out key
* representing the "interrupt disable state" prior to the call; this key can
* be passed to irq_unlock() to re-enable interrupts.
*
* The lock-out key should only be used as the argument to the irq_unlock()
* API. It should never be used to manually re-enable interrupts or to inspect
* or manipulate the contents of the source register.
*
* This routine can be called recursively: each call returns a key that, when
* passed to irq_unlock(), restores interrupt locking to the state in effect
* before that call.
*
* WARNINGS
* Invoking a kernel routine with interrupts locked may result in
* interrupts being re-enabled for an unspecified period of time. If the
* called routine blocks, interrupts will be re-enabled while another
* thread executes, or while the system is idle.
*
* The "interrupt disable state" is an attribute of a thread. Thus, if a
* fiber or task disables interrupts and subsequently invokes a kernel
* routine that causes the calling thread to block, the interrupt
* disable state will be restored when the thread is later rescheduled
* for execution.
*
* @return An architecture-dependent lock-out key representing the
* "interrupt disable state" prior to the call.
*
* @internal
*
* On Cortex-M3/M4, this function sets BASEPRI to _EXC_IRQ_DEFAULT_PRIO,
* masking every exception whose priority is equal to or lower than that
* level (i.e. whose priority value is numerically equal or greater). The
* two highest priority levels, as well as fixed-priority exceptions such as
* NMI and HardFault, can still interrupt the CPU.
*/
static ALWAYS_INLINE unsigned int _arch_irq_lock(void)
{
	unsigned int key;

	__asm__ volatile(
		"movs.n %%r1, %1;\n\t"
		"mrs %0, BASEPRI;\n\t"     /* save current BASEPRI as the key */
		"msr BASEPRI, %%r1;\n\t"   /* raise BASEPRI to mask interrupts */
		: "=r"(key)
		: "i"(_EXC_IRQ_DEFAULT_PRIO)
		: "r1", "memory");         /* "memory": compiler barrier so no
					    * memory access is reordered across
					    * the lock
					    */

	return key;
}
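/*
 * Typical critical-section usage, through the public irq_lock()/irq_unlock()
 * wrappers (sketch; shared_count is a hypothetical variable):
 *
 *     unsigned int key = irq_lock();
 *     shared_count++;              <-- safe from interrupt preemption
 *     irq_unlock(key);
 */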
/**
*
* @brief Enable all interrupts on the CPU (inline)
*
* This routine re-enables interrupts on the CPU. The @a key parameter is an
* architecture-dependent lock-out key that is returned by a previous
* invocation of irq_lock().
*
* This routine can be called from interrupt, task, or fiber level.
*
* @param key architecture-dependent lock-out key
*
* @return N/A
*/
static ALWAYS_INLINE void _arch_irq_unlock(unsigned int key)
{
	__asm__ volatile(
		"msr BASEPRI, %0;\n\t"
		:
		: "r"(key)
		: "memory");               /* barrier, see _arch_irq_lock() */
}
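/*
 * Because the key is simply the saved BASEPRI value on this architecture,
 * lock/unlock pairs nest naturally; only the outermost unlock actually
 * re-enables interrupts (sketch):
 *
 *     unsigned int k1 = irq_lock();    k1 holds the pre-lock BASEPRI
 *     unsigned int k2 = irq_lock();    k2 holds the locked BASEPRI
 *     irq_unlock(k2);                  still locked
 *     irq_unlock(k1);                  pre-lock state restored
 */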
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* _ASM_INLINE_GCC_PUBLIC_GCC_H */